author		Ingo Molnar	2018-02-06 21:12:31 +0100
committer	Ingo Molnar	2018-02-06 21:12:31 +0100
commit		82845079160817cc6ac64e5321bbd935e0a47b3a (patch)
tree		0886d1d52428e9db14536cae4b37db896e7c360a /include
parent		32e839dda3ba576943365f0f5817ce5c843137dc (diff)
parent		68c5735eaa5e680e701c9a2d1e3c7880bdf5ab66 (diff)
Merge branch 'linus' into sched/urgent, to resolve conflicts
Conflicts:
arch/arm64/kernel/entry.S
arch/x86/Kconfig
include/linux/sched/mm.h
kernel/fork.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
535 files changed, 23015 insertions, 7295 deletions
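Among the changes pulled in below is a new generic error-injection whitelist header, include/asm-generic/error-injection.h. The following is a minimal, hedged sketch of how its ALLOW_ERROR_INJECTION() macro is meant to be used; the function name is hypothetical and the example assumes a kernel built with CONFIG_FUNCTION_ERROR_INJECTION=y (the header shown in the diff is included directly for illustration).

	#include <linux/errno.h>
	#include <asm-generic/error-injection.h>

	/* Hypothetical helper, shown only to illustrate the whitelist macro. */
	static int example_prepare_buffer(void *buf, unsigned long len)
	{
		if (!buf || !len)
			return -EINVAL;
		return 0;
	}

	/*
	 * Put the function on the error-injection whitelist: an entry with
	 * its address and EI_ETYPE_ERRNO is emitted into the
	 * _error_injection_whitelist section, so an injected failure is
	 * reported to callers as a negative errno.
	 */
	ALLOW_ERROR_INJECTION(example_prepare_buffer, ERRNO);

The whitelist section is collected by the new ERROR_INJECT_WHITELIST() linker macro added to asm-generic/vmlinux.lds.h in the same series.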
diff --git a/include/asm-generic/clkdev.h b/include/asm-generic/clkdev.h deleted file mode 100644 index 4ff334749ed5..000000000000 --- a/include/asm-generic/clkdev.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * include/asm-generic/clkdev.h - * - * Based on the ARM clkdev.h: - * Copyright (C) 2008 Russell King. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Helper for the clk API to assist looking up a struct clk. - */ -#ifndef __ASM_CLKDEV_H -#define __ASM_CLKDEV_H - -#include <linux/slab.h> - -#ifndef CONFIG_COMMON_CLK -struct clk; - -static inline int __clk_get(struct clk *clk) { return 1; } -static inline void __clk_put(struct clk *clk) { } -#endif - -static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) -{ - return kzalloc(size, GFP_KERNEL); -} - -#endif diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h new file mode 100644 index 000000000000..880a292d792f --- /dev/null +++ b/include/asm-generic/dma-mapping.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_DMA_MAPPING_H +#define _ASM_GENERIC_DMA_MAPPING_H + +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return &dma_direct_ops; +} + +#endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h new file mode 100644 index 000000000000..296c65442f00 --- /dev/null +++ b/include/asm-generic/error-injection.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_ERROR_INJECTION_H +#define _ASM_GENERIC_ERROR_INJECTION_H + +#if defined(__KERNEL__) && !defined(__ASSEMBLY__) +enum { + EI_ETYPE_NONE, /* Dummy value for undefined case */ + EI_ETYPE_NULL, /* Return NULL if failure */ + EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ + EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ +}; + +struct error_injection_entry { + unsigned long addr; + int etype; +}; + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +/* + * Whitelist ganerating macro. Specify functions which can be + * error-injectable using this macro. + */ +#define ALLOW_ERROR_INJECTION(fname, _etype) \ +static struct error_injection_entry __used \ + __attribute__((__section__("_error_injection_whitelist"))) \ + _eil_addr_##fname = { \ + .addr = (unsigned long)fname, \ + .etype = EI_ETYPE_##_etype, \ + }; +#else +#define ALLOW_ERROR_INJECTION(fname, _etype) +#endif +#endif + +#endif /* _ASM_GENERIC_ERROR_INJECTION_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index b1e17fcee2d0..854f96ad5ccb 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -1,12 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* Generic I/O port emulation, based on MN10300 code * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
*/ #ifndef __ASM_GENERIC_PCI_IOMAP_H #define __ASM_GENERIC_PCI_IOMAP_H diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 868e68561f91..2cfa3075d148 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -309,19 +309,26 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif -#ifndef __HAVE_ARCH_PMDP_INVALIDATE -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmdp); -#endif - -#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE -static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma, - unsigned long address, pmd_t *pmdp) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with CPU which sets these bits and non-atomic aproach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) { - + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; } #endif +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 03cc5f9bba71..849cd8eb5ca0 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -30,6 +30,7 @@ * __ctors_start, __ctors_end * __irqentry_text_start, __irqentry_text_end * __softirqentry_text_start, __softirqentry_text_end + * __start_opd, __end_opd */ extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; @@ -49,12 +50,15 @@ extern char __start_once[], __end_once[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; +/* Start and end of .opd section - used for function descriptors. */ +extern char __start_opd[], __end_opd[]; + extern __visible const void __nosave_begin, __nosave_end; -/* function descriptor handling (if any). Override - * in asm/sections.h */ +/* Function descriptor handling (if any). Override in asm/sections.h */ #ifndef dereference_function_descriptor #define dereference_function_descriptor(p) (p) +#define dereference_kernel_function_descriptor(p) (p) #endif /* random extra sections (if any). Override diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a564b83bf013..1ab0e520d6fc 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -136,6 +136,15 @@ #define KPROBE_BLACKLIST() #endif +#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ + VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\ + KEEP(*(_error_injection_whitelist)) \ + VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .; +#else +#define ERROR_INJECT_WHITELIST() +#endif + #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . 
= ALIGN(8); \ VMLINUX_SYMBOL(__start_ftrace_events) = .; \ @@ -568,6 +577,7 @@ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ KPROBE_BLACKLIST() \ + ERROR_INJECT_WHITELIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 03b97629442c..1e26f790b03f 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) */ static inline int crypto_aead_encrypt(struct aead_request *req) { - return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_aead_alg(aead)->encrypt(req); } /** @@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + if (req->cryptlen < crypto_aead_authsize(aead)) return -EINVAL; diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h index caaa470389e0..b83d66073db0 100644 --- a/include/crypto/chacha20.h +++ b/include/crypto/chacha20.h @@ -13,12 +13,13 @@ #define CHACHA20_IV_SIZE 16 #define CHACHA20_KEY_SIZE 32 #define CHACHA20_BLOCK_SIZE 64 +#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32)) struct chacha20_ctx { u32 key[8]; }; -void chacha20_block(u32 *state, void *stream); +void chacha20_block(u32 *state, u32 *stream); void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keysize); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 0ed31fd80242..2d1849dffb80 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -71,12 +71,11 @@ struct ahash_request { /** * struct ahash_alg - asynchronous message digest definition - * @init: Initialize the transformation context. Intended only to initialize the + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the * state of the HASH transformation at the beginning. This shall fill in * the internal structures used during the entire duration of the whole * transformation. No data processing happens at this point. - * Note: mandatory. - * @update: Push a chunk of data into the driver for transformation. This + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This * function actually pushes blocks of data from upper layers into the * driver, which then passes those to the hardware as seen fit. This * function must not finalize the HASH transformation by calculating the @@ -85,20 +84,17 @@ struct ahash_request { * context, as this function may be called in parallel with the same * transformation object. Data processing can happen synchronously * [SHASH] or asynchronously [AHASH] at this point. - * Note: mandatory. - * @final: Retrieve result from the driver. This function finalizes the + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the * transformation and retrieves the resulting hash from the driver and * pushes it back to upper layers. No data processing happens at this * point unless hardware requires it to finish the transformation * (then the data buffered by the device driver is processed). - * Note: mandatory. - * @finup: Combination of @update and @final. 
This function is effectively a + * @finup: **[optional]** Combination of @update and @final. This function is effectively a * combination of @update and @final calls issued in sequence. As some * hardware cannot do @update and @final separately, this callback was * added to allow such hardware to be used at least by IPsec. Data * processing can happen synchronously [SHASH] or asynchronously [AHASH] * at this point. - * Note: optional. * @digest: Combination of @init and @update and @final. This function * effectively behaves as the entire chain of operations, @init, * @update and @final issued in sequence. Just like @finup, this was @@ -210,7 +206,6 @@ struct crypto_ahash { unsigned int keylen); unsigned int reqsize; - bool has_setkey; struct crypto_tfm base; }; @@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); -static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) -{ - return tfm->has_setkey; -} - /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out) */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { - return crypto_ahash_reqtfm(req)->import(req, in); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->import(req, in); } /** @@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) */ static inline int crypto_ahash_init(struct ahash_request *req) { - return crypto_ahash_reqtfm(req)->init(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return tfm->init(req); } /** @@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out) */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { - return crypto_shash_alg(desc->tfm)->import(desc, in); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->import(desc, in); } /** @@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) */ static inline int crypto_shash_init(struct shash_desc *desc) { - return crypto_shash_alg(desc->tfm)->init(desc); + struct crypto_shash *tfm = desc->tfm; + + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + + return crypto_shash_alg(tfm)->init(desc); } /** diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index f38227a78eae..482461d8931d 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(struct crypto_async_request *_req, int err); -unsigned int af_alg_poll(struct file *file, struct socket *sock, +__poll_t af_alg_poll(struct file *file, struct socket *sock, poll_table *wait); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen); diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index c2bae8da642c..27040a46d50a 100644 --- 
a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) return alg->setkey != shash_no_setkey; } +bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); + int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index ccad9b2c9bd6..0f6ddac1acfc 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -28,17 +28,6 @@ struct crypto_scomp { * @free_ctx: Function frees context allocated with alloc_ctx * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @init: Initialize the cryptographic transformation object. - * This function is used to initialize the cryptographic - * transformation object. This function is called only once at - * the instantiation time, right after the transformation context - * was allocated. In case the cryptographic hardware has some - * special requirements which need to be handled by software, this - * function shall check for the precise requirement of the - * transformation and put any software fallbacks in place. - * @exit: Deinitialize the cryptographic transformation object. This is a - * counterpart to @init, used to remove various changes set in - * @init. * @base: Common crypto API algorithm data structure */ struct scomp_alg { diff --git a/include/crypto/null.h b/include/crypto/null.h index 5757c0a4b321..15aeef6e30ef 100644 --- a/include/crypto/null.h +++ b/include/crypto/null.h @@ -12,14 +12,4 @@ struct crypto_skcipher *crypto_get_default_null_skcipher(void); void crypto_put_default_null_skcipher(void); -static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) -{ - return crypto_get_default_null_skcipher(); -} - -static inline void crypto_put_default_null_skcipher2(void) -{ - crypto_put_default_null_skcipher(); -} - #endif diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index c65567d01e8e..f718a19da82f 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -31,8 +31,6 @@ struct poly1305_desc_ctx { }; int crypto_poly1305_init(struct shash_desc *desc); -int crypto_poly1305_setkey(struct crypto_shash *tfm, - const u8 *key, unsigned int keylen); unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen); int crypto_poly1305_update(struct shash_desc *desc, diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h new file mode 100644 index 000000000000..19ed48aefc86 --- /dev/null +++ b/include/crypto/salsa20.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values for the Salsa20 algorithm + */ + +#ifndef _CRYPTO_SALSA20_H +#define _CRYPTO_SALSA20_H + +#include <linux/types.h> + +#define SALSA20_IV_SIZE 8 +#define SALSA20_MIN_KEY_SIZE 16 +#define SALSA20_MAX_KEY_SIZE 32 +#define SALSA20_BLOCK_SIZE 64 + +struct crypto_skcipher; + +struct salsa20_ctx { + u32 initial_state[16]; +}; + +void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, + const u8 *iv); +int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); + +#endif /* _CRYPTO_SALSA20_H */ diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h index b9d9bd553b48..080f60c2e6b1 100644 --- a/include/crypto/sha3.h +++ b/include/crypto/sha3.h @@ -19,7 +19,6 @@ struct 
sha3_state { u64 st[25]; - unsigned int md_len; unsigned int rsiz; unsigned int rsizw; @@ -27,4 +26,9 @@ struct sha3_state { u8 buf[SHA3_224_BLOCK_SIZE]; }; +int crypto_sha3_init(struct shash_desc *desc); +int crypto_sha3_update(struct shash_desc *desc, const u8 *data, + unsigned int len); +int crypto_sha3_final(struct shash_desc *desc, u8 *out); + #endif diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 562001cb412b..2f327f090c3e 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm, return tfm->setkey(tfm, key, keylen); } -static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) -{ - return tfm->keysize; -} - static inline unsigned int crypto_skcipher_default_keysize( struct crypto_skcipher *tfm) { @@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return tfm->encrypt(req); } @@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) + return -ENOKEY; + return tfm->decrypt(req); } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 59be1232d005..c6666cd09347 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -75,6 +75,7 @@ #include <drm/drm_sarea.h> #include <drm/drm_drv.h> #include <drm/drm_prime.h> +#include <drm/drm_print.h> #include <drm/drm_pci.h> #include <drm/drm_file.h> #include <drm/drm_debugfs.h> @@ -94,212 +95,16 @@ struct dma_buf_attachment; struct pci_dev; struct pci_controller; -/* - * The following categories are defined: - * - * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... - * This is the category used by the DRM_DEBUG() macro. - * - * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... - * This is the category used by the DRM_DEBUG_DRIVER() macro. - * - * KMS: used in the modesetting code. - * This is the category used by the DRM_DEBUG_KMS() macro. - * - * PRIME: used in the prime code. - * This is the category used by the DRM_DEBUG_PRIME() macro. - * - * ATOMIC: used in the atomic code. - * This is the category used by the DRM_DEBUG_ATOMIC() macro. - * - * VBL: used for verbose debug message in the vblank code - * This is the category used by the DRM_DEBUG_VBL() macro. - * - * Enabling verbose debug messages is done through the drm.debug parameter, - * each category being enabled by a bit. - * - * drm.debug=0x1 will enable CORE messages - * drm.debug=0x2 will enable DRIVER messages - * drm.debug=0x3 will enable CORE and DRIVER messages - * ... 
- * drm.debug=0x3f will enable all messages - * - * An interesting feature is that it's possible to enable verbose logging at - * run-time by echoing the debug value in its sysfs node: - * # echo 0xf > /sys/module/drm/parameters/debug - */ -#define DRM_UT_NONE 0x00 -#define DRM_UT_CORE 0x01 -#define DRM_UT_DRIVER 0x02 -#define DRM_UT_KMS 0x04 -#define DRM_UT_PRIME 0x08 -#define DRM_UT_ATOMIC 0x10 -#define DRM_UT_VBL 0x20 -#define DRM_UT_STATE 0x40 -#define DRM_UT_LEASE 0x80 - /***********************************************************************/ /** \name DRM template customization defaults */ /*@{*/ /***********************************************************************/ -/** \name Macros to make printk easier */ -/*@{*/ - -#define _DRM_PRINTK(once, level, fmt, ...) \ - do { \ - printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ - ##__VA_ARGS__); \ - } while (0) - -#define DRM_INFO(fmt, ...) \ - _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE(fmt, ...) \ - _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN(fmt, ...) \ - _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) - -#define DRM_INFO_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) -#define DRM_NOTE_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) -#define DRM_WARN_ONCE(fmt, ...) \ - _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) - -/** - * Error output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ - fmt, ##__VA_ARGS__) -#define DRM_ERROR(fmt, ...) \ - drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) - -/** - * Rate limited error output. Like DRM_ERROR() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - \ - if (__ratelimit(&_rs)) \ - DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ -}) -#define DRM_ERROR_RATELIMITED(fmt, ...) \ - DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) - -#define DRM_DEV_INFO(dev, fmt, ...) \ - drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ - ##__VA_ARGS__) - -#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ -({ \ - static bool __print_once __read_mostly; \ - if (!__print_once) { \ - __print_once = true; \ - DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ - } \ -}) - -/** - * Debug output. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_DRIVER(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_KMS(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_PRIME(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) 
\ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ - fmt, ##args) -#define DRM_DEBUG_ATOMIC(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) - -#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ - ##args) -#define DRM_DEBUG_VBL(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) - -#define DRM_DEBUG_LEASE(fmt, ...) \ - drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) - -#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ -({ \ - static DEFINE_RATELIMIT_STATE(_rs, \ - DEFAULT_RATELIMIT_INTERVAL, \ - DEFAULT_RATELIMIT_BURST); \ - if (__ratelimit(&_rs)) \ - drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ - __func__, "", fmt, ##args); \ -}) - -/** - * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. - * - * \param fmt printf() like format string. - * \param arg arguments - */ -#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ - DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) -#define DRM_DEBUG_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) -#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) -#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) -#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ - _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) -#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ - DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) - -/* Format strings and argument splitters to simplify printing - * various "complex" objects - */ - -/*@}*/ - -/***********************************************************************/ /** \name Internal types and structures */ /*@{*/ #define DRM_IF_VERSION(maj, min) (maj << 16 | min) - /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 5afd6e364fb6..1c27526c499e 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -189,12 +189,40 @@ struct drm_private_state_funcs { struct drm_private_state *state); }; +/** + * struct drm_private_obj - base struct for driver private atomic object + * + * A driver private object is initialized by calling + * drm_atomic_private_obj_init() and cleaned up by calling + * drm_atomic_private_obj_fini(). + * + * Currently only tracks the state update functions and the opaque driver + * private state itself, but in the future might also track which + * &drm_modeset_lock is required to duplicate and update this object's state. + */ struct drm_private_obj { + /** + * @state: Current atomic state for this driver private object. + */ struct drm_private_state *state; + /** + * @funcs: + * + * Functions to manipulate the state of this driver private object, see + * &drm_private_state_funcs. + */ const struct drm_private_state_funcs *funcs; }; +/** + * struct drm_private_state - base struct for driver private object state + * @state: backpointer to global drm_atomic_state + * + * Currently only contains a backpointer to the overall atomic update, but in + * the future also might hold synchronization information similar to e.g. 
+ * &drm_crtc.commit. + */ struct drm_private_state { struct drm_atomic_state *state; }; @@ -218,6 +246,10 @@ struct __drm_private_objs_state { * @num_private_objs: size of the @private_objs array * @private_objs: pointer to array of private object pointers * @acquire_ctx: acquire context for this atomic modeset state update + * + * States are added to an atomic update by calling drm_atomic_get_crtc_state(), + * drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for + * private state structures, drm_atomic_get_private_obj_state(). */ struct drm_atomic_state { struct kref ref; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index d2b56cc657e9..4842ee9485ce 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -38,6 +38,13 @@ struct drm_private_state; int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state); +int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, + const struct drm_crtc_state *crtc_state, + const struct drm_rect *clip, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 5971577016a2..ed38df4ac204 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -177,6 +177,35 @@ enum drm_link_status { }; /** + * enum drm_panel_orientation - panel_orientation info for &drm_display_info + * + * This enum is used to track the (LCD) panel orientation. There are no + * separate #defines for the uapi! + * + * @DRM_MODE_PANEL_ORIENTATION_UNKNOWN: The drm driver has not provided any + * panel orientation information (normal + * for non panels) in this case the "panel + * orientation" connector prop will not be + * attached. + * @DRM_MODE_PANEL_ORIENTATION_NORMAL: The top side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP: The top side of the panel matches the + * bottom side of the device's casing, iow + * the panel is mounted upside-down. + * @DRM_MODE_PANEL_ORIENTATION_LEFT_UP: The left side of the panel matches the + * top side of the device's casing. + * @DRM_MODE_PANEL_ORIENTATION_RIGHT_UP: The right side of the panel matches the + * top side of the device's casing. + */ +enum drm_panel_orientation { + DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1, + DRM_MODE_PANEL_ORIENTATION_NORMAL = 0, + DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, + DRM_MODE_PANEL_ORIENTATION_LEFT_UP, + DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, +}; + +/** * struct drm_display_info - runtime data about the connected sink * * Describes a given display (e.g. CRT or flat panel) and its limitations. For @@ -224,6 +253,15 @@ struct drm_display_info { #define DRM_COLOR_FORMAT_YCRCB420 (1<<3) /** + * @panel_orientation: Read only connector property for built-in panels, + * indicating the orientation of the panel vs the device's casing. + * drm_connector_init() sets this to DRM_MODE_PANEL_ORIENTATION_UNKNOWN. + * When not UNKNOWN this gets used by the drm_fb_helpers to rotate the + * fb to compensate and gets exported as prop to userspace. + */ + int panel_orientation; + + /** * @color_formats: HDMI Color formats, selects between RGB and YCrCb * modes. 
Used DRM_COLOR_FORMAT\_ defines, which are _not_ the same ones * as used to describe the pixel format in framebuffers, and also don't @@ -271,6 +309,11 @@ struct drm_display_info { bool dvi_dual; /** + * @has_hdmi_infoframe: Does the sink support the HDMI infoframe? + */ + bool has_hdmi_infoframe; + + /** * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even * more stuff redundant with @bus_formats. */ @@ -705,7 +748,6 @@ struct drm_cmdline_mode { * @force: a DRM_FORCE_<foo> state for forced mode sets * @override_edid: has the EDID been overwritten through debugfs for testing? * @encoder_ids: valid encoders for this connector - * @encoder: encoder driving this connector, if any * @eld: EDID-like data, if present * @latency_present: AV delay info from ELD, if found * @video_latency: video latency info from ELD, if found @@ -875,7 +917,13 @@ struct drm_connector { #define DRM_CONNECTOR_MAX_ENCODER 3 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; - struct drm_encoder *encoder; /* currently active encoder */ + /** + * @encoder: Currently bound encoder driving this connector, if any. + * Only really meaningful for non-atomic drivers. Atomic drivers should + * instead look at &drm_connector_state.best_encoder, and in case they + * need the CRTC driving this output, &drm_connector_state.crtc. + */ + struct drm_encoder *encoder; #define MAX_ELD_BYTES 128 /* EDID bits */ @@ -1035,6 +1083,8 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, const struct edid *edid); void drm_mode_connector_set_link_status_property(struct drm_connector *connector, uint64_t link_status); +int drm_connector_init_panel_orientation_property( + struct drm_connector *connector, int width, int height); /** * struct drm_tile_group - Tile group metadata diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index e21af87a2f3c..7c4fa32f3fc6 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -17,6 +17,7 @@ struct drm_vblank_crtc; struct drm_sg_mem; struct drm_local_map; struct drm_vma_offset_manager; +struct drm_fb_helper; struct inode; @@ -185,6 +186,14 @@ struct drm_device { struct drm_vma_offset_manager *vma_offset_manager; /*@} */ int switch_power_state; + + /** + * @fb_helper: + * + * Pointer to the fbdev emulation structure. + * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini(). + */ + struct drm_fb_helper *fb_helper; }; #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2623a1255481..da58a428c8d7 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -635,6 +635,7 @@ # define DP_SET_POWER_D0 0x1 # define DP_SET_POWER_D3 0x2 # define DP_SET_POWER_MASK 0x3 +# define DP_SET_POWER_D3_AUX_ON 0x5 #define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ # define DP_EDP_11 0x00 diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 412e83a4d3db..d32b688eb346 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -39,6 +39,7 @@ struct drm_minor; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; +struct drm_printer; /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 @@ -429,6 +430,20 @@ struct drm_driver { void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); /** + * @gem_print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. 
+ * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). + */ + void (*gem_print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by core @@ -592,13 +607,6 @@ struct drm_driver { int dev_priv_size; }; -__printf(6, 7) -void drm_dev_printk(const struct device *dev, const char *level, - unsigned int category, const char *function_name, - const char *prefix, const char *format, ...); -__printf(3, 4) -void drm_printk(const char *level, unsigned int category, - const char *format, ...); extern unsigned int drm_debug; int drm_dev_init(struct drm_device *dev, diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index efe6d5a8e834..8d89a9c3748d 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -333,7 +333,6 @@ struct drm_encoder; struct drm_connector; struct drm_display_mode; -void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, @@ -357,6 +356,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, bool is_hdmi2_sink); int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, + struct drm_connector *connector, const struct drm_display_mode *mode); void drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index ee4cfbe63c52..fb299696c7c4 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -88,7 +88,6 @@ struct drm_encoder_funcs { * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver - * @crtc: currently bound CRTC * @bridge: bridge associated to the encoder * @funcs: control functions * @helper_private: mid-layer private data @@ -166,6 +165,11 @@ struct drm_encoder { */ uint32_t possible_clones; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. Atomic drivers should instead check + * &drm_connector_state.crtc. 
+ */ struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index faf56c53df28..d532f88a8d55 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -16,6 +16,13 @@ struct drm_mode_fb_cmd2; struct drm_plane; struct drm_plane_state; +int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev, + unsigned int preferred_bpp, unsigned int max_conn_count, + const struct drm_framebuffer_funcs *funcs); +int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_cma_fbdev_fini(struct drm_device *dev); + struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev, unsigned int preferred_bpp, unsigned int max_conn_count, const struct drm_framebuffer_funcs *funcs); @@ -36,11 +43,5 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb, struct drm_plane_state *state, unsigned int plane); -#ifdef CONFIG_DEBUG_FS -struct seq_file; - -int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg); -#endif - #endif diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 33fe95927742..b069433e7fc1 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -33,6 +33,7 @@ struct drm_fb_helper; #include <drm/drm_crtc.h> +#include <drm/drm_device.h> #include <linux/kgdb.h> enum mode_set_atomic { @@ -48,6 +49,7 @@ struct drm_fb_helper_crtc { struct drm_mode_set mode_set; struct drm_display_mode *desired_mode; int x, y; + int rotation; }; /** @@ -159,6 +161,13 @@ struct drm_fb_helper { int connector_count; int connector_info_alloc_count; /** + * @sw_rotations: + * Bitmask of all rotations requested for panel-orientation which + * could not be handled in hardware. If only one bit is set + * fbdev->fbcon_rotate_hint gets set to the requested rotation. + */ + int sw_rotations; + /** * @connector_info: * * Array of per-connector information. 
Do not iterate directly, but use @@ -267,6 +276,7 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist); +int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper); ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos); @@ -310,6 +320,16 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn); int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); + +int drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count); +void drm_fb_helper_fbdev_teardown(struct drm_device *dev); + +void drm_fb_helper_lastclose(struct drm_device *dev); +void drm_fb_helper_output_poll_changed(struct drm_device *dev); #else static inline void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, @@ -321,11 +341,17 @@ static inline int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper, int max_conn) { + /* So drivers can use it to free the struct */ + helper->dev = dev; + dev->fb_helper = helper; + return 0; } static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) { + if (helper && helper->dev) + helper->dev->fb_helper = NULL; } static inline int drm_fb_helper_blank(int blank, struct fb_info *info) @@ -398,6 +424,11 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info, { } +static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) +{ + return -ENODEV; +} + static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) @@ -507,6 +538,32 @@ drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, return 0; } +static inline int +drm_fb_helper_fbdev_setup(struct drm_device *dev, + struct drm_fb_helper *fb_helper, + const struct drm_fb_helper_funcs *funcs, + unsigned int preferred_bpp, + unsigned int max_conn_count) +{ + /* So drivers can use it to free the struct */ + dev->fb_helper = fb_helper; + + return 0; +} + +static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev) +{ + dev->fb_helper = NULL; +} + +static inline void drm_fb_helper_lastclose(struct drm_device *dev) +{ +} + +static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) +{ +} + #endif static inline int diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 0e0c868451a5..5176c3797680 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp); ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); int drm_release(struct inode *inode, struct file *filp); -unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); +__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait); int drm_event_reserve_init_locked(struct drm_device *dev, struct drm_file *file_priv, struct drm_pending_event *p, diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 4c5ee4ae54df..c50502c656e5 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -121,6 +121,12 @@ struct drm_framebuffer { * @base: base modeset object structure, contains the reference count. 
*/ struct drm_mode_object base; + + /** + * @comm: Name of the process allocating the fb, used for fb dumping. + */ + char comm[TASK_COMM_LEN]; + /** * @format: framebuffer format information */ @@ -264,7 +270,7 @@ static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) * * This functions returns the framebuffer's reference count. */ -static inline uint32_t drm_framebuffer_read_refcount(struct drm_framebuffer *fb) +static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) { return kref_read(&fb->base.refcount); } diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 520e3feb502c..19777145cf8e 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -9,7 +9,9 @@ * struct drm_gem_cma_object - GEM object backed by CMA memory allocations * @base: base GEM object * @paddr: physical address of the backing memory - * @sgt: scatter/gather table for imported PRIME buffers + * @sgt: scatter/gather table for imported PRIME buffers. The table can have + * more than one entry but they are guaranteed to have contiguous + * DMA addresses. * @vaddr: kernel virtual address of the backing memory */ struct drm_gem_cma_object { @@ -21,11 +23,8 @@ struct drm_gem_cma_object { void *vaddr; }; -static inline struct drm_gem_cma_object * -to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) -{ - return container_of(gem_obj, struct drm_gem_cma_object, base); -} +#define to_drm_gem_cma_obj(gem_obj) \ + container_of(gem_obj, struct drm_gem_cma_object, base) #ifndef CONFIG_MMU #define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \ @@ -91,9 +90,8 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp, unsigned long flags); #endif -#ifdef CONFIG_DEBUG_FS -void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m); -#endif +void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object * diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 8d10fc97801c..101f566ae43d 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -386,7 +386,7 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, * @color: opaque tag value to use for this node * @mode: fine-tune the allocation search and placement * - * This is a simplified version of drm_mm_insert_node_in_range_generic() with no + * This is a simplified version of drm_mm_insert_node_in_range() with no * range restrictions applied. * * The preallocated node must be cleared to 0. diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b0ce26d71296..2cb6f02df64a 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -269,6 +269,9 @@ struct drm_mode_config_funcs { * state easily. If this hook is implemented, drivers must also * implement @atomic_state_clear and @atomic_state_free. * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. + * * RETURNS: * * A new &drm_atomic_state on success or NULL on failure. @@ -290,6 +293,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call drm_atomic_state_default_clear() * to clear common state. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. 
*/ void (*atomic_state_clear)(struct drm_atomic_state *state); @@ -302,6 +308,9 @@ struct drm_mode_config_funcs { * * Drivers that implement this must call * drm_atomic_state_default_release() to release common resources. + * + * Subclassing of &drm_atomic_state is deprecated in favour of using + * &drm_private_state and &drm_private_obj. */ void (*atomic_state_free)(struct drm_atomic_state *state); }; @@ -751,6 +760,13 @@ struct drm_mode_config { */ struct drm_property *non_desktop_property; + /** + * @panel_orientation_property: Optional connector property indicating + * how the lcd-panel is mounted inside the casing (e.g. normal or + * upside-down). + */ + struct drm_property *panel_orientation_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; @@ -768,7 +784,7 @@ struct drm_mode_config { bool allow_fb_modifiers; /** - * @modifiers: Plane property to list support modifier/format + * @modifiers_property: Plane property to list support modifier/format * combination. */ struct drm_property *modifiers_property; @@ -776,6 +792,15 @@ struct drm_mode_config { /* cursor size */ uint32_t cursor_width, cursor_height; + /** + * @suspend_state: + * + * Atomic state when suspended. + * Set by drm_mode_config_helper_suspend() and cleared by + * drm_mode_config_helper_resume(). + */ + struct drm_atomic_state *suspend_state; + const struct drm_mode_config_helper_funcs *helper_private; }; diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index cb0ec92e11e6..efa337f03129 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -34,4 +34,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_device *dev, int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); +int drm_mode_config_helper_suspend(struct drm_device *dev); +int drm_mode_config_helper_resume(struct drm_device *dev); + #endif diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 16646c44b7df..3e76ca805b0f 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -801,9 +801,6 @@ struct drm_connector_helper_funcs { * resolution can call drm_add_modes_noedid(), and mark the preferred * one using drm_set_preferred_mode(). * - * Finally drivers that support audio probably want to update the ELD - * data, too, using drm_edid_to_eld(). - * * This function is only called after the @detect hook has indicated * that a sink is connected and when the EDID isn't overridden through * sysfs or the kernel commandline. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 571615079230..8185e3468a23 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -474,8 +474,8 @@ enum drm_plane_type { * @format_types: array of formats supported by this plane * @format_count: number of formats supported * @format_default: driver hasn't supplied supported formats for the plane - * @crtc: currently bound CRTC - * @fb: currently bound fb + * @modifiers: array of modifiers supported by this plane + * @modifier_count: number of modifiers supported * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by * drm_mode_set_config_internal() to implement correct refcounting. * @funcs: helper functions @@ -512,7 +512,17 @@ struct drm_plane { uint64_t *modifiers; unsigned int modifier_count; + /** + * @crtc: Currently bound CRTC, only really meaningful for non-atomic + * drivers. 
Atomic drivers should instead check &drm_plane_state.crtc. + */ struct drm_crtc *crtc; + + /** + * @fb: Currently bound framebuffer, only really meaningful for + * non-atomic drivers. Atomic drivers should instead check + * &drm_plane_state.fb. + */ struct drm_framebuffer *fb; struct drm_framebuffer *old_fb; diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 7c8a00ceadb7..8aa49c0ecd4d 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -38,11 +38,6 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -int drm_plane_helper_check_state(struct drm_plane_state *state, - const struct drm_rect *clip, - int min_scale, int max_scale, - bool can_position, - bool can_update_disabled); int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index ca4d7c6321f2..2a4a42e59a47 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -80,6 +80,29 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); +__printf(2, 0) +/** + * drm_vprintf - print to a &drm_printer stream + * @p: the &drm_printer + * @fmt: format string + * @va: the va_list + */ +static inline void +drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) +{ + struct va_format vaf = { .fmt = fmt, .va = va }; + + p->printfn(p, &vaf); +} + +/** + * drm_printf_indent - Print to a &drm_printer stream with indentation + * @printer: DRM printer + * @indent: Tab indentation level (max 5) + * @fmt: Format string + */ +#define drm_printf_indent(printer, indent, fmt, ...) \ + drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file @@ -128,4 +151,200 @@ static inline struct drm_printer drm_debug_printer(const char *prefix) }; return p; } + +/* + * The following categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... 
+ * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_NONE 0x00 +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 +#define DRM_UT_STATE 0x40 +#define DRM_UT_LEASE 0x80 + +__printf(6, 7) +void drm_dev_printk(const struct device *dev, const char *level, + unsigned int category, const char *function_name, + const char *prefix, const char *format, ...); +__printf(3, 4) +void drm_printk(const char *level, unsigned int category, + const char *format, ...); + +/* Macros to make printk easier */ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + do { \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\ + fmt, ##__VA_ARGS__) +#define DRM_ERROR(fmt, ...) \ + drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ +}) +#define DRM_ERROR_RATELIMITED(fmt, ...) \ + DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) + +#define DRM_DEV_INFO(dev, fmt, ...) \ + drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ + ##__VA_ARGS__) + +#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ +({ \ + static bool __print_once __read_mostly; \ + if (!__print_once) { \ + __print_once = true; \ + DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ + } \ +}) + +/** + * Debug output. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_DRIVER(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_KMS(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_PRIME(fmt, ...) 
\ + drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \ + fmt, ##args) +#define DRM_DEBUG_ATOMIC(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) + +#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \ + ##args) +#define DRM_DEBUG_VBL(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__) + +#define DRM_DEBUG_LEASE(fmt, ...) \ + drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__) + +#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + if (__ratelimit(&_rs)) \ + drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \ + __func__, "", fmt, ##args); \ +}) + +/** + * Rate limited debug output. Like DRM_DEBUG() but won't flood the log. + * + * @dev: device pointer + * @fmt: printf() like format string. + */ +#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \ + DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args) +#define DRM_DEBUG_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args) +#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args) +#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args) +#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \ + _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args) +#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ + DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) + #endif /* DRM_PRINT_H_ */ diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 43e2f382d2f0..3980602472c0 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -33,36 +33,31 @@ struct drm_syncobj_cb; /** * struct drm_syncobj - sync object. * - * This structure defines a generic sync object which wraps a dma fence. + * This structure defines a generic sync object which wraps a &dma_fence. */ struct drm_syncobj { /** - * @refcount: - * - * Reference count of this object. + * @refcount: Reference count of this object. */ struct kref refcount; /** * @fence: * NULL or a pointer to the fence bound to this object. * - * This field should not be used directly. Use drm_syncobj_fence_get - * and drm_syncobj_replace_fence instead. + * This field should not be used directly. Use drm_syncobj_fence_get() + * and drm_syncobj_replace_fence() instead. */ - struct dma_fence *fence; + struct dma_fence __rcu *fence; /** - * @cb_list: - * List of callbacks to call when the fence gets replaced + * @cb_list: List of callbacks to call when the &fence gets replaced. */ struct list_head cb_list; /** - * @lock: - * locks cb_list and write-locks fence. + * @lock: Protects &cb_list and write-locks &fence. */ spinlock_t lock; /** - * @file: - * a file backing for this syncobj. + * @file: A file backing for this syncobj. 
*/ struct file *file; }; @@ -73,7 +68,7 @@ typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj, /** * struct drm_syncobj_cb - callback for drm_syncobj_add_callback * @node: used by drm_syncob_add_callback to append this struct to - * syncobj::cb_list + * &drm_syncobj.cb_list * @func: drm_syncobj_func_t to call * * This struct will be initialized by drm_syncobj_add_callback, additional @@ -92,7 +87,7 @@ void drm_syncobj_free(struct kref *kref); * drm_syncobj_get - acquire a syncobj reference * @obj: sync object * - * This acquires additional reference to @obj. It is illegal to call this + * This acquires an additional reference to @obj. It is illegal to call this * without already holding a reference. No locks required. */ static inline void @@ -111,6 +106,17 @@ drm_syncobj_put(struct drm_syncobj *obj) kref_put(&obj->refcount, drm_syncobj_free); } +/** + * drm_syncobj_fence_get - get a reference to a fence in a sync object + * @syncobj: sync object. + * + * This acquires additional reference to &drm_syncobj.fence contained in @obj, + * if not NULL. It is illegal to call this without already holding a reference. + * No locks required. + * + * Returns: + * Either the fence of @obj or NULL if there's none. + */ static inline struct dma_fence * drm_syncobj_fence_get(struct drm_syncobj *syncobj) { diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h new file mode 100644 index 000000000000..a803988d8579 --- /dev/null +++ b/include/drm/drm_utils.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Function prototypes for misc. drm utility functions. + * Specifically this file is for function prototypes for functions which + * may also be used outside of drm code (e.g. in fbdev drivers). + * + * Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com> + */ + +#ifndef __DRM_UTILS_H__ +#define __DRM_UTILS_H__ + +int drm_get_panel_orientation_quirk(int width, int height); + +#endif diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h index d84d52f6d2b1..8758df94e9a0 100644 --- a/include/drm/drm_vma_manager.h +++ b/include/drm/drm_vma_manager.h @@ -152,7 +152,7 @@ static inline void drm_vma_node_reset(struct drm_vma_offset_node *node) * Start address of @node for page-based addressing. 0 if the node does not * have an offset allocated. */ -static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node) +static inline unsigned long drm_vma_node_start(const struct drm_vma_offset_node *node) { return node->vm_node.start; } diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000000..dfd54fb94e10 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,173 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include <drm/spsc_queue.h> +#include <linux/dma-fence.h> + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to corresponding hardware ring based on scheduling + * policy. +*/ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + struct drm_gpu_scheduler *sched; + + spinlock_t queue_lock; + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; /* points to ctx's guilty */ +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct drm_sched_rq { + spinlock_t lock; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +struct drm_sched_fence { + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; + struct drm_gpu_scheduler *sched; + spinlock_t lock; + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side +*/ +struct drm_sched_backend_ops { + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + void (*timedout_job)(struct drm_sched_job *sched_job); + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head 
ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty); +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h new file mode 100644 index 000000000000..0789e8d0a0e1 --- /dev/null +++ b/include/drm/gpu_scheduler_trace.h @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_scheduler +#define TRACE_INCLUDE_FILE gpu_scheduler_trace + +TRACE_EVENT(drm_sched_job, + TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), + TP_ARGS(sched_job, entity), + TP_STRUCT__entry( + __field(struct drm_sched_entity *, entity) + __field(struct dma_fence *, fence) + __field(const char *, name) + __field(uint64_t, id) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = entity; + __entry->id = sched_job->id; + __entry->fence = &sched_job->s_fence->finished; + __entry->name = sched_job->sched->name; + __entry->job_count = spsc_queue_count(&entity->job_queue); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) +); + +TRACE_EVENT(drm_sched_process_job, + TP_PROTO(struct drm_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct dma_fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->finished; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 4e1b274e1164..c9e5a6621b95 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -36,6 +36,9 @@ extern bool i915_gpu_lower(void); extern bool i915_gpu_busy(void); extern bool i915_gpu_turbo_disable(void); +/* Exported from arch/x86/kernel/early-quirks.c */ +extern struct resource intel_graphics_stolen_res; + /* * The Bridge device's PCI config space has information about the * fb aperture size and the amount of pre-reserved memory. 
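As a usage note for the new scheduler interface declared in gpu_scheduler.h above: the sketch below shows, under stated assumptions, how a driver might plug into it. Every my_* identifier, the hw_submission count, the hang limit, the timeout value and the empty callback bodies are hypothetical placeholders rather than code from any in-tree driver; only the drm_sched_*() entry points, struct drm_sched_backend_ops and DRM_SCHED_PRIORITY_NORMAL are taken from the header itself.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical driver-side state; names invented for this illustration. */
struct my_ring {
	struct drm_gpu_scheduler sched;
	struct drm_sched_entity entity;
};

struct my_job {
	struct drm_sched_job base;
	/* driver-specific command data would live here */
};

static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
				       struct drm_sched_entity *s_entity)
{
	return NULL; /* no extra dependencies in this sketch */
}

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Kick the command stream to the hardware ring and return its fence. */
	return NULL; /* placeholder */
}

static void my_timedout_job(struct drm_sched_job *sched_job)
{
	/* Reset the ring; optionally mark the owning context guilty. */
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	kfree(container_of(sched_job, struct my_job, base));
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.dependency	= my_dependency,
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};

static int my_ring_init(struct my_ring *ring)
{
	int r;

	/* One scheduler instance per hardware ring. */
	r = drm_sched_init(&ring->sched, &my_sched_ops,
			   16 /* hw_submission */, 0 /* hang_limit */,
			   msecs_to_jiffies(500) /* timeout */, "my-ring");
	if (r)
		return r;

	/* One entity per context, feeding the NORMAL-priority run queue. */
	return drm_sched_entity_init(&ring->sched, &ring->entity,
				     &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				     32 /* job queue size */, NULL /* guilty */);
}

/* Queue a prepared job; the scheduler worker later invokes .run_job for it. */
static int my_submit(struct my_ring *ring, struct my_job *job, void *owner)
{
	int r = drm_sched_job_init(&job->base, &ring->sched,
				   &ring->entity, owner);
	if (r)
		return r;

	drm_sched_entity_push_job(&job->base, &ring->entity);
	return 0;
}

Teardown would mirror this with drm_sched_entity_fini() followed by drm_sched_fini().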
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 972a25633525..5db0458dd832 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -373,24 +373,46 @@ /* CFL S */ #define INTEL_CFL_S_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \ - INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */ + INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */ #define INTEL_CFL_S_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */ + INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ #define INTEL_CFL_H_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \ INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */ -/* CFL U */ +/* CFL U GT1 */ +#define INTEL_CFL_U_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) + +/* CFL U GT2 */ +#define INTEL_CFL_U_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info), \ + INTEL_VGA_DEVICE(0x3EA9, info) + +/* CFL U GT3 */ #define INTEL_CFL_U_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x3EA5, info) /* ULT GT3 */ + INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */ + +#define INTEL_CFL_IDS(info) \ + INTEL_CFL_S_GT1_IDS(info), \ + INTEL_CFL_S_GT2_IDS(info), \ + INTEL_CFL_H_GT2_IDS(info), \ + INTEL_CFL_U_GT1_IDS(info), \ + INTEL_CFL_U_GT2_IDS(info), \ + INTEL_CFL_U_GT3_IDS(info) /* CNL U 2+2 */ #define INTEL_CNL_U_GT2_IDS(info) \ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index c5db7975c640..2324c84a25c0 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -5,9 +5,8 @@ #define _DRM_INTEL_GTT_H void intel_gtt_get(u64 *gtt_total, - u32 *stolen_size, phys_addr_t *mappable_base, - u64 *mappable_end); + resource_size_t *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000000..125f096c88cb --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h index 83346ddb9dba..5d0e82b36eaf 100644 --- a/include/drm/tinydrm/mipi-dbi.h +++ b/include/drm/tinydrm/mipi-dbi.h @@ -72,10 +72,12 @@ void mipi_dbi_pipe_enable(struct drm_simple_display_pipe *pipe, void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe); void mipi_dbi_hw_reset(struct mipi_dbi *mipi); bool mipi_dbi_display_is_on(struct mipi_dbi *mipi); +u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len); int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val); int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len); - +int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, + struct drm_clip_rect *clip, bool swap); /** * mipi_dbi_command - MIPI DCS command with optional parameter(s) * @mipi: MIPI structure diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index 4774fe3d4273..07a9a11fe19d 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -19,16 +19,12 @@ * @drm: DRM device * @pipe: Display pipe structure * @dirty_lock: Serializes framebuffer flushing - * @fbdev_cma: CMA fbdev 
structure - * @suspend_state: Atomic state when suspended * @fb_funcs: Framebuffer functions used when creating framebuffers */ struct tinydrm_device { struct drm_device *drm; struct drm_simple_display_pipe pipe; struct mutex dirty_lock; - struct drm_fbdev_cma *fbdev_cma; - struct drm_atomic_state *suspend_state; const struct drm_framebuffer_funcs *fb_funcs; }; @@ -46,6 +42,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) */ #define TINYDRM_GEM_DRIVER_OPS \ .gem_free_object = tinydrm_gem_cma_free_object, \ + .gem_print_info = drm_gem_cma_print_info, \ .gem_vm_ops = &drm_gem_cma_vm_ops, \ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ @@ -81,7 +78,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) .type = DRM_MODE_TYPE_DRIVER, \ .clock = 1 /* pass validation */ -void tinydrm_lastclose(struct drm_device *drm); void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); struct drm_gem_object * tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, @@ -92,8 +88,6 @@ int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, struct drm_driver *driver); int devm_tinydrm_register(struct tinydrm_device *tdev); void tinydrm_shutdown(struct tinydrm_device *tdev); -int tinydrm_suspend(struct tinydrm_device *tdev); -int tinydrm_resume(struct tinydrm_device *tdev); void tinydrm_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index fa07be197945..2cd025c2abe7 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -224,7 +224,6 @@ struct ttm_buffer_object { */ uint64_t offset; /* GPU address space is independent of CPU word size */ - uint32_t cur_placement; struct sg_table *sg; @@ -260,6 +259,25 @@ struct ttm_bo_kmap_obj { }; /** + * struct ttm_operation_ctx + * + * @interruptible: Sleep interruptible if sleeping. + * @no_wait_gpu: Return immediately if the GPU is busy. + * @allow_reserved_eviction: Allow eviction of reserved BOs. + * @resv: Reservation object to allow reserved evictions with. + * + * Context for TTM operations like changing buffer placement or general memory + * allocation. + */ +struct ttm_operation_ctx { + bool interruptible; + bool no_wait_gpu; + bool allow_reserved_eviction; + struct reservation_object *resv; + uint64_t bytes_moved; +}; + +/** * ttm_bo_reference - reference a struct ttm_buffer_object * * @bo: The buffer object. @@ -288,8 +306,7 @@ ttm_bo_reference(struct ttm_buffer_object *bo) * Returns -EBUSY if no_wait is true and the buffer is busy. * Returns -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_wait(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait); +int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); /** * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo @@ -300,17 +317,15 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, * * Returns true if the placement is compatible */ -extern bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags); +bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem, + uint32_t *new_flags); /** * ttm_bo_validate * * @bo: The buffer object. * @placement: Proposed placement for the buffer object. - * @interruptible: Sleep interruptible if sleeping. - * @no_wait_gpu: Return immediately if the GPU is busy. + * @ctx: validation parameters. 
* * Changes placement and caching policy of the buffer object * according proposed placement. @@ -320,10 +335,9 @@ extern bool ttm_bo_mem_compat(struct ttm_placement *placement, * -EBUSY if no_wait is true and buffer busy. * -ERESTARTSYS if interrupted by a signal. */ -extern int ttm_bo_validate(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_validate(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_operation_ctx *ctx); /** * ttm_bo_unref @@ -332,7 +346,7 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, * * Unreference and clear a pointer to a buffer object. */ -extern void ttm_bo_unref(struct ttm_buffer_object **bo); +void ttm_bo_unref(struct ttm_buffer_object **bo); /** * ttm_bo_add_to_lru @@ -344,7 +358,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo); * This function must be called with struct ttm_bo_global::lru_lock held, and * is typically called immediately prior to unreserving a bo. */ -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * ttm_bo_del_from_lru @@ -356,7 +370,7 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); * and is usually called just immediately after the bo has been reserved to * avoid recursive reservation from lru lists. */ -extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); /** * ttm_bo_move_to_lru_tail @@ -367,7 +381,7 @@ extern void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. */ -extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue @@ -376,15 +390,14 @@ extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); * Returns * True if the workqueue was queued at the time */ -extern int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); +int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); /** * ttm_bo_unlock_delayed_workqueue * * Allows the delayed workqueue to run. */ -extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, - int resched); +void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); /** * ttm_bo_eviction_valuable @@ -411,8 +424,7 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * -EBUSY if the buffer is busy and no_wait is true. * -ERESTARTSYS if interrupted by a signal. */ -extern int -ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); +int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); /** * ttm_bo_synccpu_write_release: @@ -421,7 +433,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait); * * Releases a synccpu lock. */ -extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); +void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); /** * ttm_bo_acc_size @@ -448,8 +460,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * @type: Requested type of buffer object. * @flags: Initial placement flags. * @page_alignment: Data alignment in pages. - * @interruptible: If needing to sleep to wait for GPU resources, - * sleep interruptible. + * @ctx: TTM operation context for memory allocation. 
* @persistent_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member * holds a pointer to a persistent shmem object. Typically, this would @@ -480,18 +491,18 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ -extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init_reserved(struct ttm_bo_device *bdev, + struct ttm_buffer_object *bo, + unsigned long size, + enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, + struct ttm_operation_ctx *ctx, + struct file *persistent_swap_storage, + size_t acc_size, + struct sg_table *sg, + struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_init @@ -531,19 +542,13 @@ extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. */ - -extern int ttm_bo_init(struct ttm_bo_device *bdev, - struct ttm_buffer_object *bo, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interrubtible, - struct file *persistent_swap_storage, - size_t acc_size, - struct sg_table *sg, - struct reservation_object *resv, - void (*destroy) (struct ttm_buffer_object *)); +int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, + unsigned long size, enum ttm_bo_type type, + struct ttm_placement *placement, + uint32_t page_alignment, bool interrubtible, + struct file *persistent_swap_storage, size_t acc_size, + struct sg_table *sg, struct reservation_object *resv, + void (*destroy) (struct ttm_buffer_object *)); /** * ttm_bo_create @@ -569,15 +574,11 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev, * -EINVAL: Invalid placement flags. * -ERESTARTSYS: Interrupted by signal while waiting for resources. */ - -extern int ttm_bo_create(struct ttm_bo_device *bdev, - unsigned long size, - enum ttm_bo_type type, - struct ttm_placement *placement, - uint32_t page_alignment, - bool interruptible, - struct file *persistent_swap_storage, - struct ttm_buffer_object **p_bo); +int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size, + enum ttm_bo_type type, struct ttm_placement *placement, + uint32_t page_alignment, bool interruptible, + struct file *persistent_swap_storage, + struct ttm_buffer_object **p_bo); /** * ttm_bo_init_mm @@ -594,9 +595,9 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, * -ENOMEM: Not enough memory. * May also return driver-specified errors. */ +int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, + unsigned long p_size); -extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, - unsigned long p_size); /** * ttm_bo_clean_mm * @@ -623,8 +624,7 @@ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, * -EINVAL: invalid or uninitialized memory type. * -EBUSY: There are still buffers left in this memory type. 
*/ - -extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_bo_evict_mm @@ -644,8 +644,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type); * -ERESTARTSYS: The call was interrupted by a signal while waiting to * evict a buffer. */ - -extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); /** * ttm_kmap_obj_virtual @@ -658,7 +657,6 @@ extern int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); * If *is_iomem is 1 on return, the virtual address points to an io memory area, * that should strictly be accessed by the iowriteXX() and similar functions. */ - static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, bool *is_iomem) { @@ -682,9 +680,8 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ - -extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, - unsigned long num_pages, struct ttm_bo_kmap_obj *map); +int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct ttm_bo_kmap_obj *map); /** * ttm_bo_kunmap @@ -693,8 +690,7 @@ extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, * * Unmaps a kernel map set up by ttm_bo_kmap. */ - -extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); +void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); /** * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object. @@ -706,20 +702,7 @@ extern void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * This function is intended to be called by the fbdev mmap method * if the fbdev address space is to be backed by a bo. */ - -extern int ttm_fbdev_mmap(struct vm_area_struct *vma, - struct ttm_buffer_object *bo); - -/** - * ttm_bo_default_iomem_pfn - get a pfn for a page offset - * - * @bo: the BO we need to look up the pfn for - * @page_offset: offset inside the BO to look up. - * - * Calculate the PFN for iomem based mappings during page fault - */ -unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, - unsigned long page_offset); +int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); /** * ttm_bo_mmap - mmap out of the ttm device address space. @@ -731,9 +714,8 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, * This function is intended to be called by the device mmap method. * if the device address space is to be backed by the bo manager. */ - -extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, - struct ttm_bo_device *bdev); +int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, + struct ttm_bo_device *bdev); /** * ttm_bo_io @@ -755,11 +737,12 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, * the function may return -ERESTARTSYS if * interrupted by a signal. 
*/ +ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, + const char __user *wbuf, char __user *rbuf, + size_t count, loff_t *f_pos, bool write); -extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, - const char __user *wbuf, char __user *rbuf, - size_t count, loff_t *f_pos, bool write); - -extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev); -extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); +int ttm_bo_swapout(struct ttm_bo_global *glob, + struct ttm_operation_ctx *ctx); +void ttm_bo_swapout_all(struct ttm_bo_device *bdev); +int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); #endif diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 5f821a9b3a1f..94064b126e8e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -352,7 +352,8 @@ struct ttm_bo_driver { * Returns: * -ENOMEM: Out of memory. */ - int (*ttm_tt_populate)(struct ttm_tt *ttm); + int (*ttm_tt_populate)(struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx); /** * ttm_tt_unpopulate @@ -409,15 +410,13 @@ struct ttm_bo_driver { * @bo: the buffer to move * @evict: whether this motion is evicting the buffer from * the graphics address space - * @interruptible: Use interruptible sleeps if possible when sleeping. - * @no_wait: whether this should give up and return -EBUSY - * if this move would require sleeping + * @ctx: context for this move with parameters * @new_mem: the new memory region receiving the buffer * * Move a buffer between two memory regions. */ int (*move)(struct ttm_buffer_object *bo, bool evict, - bool interruptible, bool no_wait_gpu, + struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem); /** @@ -524,7 +523,6 @@ struct ttm_bo_global { struct kobject kobj; struct ttm_mem_global *mem_glob; struct page *dummy_read_page; - struct ttm_mem_shrink shrink; struct mutex device_list_mutex; spinlock_t lru_lock; @@ -627,12 +625,12 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask) * Returns: * NULL: Out of memory. */ -extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); /** * ttm_tt_fini @@ -641,8 +639,8 @@ extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bde * * Free memory of ttm_tt structure */ -extern void ttm_tt_fini(struct ttm_tt *ttm); -extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); +void ttm_tt_fini(struct ttm_tt *ttm); +void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); /** * ttm_ttm_bind: @@ -652,7 +650,8 @@ extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); * * Bind the pages of @ttm to an aperture location identified by @bo_mem */ -extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); +int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem, + struct ttm_operation_ctx *ctx); /** * ttm_ttm_destroy: @@ -661,7 +660,7 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); * * Unbind, unpopulate and destroy common struct ttm_tt. 
*/ -extern void ttm_tt_destroy(struct ttm_tt *ttm); +void ttm_tt_destroy(struct ttm_tt *ttm); /** * ttm_ttm_unbind: @@ -670,7 +669,7 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm); * * Unbind a struct ttm_tt. */ -extern void ttm_tt_unbind(struct ttm_tt *ttm); +void ttm_tt_unbind(struct ttm_tt *ttm); /** * ttm_tt_swapin: @@ -679,7 +678,7 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm); * * Swap in a previously swap out ttm_tt. */ -extern int ttm_tt_swapin(struct ttm_tt *ttm); +int ttm_tt_swapin(struct ttm_tt *ttm); /** * ttm_tt_set_placement_caching: @@ -694,9 +693,8 @@ extern int ttm_tt_swapin(struct ttm_tt *ttm); * hit RAM. This function may be very costly as it involves global TLB * and cache flushes and potential page splitting / combining. */ -extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); -extern int ttm_tt_swapout(struct ttm_tt *ttm, - struct file *persistent_swap_storage); +int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); +int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage); /** * ttm_tt_unpopulate - free pages from a ttm @@ -705,7 +703,7 @@ extern int ttm_tt_swapout(struct ttm_tt *ttm, * * Calls the driver method to free all pages from a ttm */ -extern void ttm_tt_unpopulate(struct ttm_tt *ttm); +void ttm_tt_unpopulate(struct ttm_tt *ttm); /* * ttm_bo.c @@ -720,8 +718,7 @@ extern void ttm_tt_unpopulate(struct ttm_tt *ttm); * Returns true if the memory described by @mem is PCI memory, * false otherwise. */ -extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); +bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); /** * ttm_bo_mem_space @@ -742,21 +739,19 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, * fragmentation or concurrent allocators. * -ERESTARTSYS: An interruptible sleep was interrupted by a signal. */ -extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, - struct ttm_placement *placement, - struct ttm_mem_reg *mem, - bool interruptible, - bool no_wait_gpu); +int ttm_bo_mem_space(struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem, + struct ttm_operation_ctx *ctx); -extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, +void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); +void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem); -extern void ttm_bo_global_release(struct drm_global_reference *ref); -extern int ttm_bo_global_init(struct drm_global_reference *ref); +void ttm_bo_global_release(struct drm_global_reference *ref); +int ttm_bo_global_init(struct drm_global_reference *ref); -extern int ttm_bo_device_release(struct ttm_bo_device *bdev); +int ttm_bo_device_release(struct ttm_bo_device *bdev); /** * ttm_bo_device_init @@ -773,18 +768,17 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev); * Returns: * !0: Failure. 
*/ -extern int ttm_bo_device_init(struct ttm_bo_device *bdev, - struct ttm_bo_global *glob, - struct ttm_bo_driver *driver, - struct address_space *mapping, - uint64_t file_page_offset, bool need_dma32); +int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, + struct ttm_bo_driver *driver, + struct address_space *mapping, + uint64_t file_page_offset, bool need_dma32); /** * ttm_bo_unmap_virtual * * @bo: tear down the virtual mappings for this BO */ -extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); /** * ttm_bo_unmap_virtual @@ -793,16 +787,15 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * * The caller must take ttm_mem_io_lock before calling this function. */ -extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); -extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); -extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); -extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, - bool interruptible); -extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible); +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); -extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); -extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); +void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo); +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); /** * __ttm_bo_reserve: @@ -836,14 +829,14 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, if (WARN_ON(ticket)) return -EBUSY; - success = ww_mutex_trylock(&bo->resv->lock); + success = reservation_object_trylock(bo->resv); return success ? 0 : -EBUSY; } if (interruptible) - ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); + ret = reservation_object_lock_interruptible(bo->resv, ticket); else - ret = ww_mutex_lock(&bo->resv->lock, ticket); + ret = reservation_object_lock(bo->resv, ticket); if (ret == -EINTR) return -ERESTARTSYS; return ret; @@ -941,18 +934,6 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, } /** - * __ttm_bo_unreserve - * @bo: A pointer to a struct ttm_buffer_object. - * - * Unreserve a previous reservation of @bo where the buffer object is - * already on lru lists. - */ -static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) -{ - ww_mutex_unlock(&bo->resv->lock); -} - -/** * ttm_bo_unreserve * * @bo: A pointer to a struct ttm_buffer_object. @@ -966,20 +947,7 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) ttm_bo_add_to_lru(bo); spin_unlock(&bo->glob->lru_lock); } - __ttm_bo_unreserve(bo); -} - -/** - * ttm_bo_unreserve_ticket - * @bo: A pointer to a struct ttm_buffer_object. - * @ticket: ww_acquire_ctx used for reserving - * - * Unreserve a previous reservation of @bo made with @ticket. - */ -static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, - struct ww_acquire_ctx *t) -{ - ttm_bo_unreserve(bo); + reservation_object_unlock(bo->resv); } /* @@ -1008,9 +976,9 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, * !0: Failure. 
*/ -extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_ttm(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_move_memcpy @@ -1030,9 +998,9 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, * !0: Failure. */ -extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool interruptible, bool no_wait_gpu, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx, + struct ttm_mem_reg *new_mem); /** * ttm_bo_free_old_node @@ -1041,7 +1009,7 @@ extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, * * Utility function to free an old placement after a successful move. */ -extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); +void ttm_bo_free_old_node(struct ttm_buffer_object *bo); /** * ttm_bo_move_accel_cleanup. @@ -1058,10 +1026,9 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * destroyed when the move is complete. This will help pipeline * buffer moves. */ - -extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - struct dma_fence *fence, bool evict, - struct ttm_mem_reg *new_mem); +int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, + struct dma_fence *fence, bool evict, + struct ttm_mem_reg *new_mem); /** * ttm_bo_pipeline_move. @@ -1087,7 +1054,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, * Utility function that returns the pgprot_t that should be used for * setting up a PTE with the caching model indicated by @c_state. */ -extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); +pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; @@ -1108,11 +1075,11 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; * for TT memory. This function uses the linux agpgart interface to * bind and unbind memory backing a ttm_tt. */ -extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, - struct agp_bridge_data *bridge, - unsigned long size, uint32_t page_flags, - struct page *dummy_read_page); -int ttm_agp_tt_populate(struct ttm_tt *ttm); +struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, + struct agp_bridge_data *bridge, + unsigned long size, uint32_t page_flags, + struct page *dummy_read_page); +int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); void ttm_agp_tt_unpopulate(struct ttm_tt *ttm); #endif diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 2c1e3598effe..8936285b6543 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -35,20 +35,7 @@ #include <linux/errno.h> #include <linux/kobject.h> #include <linux/mm.h> - -/** - * struct ttm_mem_shrink - callback to shrink TTM memory usage. - * - * @do_shrink: The callback function. - * - * Arguments to the do_shrink functions are intended to be passed using - * inheritance. That is, the argument class derives from struct ttm_mem_shrink, - * and can be accessed using container_of(). - */ - -struct ttm_mem_shrink { - int (*do_shrink) (struct ttm_mem_shrink *); -}; +#include "ttm_bo_api.h" /** * struct ttm_mem_global - Global memory accounting structure. 
@@ -76,7 +63,7 @@ struct ttm_mem_shrink { struct ttm_mem_zone; struct ttm_mem_global { struct kobject kobj; - struct ttm_mem_shrink *shrink; + struct ttm_bo_global *bo_glob; struct workqueue_struct *swap_queue; struct work_struct work; spinlock_t lock; @@ -90,67 +77,15 @@ struct ttm_mem_global { #endif }; -/** - * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object - * - * @shrink: The object to initialize. - * @func: The callback function. - */ - -static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, - int (*func) (struct ttm_mem_shrink *)) -{ - shrink->do_shrink = func; -} - -/** - * ttm_mem_register_shrink - register a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to register with. - * @shrink: An initialized struct ttm_mem_shrink object to register. - * - * Returns: - * -EBUSY: There's already a callback registered. (May change). - */ - -static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - if (glob->shrink != NULL) { - spin_unlock(&glob->lock); - return -EBUSY; - } - glob->shrink = shrink; - spin_unlock(&glob->lock); - return 0; -} - -/** - * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object. - * - * @glob: The struct ttm_mem_global object to unregister from. - * @shrink: A previously registert struct ttm_mem_shrink object. - * - */ - -static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, - struct ttm_mem_shrink *shrink) -{ - spin_lock(&glob->lock); - BUG_ON(glob->shrink != shrink); - glob->shrink = NULL; - spin_unlock(&glob->lock); -} - extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob); extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - bool no_wait, bool interruptible); + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); extern void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, uint64_t size); extern size_t ttm_round_pot(size_t size); diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index 593811362a91..4d9b019d253c 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void); * * Add backing pages to all of @ttm */ -int ttm_pool_populate(struct ttm_tt *ttm); +int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); /** * ttm_pool_unpopulate: @@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Populates and DMA maps pages to fullfil a ttm_dma_populate() request */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); +int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, + struct ttm_operation_ctx *ctx); /** * Unpopulates and DMA unmaps pages as part of a @@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void); */ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); +int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, + struct ttm_operation_ctx *ctx); void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); #else @@ -106,7 +108,8 @@ static inline int 
ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, - struct device *dev) + struct device *dev, + struct ttm_operation_ctx *ctx) { return -ENOMEM; } diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h new file mode 100644 index 000000000000..2c005376ac0e --- /dev/null +++ b/include/dt-bindings/bus/ti-sysc.h @@ -0,0 +1,22 @@ +/* TI sysc interconnect target module defines */ + +/* Generic sysc found on omap2 and later, also known as type1 */ +#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8) +#define SYSC_OMAP2_EMUFREE (1 << 5) +#define SYSC_OMAP2_ENAWAKEUP (1 << 2) +#define SYSC_OMAP2_SOFTRESET (1 << 1) +#define SYSC_OMAP2_AUTOIDLE (1 << 0) + +/* Generic sysc found on omap4 and later, also known as type2 */ +#define SYSC_OMAP4_DMADISABLE (1 << 16) +#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */ +#define SYSC_OMAP4_SOFTRESET (1 << 0) + +/* SmartReflex sysc found on 36xx and later */ +#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26) + +/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ +#define SYSC_IDLE_FORCE 0 +#define SYSC_IDLE_NO 1 +#define SYSC_IDLE_SMART 2 +#define SYSC_IDLE_SMART_WKUP 3 diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h new file mode 100644 index 000000000000..b396f00e481d --- /dev/null +++ b/include/dt-bindings/clock/am3.h @@ -0,0 +1,108 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM3_H +#define __DT_BINDINGS_CLK_AM3_H + +#define AM3_CLKCTRL_OFFSET 0x0 +#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) + +/* l4_per clocks */ +#define AM3_L4_PER_CLKCTRL_OFFSET 0x14 +#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) +#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14) +#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18) +#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c) +#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24) +#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28) +#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c) +#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30) +#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34) +#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38) +#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c) +#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40) +#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44) +#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48) +#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c) +#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50) +#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60) +#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68) +#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c) +#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70) +#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74) +#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78) +#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c) +#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80) +#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84) +#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88) +#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90) +#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94) +#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0) +#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac) +#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0) +#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4) +#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc) +#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0) +#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4) +#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc) +#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4) +#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8) +#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc) +#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0) +#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8) +#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec) +#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0) +#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4) +#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8) +#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc) +#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100) +#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c) +#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110) +#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120) +#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130) +#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4 +#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET) +#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4) +#define AM3_GPIO1_CLKCTRL 
AM3_L4_WKUP_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc) +#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14) +#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0) +#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4) +#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8) +#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc) +#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0) +#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4) +#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8) +#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4) + +/* mpu clocks */ +#define AM3_MPU_CLKCTRL_OFFSET 0x4 +#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET) +#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4 +#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET) +#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20 +#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) +#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h new file mode 100644 index 000000000000..d21df00b3270 --- /dev/null +++ b/include/dt-bindings/clock/am4.h @@ -0,0 +1,113 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_AM4_H +#define __DT_BINDINGS_CLK_AM4_H + +#define AM4_CLKCTRL_OFFSET 0x20 +#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET) + +/* l4_wkup clocks */ +#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120) +#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228) +#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230) +#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328) +#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338) +#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340) +#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348) +#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350) +#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358) +#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360) +#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368) + +/* mpu clocks */ +#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* gfx_l3 clocks */ +#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_rtc clocks */ +#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20) + +/* l4_per clocks */ +#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20) +#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28) +#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30) +#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40) +#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50) +#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58) +#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68) +#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70) +#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78) +#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80) +#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88) +#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90) +#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0) +#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220) +#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238) +#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240) +#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248) +#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258) +#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260) +#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268) +#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320) +#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420) +#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428) +#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430) +#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438) +#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440) +#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448) +#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450) +#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458) +#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460) +#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468) +#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478) +#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480) +#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488) +#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490) +#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498) +#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0) +#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8) +#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0) +#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8) +#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0) +#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8) +#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0) +#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500) +#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508) +#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510) +#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518) +#define 
AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520) +#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528) +#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530) +#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538) +#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540) +#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548) +#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550) +#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558) +#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560) +#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568) +#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570) +#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578) +#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580) +#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588) +#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590) +#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598) +#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0) +#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8) +#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0) +#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720) +#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20) +#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20) + +#endif diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h new file mode 100644 index 000000000000..d3558d897a4d --- /dev/null +++ b/include/dt-bindings/clock/aspeed-clock.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ + +#ifndef DT_BINDINGS_ASPEED_CLOCK_H +#define DT_BINDINGS_ASPEED_CLOCK_H + +#define ASPEED_CLK_GATE_ECLK 0 +#define ASPEED_CLK_GATE_GCLK 1 +#define ASPEED_CLK_GATE_MCLK 2 +#define ASPEED_CLK_GATE_VCLK 3 +#define ASPEED_CLK_GATE_BCLK 4 +#define ASPEED_CLK_GATE_DCLK 5 +#define ASPEED_CLK_GATE_REFCLK 6 +#define ASPEED_CLK_GATE_USBPORT2CLK 7 +#define ASPEED_CLK_GATE_LCLK 8 +#define ASPEED_CLK_GATE_USBUHCICLK 9 +#define ASPEED_CLK_GATE_D1CLK 10 +#define ASPEED_CLK_GATE_YCLK 11 +#define ASPEED_CLK_GATE_USBPORT1CLK 12 +#define ASPEED_CLK_GATE_UART1CLK 13 +#define ASPEED_CLK_GATE_UART2CLK 14 +#define ASPEED_CLK_GATE_UART5CLK 15 +#define ASPEED_CLK_GATE_ESPICLK 16 +#define ASPEED_CLK_GATE_MAC1CLK 17 +#define ASPEED_CLK_GATE_MAC2CLK 18 +#define ASPEED_CLK_GATE_RSACLK 19 +#define ASPEED_CLK_GATE_UART3CLK 20 +#define ASPEED_CLK_GATE_UART4CLK 21 +#define ASPEED_CLK_GATE_SDCLKCLK 22 +#define ASPEED_CLK_GATE_LHCCLK 23 +#define ASPEED_CLK_HPLL 24 +#define ASPEED_CLK_AHB 25 +#define ASPEED_CLK_APB 26 +#define ASPEED_CLK_UART 27 +#define ASPEED_CLK_SDIO 28 +#define ASPEED_CLK_ECLK 29 +#define ASPEED_CLK_ECLK_MUX 30 +#define ASPEED_CLK_LHCLK 31 +#define ASPEED_CLK_MAC 32 +#define ASPEED_CLK_BCLK 33 +#define ASPEED_CLK_MPLL 34 + +#define ASPEED_RESET_XDMA 0 +#define ASPEED_RESET_MCTP 1 +#define ASPEED_RESET_ADC 2 +#define ASPEED_RESET_JTAG_MASTER 3 +#define ASPEED_RESET_MIC 4 +#define ASPEED_RESET_PWM 5 +#define ASPEED_RESET_PCIVGA 6 +#define ASPEED_RESET_I2C 7 +#define ASPEED_RESET_AHB 8 + +#endif diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h new file mode 100644 index 000000000000..941ac70e7f30 --- /dev/null +++ b/include/dt-bindings/clock/axg-clkc.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Meson-AXG clock tree IDs + * + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
+ */ + +#ifndef __AXG_CLKC_H +#define __AXG_CLKC_H + +#define CLKID_SYS_PLL 0 +#define CLKID_FIXED_PLL 1 +#define CLKID_FCLK_DIV2 2 +#define CLKID_FCLK_DIV3 3 +#define CLKID_FCLK_DIV4 4 +#define CLKID_FCLK_DIV5 5 +#define CLKID_FCLK_DIV7 6 +#define CLKID_GP0_PLL 7 +#define CLKID_CLK81 10 +#define CLKID_MPLL0 11 +#define CLKID_MPLL1 12 +#define CLKID_MPLL2 13 +#define CLKID_MPLL3 14 +#define CLKID_DDR 15 +#define CLKID_AUDIO_LOCKER 16 +#define CLKID_MIPI_DSI_HOST 17 +#define CLKID_ISA 18 +#define CLKID_PL301 19 +#define CLKID_PERIPHS 20 +#define CLKID_SPICC0 21 +#define CLKID_I2C 22 +#define CLKID_RNG0 23 +#define CLKID_UART0 24 +#define CLKID_MIPI_DSI_PHY 25 +#define CLKID_SPICC1 26 +#define CLKID_PCIE_A 27 +#define CLKID_PCIE_B 28 +#define CLKID_HIU_IFACE 29 +#define CLKID_ASSIST_MISC 30 +#define CLKID_SD_EMMC_B 31 +#define CLKID_SD_EMMC_C 32 +#define CLKID_DMA 33 +#define CLKID_SPI 34 +#define CLKID_AUDIO 35 +#define CLKID_ETH 36 +#define CLKID_UART1 37 +#define CLKID_G2D 38 +#define CLKID_USB0 39 +#define CLKID_USB1 40 +#define CLKID_RESET 41 +#define CLKID_USB 42 +#define CLKID_AHB_ARB0 43 +#define CLKID_EFUSE 44 +#define CLKID_BOOT_ROM 45 +#define CLKID_AHB_DATA_BUS 46 +#define CLKID_AHB_CTRL_BUS 47 +#define CLKID_USB1_DDR_BRIDGE 48 +#define CLKID_USB0_DDR_BRIDGE 49 +#define CLKID_MMC_PCLK 50 +#define CLKID_VPU_INTR 51 +#define CLKID_SEC_AHB_AHB3_BRIDGE 52 +#define CLKID_GIC 53 +#define CLKID_AO_MEDIA_CPU 54 +#define CLKID_AO_AHB_SRAM 55 +#define CLKID_AO_AHB_BUS 56 +#define CLKID_AO_IFACE 57 +#define CLKID_AO_I2C 58 +#define CLKID_SD_EMMC_B_CLK0 59 +#define CLKID_SD_EMMC_C_CLK0 60 + +#endif /* __AXG_CLKC_H */ diff --git a/include/dt-bindings/clock/dm814.h b/include/dt-bindings/clock/dm814.h new file mode 100644 index 000000000000..0e7099a344e1 --- /dev/null +++ b/include/dt-bindings/clock/dm814.h @@ -0,0 +1,45 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM814_H +#define __DT_BINDINGS_CLK_DM814_H + +#define DM814_CLKCTRL_OFFSET 0x0 +#define DM814_CLKCTRL_INDEX(offset) ((offset) - DM814_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM814_USB_OTG_HS_CLKCTRL DM814_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM814_UART1_CLKCTRL DM814_CLKCTRL_INDEX(0x150) +#define DM814_UART2_CLKCTRL DM814_CLKCTRL_INDEX(0x154) +#define DM814_UART3_CLKCTRL DM814_CLKCTRL_INDEX(0x158) +#define DM814_GPIO1_CLKCTRL DM814_CLKCTRL_INDEX(0x15c) +#define DM814_GPIO2_CLKCTRL DM814_CLKCTRL_INDEX(0x160) +#define DM814_I2C1_CLKCTRL DM814_CLKCTRL_INDEX(0x164) +#define DM814_I2C2_CLKCTRL DM814_CLKCTRL_INDEX(0x168) +#define DM814_WD_TIMER_CLKCTRL DM814_CLKCTRL_INDEX(0x18c) +#define DM814_MCSPI1_CLKCTRL DM814_CLKCTRL_INDEX(0x190) +#define DM814_GPMC_CLKCTRL DM814_CLKCTRL_INDEX(0x1d0) +#define DM814_CPGMAC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1d4) +#define DM814_MPU_CLKCTRL DM814_CLKCTRL_INDEX(0x1dc) +#define DM814_RTC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f0) +#define DM814_TPCC_CLKCTRL DM814_CLKCTRL_INDEX(0x1f4) +#define DM814_TPTC0_CLKCTRL DM814_CLKCTRL_INDEX(0x1f8) +#define DM814_TPTC1_CLKCTRL DM814_CLKCTRL_INDEX(0x1fc) +#define DM814_TPTC2_CLKCTRL DM814_CLKCTRL_INDEX(0x200) +#define DM814_TPTC3_CLKCTRL DM814_CLKCTRL_INDEX(0x204) +#define DM814_MMC1_CLKCTRL DM814_CLKCTRL_INDEX(0x21c) +#define DM814_MMC2_CLKCTRL DM814_CLKCTRL_INDEX(0x220) +#define DM814_MMC3_CLKCTRL DM814_CLKCTRL_INDEX(0x224) + +#endif diff --git a/include/dt-bindings/clock/dm816.h b/include/dt-bindings/clock/dm816.h new file mode 100644 index 000000000000..69e8a36d783e --- /dev/null +++ b/include/dt-bindings/clock/dm816.h @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DM816_H +#define __DT_BINDINGS_CLK_DM816_H + +#define DM816_CLKCTRL_OFFSET 0x0 +#define DM816_CLKCTRL_INDEX(offset) ((offset) - DM816_CLKCTRL_OFFSET) + +/* default clocks */ +#define DM816_USB_OTG_HS_CLKCTRL DM816_CLKCTRL_INDEX(0x58) + +/* alwon clocks */ +#define DM816_UART1_CLKCTRL DM816_CLKCTRL_INDEX(0x150) +#define DM816_UART2_CLKCTRL DM816_CLKCTRL_INDEX(0x154) +#define DM816_UART3_CLKCTRL DM816_CLKCTRL_INDEX(0x158) +#define DM816_GPIO1_CLKCTRL DM816_CLKCTRL_INDEX(0x15c) +#define DM816_GPIO2_CLKCTRL DM816_CLKCTRL_INDEX(0x160) +#define DM816_I2C1_CLKCTRL DM816_CLKCTRL_INDEX(0x164) +#define DM816_I2C2_CLKCTRL DM816_CLKCTRL_INDEX(0x168) +#define DM816_TIMER1_CLKCTRL DM816_CLKCTRL_INDEX(0x170) +#define DM816_TIMER2_CLKCTRL DM816_CLKCTRL_INDEX(0x174) +#define DM816_TIMER3_CLKCTRL DM816_CLKCTRL_INDEX(0x178) +#define DM816_TIMER4_CLKCTRL DM816_CLKCTRL_INDEX(0x17c) +#define DM816_TIMER5_CLKCTRL DM816_CLKCTRL_INDEX(0x180) +#define DM816_TIMER6_CLKCTRL DM816_CLKCTRL_INDEX(0x184) +#define DM816_TIMER7_CLKCTRL DM816_CLKCTRL_INDEX(0x188) +#define DM816_WD_TIMER_CLKCTRL DM816_CLKCTRL_INDEX(0x18c) +#define DM816_MCSPI1_CLKCTRL DM816_CLKCTRL_INDEX(0x190) +#define DM816_MAILBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x194) +#define DM816_SPINBOX_CLKCTRL DM816_CLKCTRL_INDEX(0x198) +#define DM816_MMC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1b0) +#define DM816_GPMC_CLKCTRL DM816_CLKCTRL_INDEX(0x1d0) +#define DM816_DAVINCI_MDIO_CLKCTRL DM816_CLKCTRL_INDEX(0x1d4) +#define DM816_EMAC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1d8) +#define DM816_MPU_CLKCTRL DM816_CLKCTRL_INDEX(0x1dc) +#define DM816_RTC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f0) +#define DM816_TPCC_CLKCTRL DM816_CLKCTRL_INDEX(0x1f4) +#define DM816_TPTC0_CLKCTRL DM816_CLKCTRL_INDEX(0x1f8) +#define DM816_TPTC1_CLKCTRL DM816_CLKCTRL_INDEX(0x1fc) +#define DM816_TPTC2_CLKCTRL DM816_CLKCTRL_INDEX(0x200) +#define DM816_TPTC3_CLKCTRL DM816_CLKCTRL_INDEX(0x204) + +#endif diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h new file mode 100644 index 000000000000..5e1061b15aed --- /dev/null +++ b/include/dt-bindings/clock/dra7.h @@ -0,0 +1,172 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_DRA7_H +#define __DT_BINDINGS_CLK_DRA7_H + +#define DRA7_CLKCTRL_OFFSET 0x20 +#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define DRA7_IPU_CLKCTRL_OFFSET 0x40 +#define DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - DRA7_IPU_CLKCTRL_OFFSET) +#define DRA7_MCASP1_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x50) +#define DRA7_TIMER5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x58) +#define DRA7_TIMER6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x60) +#define DRA7_TIMER7_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x68) +#define DRA7_TIMER8_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x70) +#define DRA7_I2C5_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x78) +#define DRA7_UART6_CLKCTRL DRA7_IPU_CLKCTRL_INDEX(0x80) + +/* rtc clocks */ +#define DRA7_RTC_CLKCTRL_OFFSET 0x40 +#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET) +#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44) + +/* coreaon clocks */ +#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) + +/* l3main1 clocks */ +#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) + +/* dma clocks */ +#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + +/* atl clocks */ +#define DRA7_ATL_CLKCTRL_OFFSET 0x0 +#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET) +#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0) + +/* l4cfg clocks */ +#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58) +#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60) +#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68) +#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70) +#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78) +#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90) +#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98) +#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0) + +/* l3instr clocks */ +#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + +/* dss clocks */ +#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) + +/* l3init clocks */ +#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) +#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0) +#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8) +#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0) +#define DRA7_OCP2SCP1_CLKCTRL 
DRA7_CLKCTRL_INDEX(0xe0) +#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8) +#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0) + +/* l4per clocks */ +#define DRA7_L4PER_CLKCTRL_OFFSET 0x0 +#define DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - DRA7_L4PER_CLKCTRL_OFFSET) +#define DRA7_L4_PER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc) +#define DRA7_L4_PER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x14) +#define DRA7_TIMER10_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x28) +#define DRA7_TIMER11_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x30) +#define DRA7_TIMER2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x48) +#define DRA7_TIMER9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x50) +#define DRA7_ELM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x58) +#define DRA7_GPIO2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x60) +#define DRA7_GPIO3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x68) +#define DRA7_GPIO4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x70) +#define DRA7_GPIO5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x78) +#define DRA7_GPIO6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x80) +#define DRA7_HDQ1W_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x88) +#define DRA7_EPWMSS1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x90) +#define DRA7_EPWMSS2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x98) +#define DRA7_I2C1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa0) +#define DRA7_I2C2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xa8) +#define DRA7_I2C3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb0) +#define DRA7_I2C4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xb8) +#define DRA7_L4_PER1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc0) +#define DRA7_EPWMSS0_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc4) +#define DRA7_TIMER13_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xc8) +#define DRA7_TIMER14_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd0) +#define DRA7_TIMER15_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xd8) +#define DRA7_MCSPI1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf0) +#define DRA7_MCSPI2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0xf8) +#define DRA7_MCSPI3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x100) +#define DRA7_MCSPI4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x108) +#define DRA7_GPIO7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x110) +#define DRA7_GPIO8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x118) +#define DRA7_MMC3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x120) +#define DRA7_MMC4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x128) +#define DRA7_TIMER16_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x130) +#define DRA7_QSPI_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x138) +#define DRA7_UART1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x140) +#define DRA7_UART2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x148) +#define DRA7_UART3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x150) +#define DRA7_UART4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x158) +#define DRA7_MCASP2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x160) +#define DRA7_MCASP3_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x168) +#define DRA7_UART5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x170) +#define DRA7_MCASP5_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x178) +#define DRA7_MCASP8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x190) +#define DRA7_MCASP4_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x198) +#define DRA7_AES1_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a0) +#define DRA7_AES2_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1a8) +#define DRA7_DES_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1b0) +#define DRA7_RNG_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c0) +#define DRA7_SHAM_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1c8) +#define DRA7_UART7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1d0) +#define DRA7_UART8_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e0) +#define DRA7_UART9_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x1e8) +#define DRA7_DCAN2_CLKCTRL 
DRA7_L4PER_CLKCTRL_INDEX(0x1f0) +#define DRA7_MCASP6_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x204) +#define DRA7_MCASP7_CLKCTRL DRA7_L4PER_CLKCTRL_INDEX(0x208) + +/* wkupaon clocks */ +#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38) +#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40) +#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48) +#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50) +#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80) +#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88) + +#endif diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h index adb768d447a5..75d583eb84dd 100644 --- a/include/dt-bindings/clock/hi3660-clock.h +++ b/include/dt-bindings/clock/hi3660-clock.h @@ -208,4 +208,11 @@ #define HI3660_CLK_I2C6_IOMCU 3 #define HI3660_CLK_IOMCU_PERI0 4 +/* clk in stub clock */ +#define HI3660_CLK_STUB_CLUSTER0 0 +#define HI3660_CLK_STUB_CLUSTER1 1 +#define HI3660_CLK_STUB_GPU 2 +#define HI3660_CLK_STUB_DDR 3 +#define HI3660_CLK_STUB_NUM 4 + #endif /* __DTS_HI3660_CLOCK_H */ diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h new file mode 100644 index 000000000000..f51821a91216 --- /dev/null +++ b/include/dt-bindings/clock/omap5.h @@ -0,0 +1,118 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __DT_BINDINGS_CLK_OMAP5_H +#define __DT_BINDINGS_CLK_OMAP5_H + +#define OMAP5_CLKCTRL_OFFSET 0x20 +#define OMAP5_CLKCTRL_INDEX(offset) ((offset) - OMAP5_CLKCTRL_OFFSET) + +/* mpu clocks */ +#define OMAP5_MPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dsp clocks */ +#define OMAP5_MMU_DSP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* abe clocks */ +#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_MCBSP2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_MCBSP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_TIMER5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_TIMER6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_TIMER7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_TIMER8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) + +/* l3main1 clocks */ +#define OMAP5_L3_MAIN_1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3main2 clocks */ +#define OMAP5_L3_MAIN_2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* ipu clocks */ +#define OMAP5_MMU_IPU_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* dma clocks */ +#define OMAP5_DMA_SYSTEM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* emif clocks */ +#define OMAP5_DMM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_EMIF1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_EMIF2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) + +/* l4cfg clocks */ +#define OMAP5_L4_CFG_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_SPINLOCK_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MAILBOX_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) + +/* l3instr clocks */ +#define OMAP5_L3_MAIN_3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_L3_INSTR_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) + +/* l4per clocks */ +#define OMAP5_TIMER10_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_TIMER11_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_TIMER4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48) +#define OMAP5_TIMER9_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_GPIO2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x60) +#define OMAP5_GPIO3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_GPIO4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x70) +#define OMAP5_GPIO5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) +#define OMAP5_GPIO6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x80) +#define OMAP5_I2C1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa0) +#define OMAP5_I2C2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xa8) +#define OMAP5_I2C3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb0) +#define OMAP5_I2C4_CLKCTRL OMAP5_CLKCTRL_INDEX(0xb8) +#define OMAP5_L4_PER_CLKCTRL OMAP5_CLKCTRL_INDEX(0xc0) +#define OMAP5_MCSPI1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) +#define OMAP5_MCSPI2_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf8) +#define OMAP5_MCSPI3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x100) +#define OMAP5_MCSPI4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x108) +#define OMAP5_GPIO7_CLKCTRL OMAP5_CLKCTRL_INDEX(0x110) +#define OMAP5_GPIO8_CLKCTRL OMAP5_CLKCTRL_INDEX(0x118) +#define OMAP5_MMC3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x120) +#define OMAP5_MMC4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x128) +#define OMAP5_UART1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x140) +#define OMAP5_UART2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x148) +#define OMAP5_UART3_CLKCTRL OMAP5_CLKCTRL_INDEX(0x150) +#define OMAP5_UART4_CLKCTRL OMAP5_CLKCTRL_INDEX(0x158) +#define OMAP5_MMC5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x160) +#define OMAP5_I2C5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x168) +#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170) +#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178) + 
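Each of the TI clkctrl headers added above (am3, am4, dm814, dm816, dra7 and this omap5 one) follows the same convention: the macro resolves to the module's CLKCTRL register offset minus the base offset of its clock domain, which is the value the first cell of a clkctrl clock specifier expects. A rough devicetree sketch of a consumer, where the provider label, the second (bit) cell and the "fck" name are illustrative rather than taken from this patch:

    #include <dt-bindings/clock/omap5.h>

    &uart1 {
            /* OMAP5_UART1_CLKCTRL == OMAP5_CLKCTRL_INDEX(0x140) == 0x140 - 0x20 == 0x120 */
            clocks = <&l4per_clkctrl OMAP5_UART1_CLKCTRL 0>;
            clock-names = "fck";
    };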
+/* dss clocks */ +#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) + +/* l3init clocks */ +#define OMAP5_MMC1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28) +#define OMAP5_MMC2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_USB_HOST_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x58) +#define OMAP5_USB_TLL_HS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x68) +#define OMAP5_SATA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x88) +#define OMAP5_OCP2SCP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe0) +#define OMAP5_OCP2SCP3_CLKCTRL OMAP5_CLKCTRL_INDEX(0xe8) +#define OMAP5_USB_OTG_SS_CLKCTRL OMAP5_CLKCTRL_INDEX(0xf0) + +/* wkupaon clocks */ +#define OMAP5_L4_WKUP_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20) +#define OMAP5_WD_TIMER2_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30) +#define OMAP5_GPIO1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38) +#define OMAP5_TIMER1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x40) +#define OMAP5_COUNTER_32K_CLKCTRL OMAP5_CLKCTRL_INDEX(0x50) +#define OMAP5_KBD_CLKCTRL OMAP5_CLKCTRL_INDEX(0x78) + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 370c83c3bccc..238f872e52f4 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -58,6 +58,186 @@ #define GCC_QPIC_AHB_CLK 41 #define GCC_QPIC_CLK 42 #define PCNOC_BFDCD_CLK_SRC 43 +#define GPLL2_MAIN 44 +#define GPLL2 45 +#define GPLL4_MAIN 46 +#define GPLL4 47 +#define GPLL6_MAIN 48 +#define GPLL6 49 +#define UBI32_PLL_MAIN 50 +#define UBI32_PLL 51 +#define NSS_CRYPTO_PLL_MAIN 52 +#define NSS_CRYPTO_PLL 53 +#define PCIE0_AXI_CLK_SRC 54 +#define PCIE0_AUX_CLK_SRC 55 +#define PCIE0_PIPE_CLK_SRC 56 +#define PCIE1_AXI_CLK_SRC 57 +#define PCIE1_AUX_CLK_SRC 58 +#define PCIE1_PIPE_CLK_SRC 59 +#define SDCC1_APPS_CLK_SRC 60 +#define SDCC1_ICE_CORE_CLK_SRC 61 +#define SDCC2_APPS_CLK_SRC 62 +#define USB0_MASTER_CLK_SRC 63 +#define USB0_AUX_CLK_SRC 64 +#define USB0_MOCK_UTMI_CLK_SRC 65 +#define USB0_PIPE_CLK_SRC 66 +#define USB1_MASTER_CLK_SRC 67 +#define USB1_AUX_CLK_SRC 68 +#define USB1_MOCK_UTMI_CLK_SRC 69 +#define USB1_PIPE_CLK_SRC 70 +#define GCC_XO_CLK_SRC 71 +#define SYSTEM_NOC_BFDCD_CLK_SRC 72 +#define NSS_CE_CLK_SRC 73 +#define NSS_NOC_BFDCD_CLK_SRC 74 +#define NSS_CRYPTO_CLK_SRC 75 +#define NSS_UBI0_CLK_SRC 76 +#define NSS_UBI0_DIV_CLK_SRC 77 +#define NSS_UBI1_CLK_SRC 78 +#define NSS_UBI1_DIV_CLK_SRC 79 +#define UBI_MPT_CLK_SRC 80 +#define NSS_IMEM_CLK_SRC 81 +#define NSS_PPE_CLK_SRC 82 +#define NSS_PORT1_RX_CLK_SRC 83 +#define NSS_PORT1_RX_DIV_CLK_SRC 84 +#define NSS_PORT1_TX_CLK_SRC 85 +#define NSS_PORT1_TX_DIV_CLK_SRC 86 +#define NSS_PORT2_RX_CLK_SRC 87 +#define NSS_PORT2_RX_DIV_CLK_SRC 88 +#define NSS_PORT2_TX_CLK_SRC 89 +#define NSS_PORT2_TX_DIV_CLK_SRC 90 +#define NSS_PORT3_RX_CLK_SRC 91 +#define NSS_PORT3_RX_DIV_CLK_SRC 92 +#define NSS_PORT3_TX_CLK_SRC 93 +#define NSS_PORT3_TX_DIV_CLK_SRC 94 +#define NSS_PORT4_RX_CLK_SRC 95 +#define NSS_PORT4_RX_DIV_CLK_SRC 96 +#define NSS_PORT4_TX_CLK_SRC 97 +#define NSS_PORT4_TX_DIV_CLK_SRC 98 +#define NSS_PORT5_RX_CLK_SRC 99 +#define NSS_PORT5_RX_DIV_CLK_SRC 100 +#define NSS_PORT5_TX_CLK_SRC 101 +#define NSS_PORT5_TX_DIV_CLK_SRC 102 +#define NSS_PORT6_RX_CLK_SRC 103 +#define NSS_PORT6_RX_DIV_CLK_SRC 104 +#define NSS_PORT6_TX_CLK_SRC 105 +#define NSS_PORT6_TX_DIV_CLK_SRC 106 +#define CRYPTO_CLK_SRC 107 +#define GP1_CLK_SRC 108 +#define GP2_CLK_SRC 109 +#define GP3_CLK_SRC 110 +#define GCC_PCIE0_AHB_CLK 111 +#define GCC_PCIE0_AUX_CLK 112 +#define GCC_PCIE0_AXI_M_CLK 113 +#define GCC_PCIE0_AXI_S_CLK 114 +#define GCC_PCIE0_PIPE_CLK 115 +#define GCC_SYS_NOC_PCIE0_AXI_CLK 
116 +#define GCC_PCIE1_AHB_CLK 117 +#define GCC_PCIE1_AUX_CLK 118 +#define GCC_PCIE1_AXI_M_CLK 119 +#define GCC_PCIE1_AXI_S_CLK 120 +#define GCC_PCIE1_PIPE_CLK 121 +#define GCC_SYS_NOC_PCIE1_AXI_CLK 122 +#define GCC_USB0_AUX_CLK 123 +#define GCC_SYS_NOC_USB0_AXI_CLK 124 +#define GCC_USB0_MASTER_CLK 125 +#define GCC_USB0_MOCK_UTMI_CLK 126 +#define GCC_USB0_PHY_CFG_AHB_CLK 127 +#define GCC_USB0_PIPE_CLK 128 +#define GCC_USB0_SLEEP_CLK 129 +#define GCC_USB1_AUX_CLK 130 +#define GCC_SYS_NOC_USB1_AXI_CLK 131 +#define GCC_USB1_MASTER_CLK 132 +#define GCC_USB1_MOCK_UTMI_CLK 133 +#define GCC_USB1_PHY_CFG_AHB_CLK 134 +#define GCC_USB1_PIPE_CLK 135 +#define GCC_USB1_SLEEP_CLK 136 +#define GCC_SDCC1_AHB_CLK 137 +#define GCC_SDCC1_APPS_CLK 138 +#define GCC_SDCC1_ICE_CORE_CLK 139 +#define GCC_SDCC2_AHB_CLK 140 +#define GCC_SDCC2_APPS_CLK 141 +#define GCC_MEM_NOC_NSS_AXI_CLK 142 +#define GCC_NSS_CE_APB_CLK 143 +#define GCC_NSS_CE_AXI_CLK 144 +#define GCC_NSS_CFG_CLK 145 +#define GCC_NSS_CRYPTO_CLK 146 +#define GCC_NSS_CSR_CLK 147 +#define GCC_NSS_EDMA_CFG_CLK 148 +#define GCC_NSS_EDMA_CLK 149 +#define GCC_NSS_IMEM_CLK 150 +#define GCC_NSS_NOC_CLK 151 +#define GCC_NSS_PPE_BTQ_CLK 152 +#define GCC_NSS_PPE_CFG_CLK 153 +#define GCC_NSS_PPE_CLK 154 +#define GCC_NSS_PPE_IPE_CLK 155 +#define GCC_NSS_PTP_REF_CLK 156 +#define GCC_NSSNOC_CE_APB_CLK 157 +#define GCC_NSSNOC_CE_AXI_CLK 158 +#define GCC_NSSNOC_CRYPTO_CLK 159 +#define GCC_NSSNOC_PPE_CFG_CLK 160 +#define GCC_NSSNOC_PPE_CLK 161 +#define GCC_NSSNOC_QOSGEN_REF_CLK 162 +#define GCC_NSSNOC_SNOC_CLK 163 +#define GCC_NSSNOC_TIMEOUT_REF_CLK 164 +#define GCC_NSSNOC_UBI0_AHB_CLK 165 +#define GCC_NSSNOC_UBI1_AHB_CLK 166 +#define GCC_UBI0_AHB_CLK 167 +#define GCC_UBI0_AXI_CLK 168 +#define GCC_UBI0_NC_AXI_CLK 169 +#define GCC_UBI0_CORE_CLK 170 +#define GCC_UBI0_MPT_CLK 171 +#define GCC_UBI1_AHB_CLK 172 +#define GCC_UBI1_AXI_CLK 173 +#define GCC_UBI1_NC_AXI_CLK 174 +#define GCC_UBI1_CORE_CLK 175 +#define GCC_UBI1_MPT_CLK 176 +#define GCC_CMN_12GPLL_AHB_CLK 177 +#define GCC_CMN_12GPLL_SYS_CLK 178 +#define GCC_MDIO_AHB_CLK 179 +#define GCC_UNIPHY0_AHB_CLK 180 +#define GCC_UNIPHY0_SYS_CLK 181 +#define GCC_UNIPHY1_AHB_CLK 182 +#define GCC_UNIPHY1_SYS_CLK 183 +#define GCC_UNIPHY2_AHB_CLK 184 +#define GCC_UNIPHY2_SYS_CLK 185 +#define GCC_NSS_PORT1_RX_CLK 186 +#define GCC_NSS_PORT1_TX_CLK 187 +#define GCC_NSS_PORT2_RX_CLK 188 +#define GCC_NSS_PORT2_TX_CLK 189 +#define GCC_NSS_PORT3_RX_CLK 190 +#define GCC_NSS_PORT3_TX_CLK 191 +#define GCC_NSS_PORT4_RX_CLK 192 +#define GCC_NSS_PORT4_TX_CLK 193 +#define GCC_NSS_PORT5_RX_CLK 194 +#define GCC_NSS_PORT5_TX_CLK 195 +#define GCC_NSS_PORT6_RX_CLK 196 +#define GCC_NSS_PORT6_TX_CLK 197 +#define GCC_PORT1_MAC_CLK 198 +#define GCC_PORT2_MAC_CLK 199 +#define GCC_PORT3_MAC_CLK 200 +#define GCC_PORT4_MAC_CLK 201 +#define GCC_PORT5_MAC_CLK 202 +#define GCC_PORT6_MAC_CLK 203 +#define GCC_UNIPHY0_PORT1_RX_CLK 204 +#define GCC_UNIPHY0_PORT1_TX_CLK 205 +#define GCC_UNIPHY0_PORT2_RX_CLK 206 +#define GCC_UNIPHY0_PORT2_TX_CLK 207 +#define GCC_UNIPHY0_PORT3_RX_CLK 208 +#define GCC_UNIPHY0_PORT3_TX_CLK 209 +#define GCC_UNIPHY0_PORT4_RX_CLK 210 +#define GCC_UNIPHY0_PORT4_TX_CLK 211 +#define GCC_UNIPHY0_PORT5_RX_CLK 212 +#define GCC_UNIPHY0_PORT5_TX_CLK 213 +#define GCC_UNIPHY1_PORT5_RX_CLK 214 +#define GCC_UNIPHY1_PORT5_TX_CLK 215 +#define GCC_UNIPHY2_PORT6_RX_CLK 216 +#define GCC_UNIPHY2_PORT6_TX_CLK 217 +#define GCC_CRYPTO_AHB_CLK 218 +#define GCC_CRYPTO_AXI_CLK 219 +#define GCC_CRYPTO_CLK 220 +#define GCC_GP1_CLK 221 +#define GCC_GP2_CLK 222 
+#define GCC_GP3_CLK 223 #define GCC_BLSP1_BCR 0 #define GCC_BLSP1_QUP1_BCR 1 @@ -148,5 +328,47 @@ #define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86 #define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87 #define GCC_SMMU_CATS_BCR 88 +#define GCC_UBI0_AXI_ARES 89 +#define GCC_UBI0_AHB_ARES 90 +#define GCC_UBI0_NC_AXI_ARES 91 +#define GCC_UBI0_DBG_ARES 92 +#define GCC_UBI0_CORE_CLAMP_ENABLE 93 +#define GCC_UBI0_CLKRST_CLAMP_ENABLE 94 +#define GCC_UBI1_AXI_ARES 95 +#define GCC_UBI1_AHB_ARES 96 +#define GCC_UBI1_NC_AXI_ARES 97 +#define GCC_UBI1_DBG_ARES 98 +#define GCC_UBI1_CORE_CLAMP_ENABLE 99 +#define GCC_UBI1_CLKRST_CLAMP_ENABLE 100 +#define GCC_NSS_CFG_ARES 101 +#define GCC_NSS_IMEM_ARES 102 +#define GCC_NSS_NOC_ARES 103 +#define GCC_NSS_CRYPTO_ARES 104 +#define GCC_NSS_CSR_ARES 105 +#define GCC_NSS_CE_APB_ARES 106 +#define GCC_NSS_CE_AXI_ARES 107 +#define GCC_NSSNOC_CE_APB_ARES 108 +#define GCC_NSSNOC_CE_AXI_ARES 109 +#define GCC_NSSNOC_UBI0_AHB_ARES 110 +#define GCC_NSSNOC_UBI1_AHB_ARES 111 +#define GCC_NSSNOC_SNOC_ARES 112 +#define GCC_NSSNOC_CRYPTO_ARES 113 +#define GCC_NSSNOC_ATB_ARES 114 +#define GCC_NSSNOC_QOSGEN_REF_ARES 115 +#define GCC_NSSNOC_TIMEOUT_REF_ARES 116 +#define GCC_PCIE0_PIPE_ARES 117 +#define GCC_PCIE0_SLEEP_ARES 118 +#define GCC_PCIE0_CORE_STICKY_ARES 119 +#define GCC_PCIE0_AXI_MASTER_ARES 120 +#define GCC_PCIE0_AXI_SLAVE_ARES 121 +#define GCC_PCIE0_AHB_ARES 122 +#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 123 +#define GCC_PCIE1_PIPE_ARES 124 +#define GCC_PCIE1_SLEEP_ARES 125 +#define GCC_PCIE1_CORE_STICKY_ARES 126 +#define GCC_PCIE1_AXI_MASTER_ARES 127 +#define GCC_PCIE1_AXI_SLAVE_ARES 128 +#define GCC_PCIE1_AHB_ARES 129 +#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130 #endif diff --git a/include/dt-bindings/clock/sprd,sc9860-clk.h b/include/dt-bindings/clock/sprd,sc9860-clk.h new file mode 100644 index 000000000000..4cb202f090c2 --- /dev/null +++ b/include/dt-bindings/clock/sprd,sc9860-clk.h @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +// +// Spreadtrum SC9860 platform clocks +// +// Copyright (C) 2017, Spreadtrum Communications Inc. 
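The IPQ8074 identifiers added to qcom,gcc-ipq8074.h above are consumed from devicetree in the usual single-cell Qualcomm GCC style, one cell for a clock ID and one for a reset ID. A purely illustrative sketch; the node, labels and the clock-/reset-names are assumptions, not part of this patch:

    #include <dt-bindings/clock/qcom,gcc-ipq8074.h>

    pcie0: pci@20000000 {
            clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
                     <&gcc GCC_PCIE0_AXI_M_CLK>,
                     <&gcc GCC_PCIE0_AXI_S_CLK>;
            clock-names = "iface", "axi_m", "axi_s";
            resets = <&gcc GCC_PCIE0_PIPE_ARES>,
                     <&gcc GCC_PCIE0_AHB_ARES>;
            reset-names = "pipe", "ahb";
    };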
+ +#ifndef _DT_BINDINGS_CLK_SC9860_H_ +#define _DT_BINDINGS_CLK_SC9860_H_ + +#define CLK_FAC_4M 0 +#define CLK_FAC_2M 1 +#define CLK_FAC_1M 2 +#define CLK_FAC_250K 3 +#define CLK_FAC_RPLL0_26M 4 +#define CLK_FAC_RPLL1_26M 5 +#define CLK_FAC_RCO25M 6 +#define CLK_FAC_RCO4M 7 +#define CLK_FAC_RCO2M 8 +#define CLK_FAC_3K2 9 +#define CLK_FAC_1K 10 +#define CLK_MPLL0_GATE 11 +#define CLK_MPLL1_GATE 12 +#define CLK_DPLL0_GATE 13 +#define CLK_DPLL1_GATE 14 +#define CLK_LTEPLL0_GATE 15 +#define CLK_TWPLL_GATE 16 +#define CLK_LTEPLL1_GATE 17 +#define CLK_RPLL0_GATE 18 +#define CLK_RPLL1_GATE 19 +#define CLK_CPPLL_GATE 20 +#define CLK_GPLL_GATE 21 +#define CLK_PMU_GATE_NUM (CLK_GPLL_GATE + 1) + +#define CLK_MPLL0 0 +#define CLK_MPLL1 1 +#define CLK_DPLL0 2 +#define CLK_DPLL1 3 +#define CLK_RPLL0 4 +#define CLK_RPLL1 5 +#define CLK_TWPLL 6 +#define CLK_LTEPLL0 7 +#define CLK_LTEPLL1 8 +#define CLK_GPLL 9 +#define CLK_CPPLL 10 +#define CLK_GPLL_42M5 11 +#define CLK_TWPLL_768M 12 +#define CLK_TWPLL_384M 13 +#define CLK_TWPLL_192M 14 +#define CLK_TWPLL_96M 15 +#define CLK_TWPLL_48M 16 +#define CLK_TWPLL_24M 17 +#define CLK_TWPLL_12M 18 +#define CLK_TWPLL_512M 19 +#define CLK_TWPLL_256M 20 +#define CLK_TWPLL_128M 21 +#define CLK_TWPLL_64M 22 +#define CLK_TWPLL_307M2 23 +#define CLK_TWPLL_153M6 24 +#define CLK_TWPLL_76M8 25 +#define CLK_TWPLL_51M2 26 +#define CLK_TWPLL_38M4 27 +#define CLK_TWPLL_19M2 28 +#define CLK_L0_614M4 29 +#define CLK_L0_409M6 30 +#define CLK_L0_38M 31 +#define CLK_L1_38M 32 +#define CLK_RPLL0_192M 33 +#define CLK_RPLL0_96M 34 +#define CLK_RPLL0_48M 35 +#define CLK_RPLL1_468M 36 +#define CLK_RPLL1_192M 37 +#define CLK_RPLL1_96M 38 +#define CLK_RPLL1_64M 39 +#define CLK_RPLL1_48M 40 +#define CLK_DPLL0_50M 41 +#define CLK_DPLL1_50M 42 +#define CLK_CPPLL_50M 43 +#define CLK_M0_39M 44 +#define CLK_M1_63M 45 +#define CLK_PLL_NUM (CLK_M1_63M + 1) + + +#define CLK_AP_APB 0 +#define CLK_AP_USB3 1 +#define CLK_UART0 2 +#define CLK_UART1 3 +#define CLK_UART2 4 +#define CLK_UART3 5 +#define CLK_UART4 6 +#define CLK_I2C0 7 +#define CLK_I2C1 8 +#define CLK_I2C2 9 +#define CLK_I2C3 10 +#define CLK_I2C4 11 +#define CLK_I2C5 12 +#define CLK_SPI0 13 +#define CLK_SPI1 14 +#define CLK_SPI2 15 +#define CLK_SPI3 16 +#define CLK_IIS0 17 +#define CLK_IIS1 18 +#define CLK_IIS2 19 +#define CLK_IIS3 20 +#define CLK_AP_CLK_NUM (CLK_IIS3 + 1) + +#define CLK_AON_APB 0 +#define CLK_AUX0 1 +#define CLK_AUX1 2 +#define CLK_AUX2 3 +#define CLK_PROBE 4 +#define CLK_SP_AHB 5 +#define CLK_CCI 6 +#define CLK_GIC 7 +#define CLK_CSSYS 8 +#define CLK_SDIO0_2X 9 +#define CLK_SDIO1_2X 10 +#define CLK_SDIO2_2X 11 +#define CLK_EMMC_2X 12 +#define CLK_SDIO0_1X 13 +#define CLK_SDIO1_1X 14 +#define CLK_SDIO2_1X 15 +#define CLK_EMMC_1X 16 +#define CLK_ADI 17 +#define CLK_PWM0 18 +#define CLK_PWM1 19 +#define CLK_PWM2 20 +#define CLK_PWM3 21 +#define CLK_EFUSE 22 +#define CLK_CM3_UART0 23 +#define CLK_CM3_UART1 24 +#define CLK_THM 25 +#define CLK_CM3_I2C0 26 +#define CLK_CM3_I2C1 27 +#define CLK_CM4_SPI 28 +#define CLK_AON_I2C 29 +#define CLK_AVS 30 +#define CLK_CA53_DAP 31 +#define CLK_CA53_TS 32 +#define CLK_DJTAG_TCK 33 +#define CLK_PMU 34 +#define CLK_PMU_26M 35 +#define CLK_DEBOUNCE 36 +#define CLK_OTG2_REF 37 +#define CLK_USB3_REF 38 +#define CLK_AP_AXI 39 +#define CLK_AON_PREDIV_NUM (CLK_AP_AXI + 1) + +#define CLK_USB3_EB 0 +#define CLK_USB3_SUSPEND_EB 1 +#define CLK_USB3_REF_EB 2 +#define CLK_DMA_EB 3 +#define CLK_SDIO0_EB 4 +#define CLK_SDIO1_EB 5 +#define CLK_SDIO2_EB 6 +#define CLK_EMMC_EB 7 +#define CLK_ROM_EB 8 
+#define CLK_BUSMON_EB 9 +#define CLK_CC63S_EB 10 +#define CLK_CC63P_EB 11 +#define CLK_CE0_EB 12 +#define CLK_CE1_EB 13 +#define CLK_APAHB_GATE_NUM (CLK_CE1_EB + 1) + +#define CLK_AVS_LIT_EB 0 +#define CLK_AVS_BIG_EB 1 +#define CLK_AP_INTC5_EB 2 +#define CLK_GPIO_EB 3 +#define CLK_PWM0_EB 4 +#define CLK_PWM1_EB 5 +#define CLK_PWM2_EB 6 +#define CLK_PWM3_EB 7 +#define CLK_KPD_EB 8 +#define CLK_AON_SYS_EB 9 +#define CLK_AP_SYS_EB 10 +#define CLK_AON_TMR_EB 11 +#define CLK_AP_TMR0_EB 12 +#define CLK_EFUSE_EB 13 +#define CLK_EIC_EB 14 +#define CLK_PUB1_REG_EB 15 +#define CLK_ADI_EB 16 +#define CLK_AP_INTC0_EB 17 +#define CLK_AP_INTC1_EB 18 +#define CLK_AP_INTC2_EB 19 +#define CLK_AP_INTC3_EB 20 +#define CLK_AP_INTC4_EB 21 +#define CLK_SPLK_EB 22 +#define CLK_MSPI_EB 23 +#define CLK_PUB0_REG_EB 24 +#define CLK_PIN_EB 25 +#define CLK_AON_CKG_EB 26 +#define CLK_GPU_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APCPU_TS1_EB 29 +#define CLK_DAP_EB 30 +#define CLK_I2C_EB 31 +#define CLK_PMU_EB 32 +#define CLK_THM_EB 33 +#define CLK_AUX0_EB 34 +#define CLK_AUX1_EB 35 +#define CLK_AUX2_EB 36 +#define CLK_PROBE_EB 37 +#define CLK_GPU0_AVS_EB 38 +#define CLK_GPU1_AVS_EB 39 +#define CLK_APCPU_WDG_EB 40 +#define CLK_AP_TMR1_EB 41 +#define CLK_AP_TMR2_EB 42 +#define CLK_DISP_EMC_EB 43 +#define CLK_ZIP_EMC_EB 44 +#define CLK_GSP_EMC_EB 45 +#define CLK_OSC_AON_EB 46 +#define CLK_LVDS_TRX_EB 47 +#define CLK_LVDS_TCXO_EB 48 +#define CLK_MDAR_EB 49 +#define CLK_RTC4M0_CAL_EB 50 +#define CLK_RCT100M_CAL_EB 51 +#define CLK_DJTAG_EB 52 +#define CLK_MBOX_EB 53 +#define CLK_AON_DMA_EB 54 +#define CLK_DBG_EMC_EB 55 +#define CLK_LVDS_PLL_DIV_EN 56 +#define CLK_DEF_EB 57 +#define CLK_AON_APB_RSV0 58 +#define CLK_ORP_JTAG_EB 59 +#define CLK_VSP_EB 60 +#define CLK_CAM_EB 61 +#define CLK_DISP_EB 62 +#define CLK_DBG_AXI_IF_EB 63 +#define CLK_SDIO0_2X_EN 64 +#define CLK_SDIO1_2X_EN 65 +#define CLK_SDIO2_2X_EN 66 +#define CLK_EMMC_2X_EN 67 +#define CLK_AON_GATE_NUM (CLK_EMMC_2X_EN + 1) + +#define CLK_LIT_MCU 0 +#define CLK_BIG_MCU 1 +#define CLK_AONSECURE_NUM (CLK_BIG_MCU + 1) + +#define CLK_AGCP_IIS0_EB 0 +#define CLK_AGCP_IIS1_EB 1 +#define CLK_AGCP_IIS2_EB 2 +#define CLK_AGCP_IIS3_EB 3 +#define CLK_AGCP_UART_EB 4 +#define CLK_AGCP_DMACP_EB 5 +#define CLK_AGCP_DMAAP_EB 6 +#define CLK_AGCP_ARC48K_EB 7 +#define CLK_AGCP_SRC44P1K_EB 8 +#define CLK_AGCP_MCDT_EB 9 +#define CLK_AGCP_VBCIFD_EB 10 +#define CLK_AGCP_VBC_EB 11 +#define CLK_AGCP_SPINLOCK_EB 12 +#define CLK_AGCP_ICU_EB 13 +#define CLK_AGCP_AP_ASHB_EB 14 +#define CLK_AGCP_CP_ASHB_EB 15 +#define CLK_AGCP_AUD_EB 16 +#define CLK_AGCP_AUDIF_EB 17 +#define CLK_AGCP_GATE_NUM (CLK_AGCP_AUDIF_EB + 1) + +#define CLK_GPU 0 +#define CLK_GPU_NUM (CLK_GPU + 1) + +#define CLK_AHB_VSP 0 +#define CLK_VSP 1 +#define CLK_VSP_ENC 2 +#define CLK_VPP 3 +#define CLK_VSP_26M 4 +#define CLK_VSP_NUM (CLK_VSP_26M + 1) + +#define CLK_VSP_DEC_EB 0 +#define CLK_VSP_CKG_EB 1 +#define CLK_VSP_MMU_EB 2 +#define CLK_VSP_ENC_EB 3 +#define CLK_VPP_EB 4 +#define CLK_VSP_26M_EB 5 +#define CLK_VSP_AXI_GATE 6 +#define CLK_VSP_ENC_GATE 7 +#define CLK_VPP_AXI_GATE 8 +#define CLK_VSP_BM_GATE 9 +#define CLK_VSP_ENC_BM_GATE 10 +#define CLK_VPP_BM_GATE 11 +#define CLK_VSP_GATE_NUM (CLK_VPP_BM_GATE + 1) + +#define CLK_AHB_CAM 0 +#define CLK_SENSOR0 1 +#define CLK_SENSOR1 2 +#define CLK_SENSOR2 3 +#define CLK_MIPI_CSI0_EB 4 +#define CLK_MIPI_CSI1_EB 5 +#define CLK_CAM_NUM (CLK_MIPI_CSI1_EB + 1) + +#define CLK_DCAM0_EB 0 +#define CLK_DCAM1_EB 1 +#define CLK_ISP0_EB 2 +#define CLK_CSI0_EB 3 +#define CLK_CSI1_EB 4 
+#define CLK_JPG0_EB 5 +#define CLK_JPG1_EB 6 +#define CLK_CAM_CKG_EB 7 +#define CLK_CAM_MMU_EB 8 +#define CLK_ISP1_EB 9 +#define CLK_CPP_EB 10 +#define CLK_MMU_PF_EB 11 +#define CLK_ISP2_EB 12 +#define CLK_DCAM2ISP_IF_EB 13 +#define CLK_ISP2DCAM_IF_EB 14 +#define CLK_ISP_LCLK_EB 15 +#define CLK_ISP_ICLK_EB 16 +#define CLK_ISP_MCLK_EB 17 +#define CLK_ISP_PCLK_EB 18 +#define CLK_ISP_ISP2DCAM_EB 19 +#define CLK_DCAM0_IF_EB 20 +#define CLK_CLK26M_IF_EB 21 +#define CLK_CPHY0_GATE 22 +#define CLK_MIPI_CSI0_GATE 23 +#define CLK_CPHY1_GATE 24 +#define CLK_MIPI_CSI1 25 +#define CLK_DCAM0_AXI_GATE 26 +#define CLK_DCAM1_AXI_GATE 27 +#define CLK_SENSOR0_GATE 28 +#define CLK_SENSOR1_GATE 29 +#define CLK_JPG0_AXI_GATE 30 +#define CLK_GPG1_AXI_GATE 31 +#define CLK_ISP0_AXI_GATE 32 +#define CLK_ISP1_AXI_GATE 33 +#define CLK_ISP2_AXI_GATE 34 +#define CLK_CPP_AXI_GATE 35 +#define CLK_D0_IF_AXI_GATE 36 +#define CLK_D2I_IF_AXI_GATE 37 +#define CLK_I2D_IF_AXI_GATE 38 +#define CLK_SPARE_AXI_GATE 39 +#define CLK_SENSOR2_GATE 40 +#define CLK_D0IF_IN_D_EN 41 +#define CLK_D1IF_IN_D_EN 42 +#define CLK_D0IF_IN_D2I_EN 43 +#define CLK_D1IF_IN_D2I_EN 44 +#define CLK_IA_IN_D2I_EN 45 +#define CLK_IB_IN_D2I_EN 46 +#define CLK_IC_IN_D2I_EN 47 +#define CLK_IA_IN_I_EN 48 +#define CLK_IB_IN_I_EN 49 +#define CLK_IC_IN_I_EN 50 +#define CLK_CAM_GATE_NUM (CLK_IC_IN_I_EN + 1) + +#define CLK_AHB_DISP 0 +#define CLK_DISPC0_DPI 1 +#define CLK_DISPC1_DPI 2 +#define CLK_DISP_NUM (CLK_DISPC1_DPI + 1) + +#define CLK_DISPC0_EB 0 +#define CLK_DISPC1_EB 1 +#define CLK_DISPC_MMU_EB 2 +#define CLK_GSP0_EB 3 +#define CLK_GSP1_EB 4 +#define CLK_GSP0_MMU_EB 5 +#define CLK_GSP1_MMU_EB 6 +#define CLK_DSI0_EB 7 +#define CLK_DSI1_EB 8 +#define CLK_DISP_CKG_EB 9 +#define CLK_DISP_GPU_EB 10 +#define CLK_GPU_MTX_EB 11 +#define CLK_GSP_MTX_EB 12 +#define CLK_TMC_MTX_EB 13 +#define CLK_DISPC_MTX_EB 14 +#define CLK_DPHY0_GATE 15 +#define CLK_DPHY1_GATE 16 +#define CLK_GSP0_A_GATE 17 +#define CLK_GSP1_A_GATE 18 +#define CLK_GSP0_F_GATE 19 +#define CLK_GSP1_F_GATE 20 +#define CLK_D_MTX_F_GATE 21 +#define CLK_D_MTX_A_GATE 22 +#define CLK_D_NOC_F_GATE 23 +#define CLK_D_NOC_A_GATE 24 +#define CLK_GSP_MTX_F_GATE 25 +#define CLK_GSP_MTX_A_GATE 26 +#define CLK_GSP_NOC_F_GATE 27 +#define CLK_GSP_NOC_A_GATE 28 +#define CLK_DISPM0IDLE_GATE 29 +#define CLK_GSPM0IDLE_GATE 30 +#define CLK_DISP_GATE_NUM (CLK_GSPM0IDLE_GATE + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_IIS3_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_I2C0_EB 8 +#define CLK_I2C1_EB 9 +#define CLK_I2C2_EB 10 +#define CLK_I2C3_EB 11 +#define CLK_I2C4_EB 12 +#define CLK_I2C5_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_UART3_EB 17 +#define CLK_UART4_EB 18 +#define CLK_AP_CKG_EB 19 +#define CLK_SPI3_EB 20 +#define CLK_APAPB_GATE_NUM (CLK_SPI3_EB + 1) + +#endif /* _DT_BINDINGS_CLK_SC9860_H_ */ diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h new file mode 100644 index 000000000000..56fc4889b2c4 --- /dev/null +++ b/include/dt-bindings/gpio/aspeed-gpio.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * This header provides constants for binding aspeed,*-gpio. + * + * The first cell in Aspeed's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. 
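A minimal illustration of the two-cell specifier described in the binding comment above, using the ASPEED_GPIO() helper defined at the end of this header; the consumer node and the &gpio label are illustrative only:

    #include <dt-bindings/gpio/aspeed-gpio.h>

    leds {
            compatible = "gpio-leds";

            led-heartbeat {
                    /* ASPEED_GPIO(A, 4) expands to (0 * 8) + 4 = 4 */
                    gpios = <&gpio ASPEED_GPIO(A, 4) GPIO_ACTIVE_LOW>;
            };
    };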
+ */ + +#ifndef _DT_BINDINGS_GPIO_ASPEED_GPIO_H +#define _DT_BINDINGS_GPIO_ASPEED_GPIO_H + +#include <dt-bindings/gpio/gpio.h> + +#define ASPEED_GPIO_PORT_A 0 +#define ASPEED_GPIO_PORT_B 1 +#define ASPEED_GPIO_PORT_C 2 +#define ASPEED_GPIO_PORT_D 3 +#define ASPEED_GPIO_PORT_E 4 +#define ASPEED_GPIO_PORT_F 5 +#define ASPEED_GPIO_PORT_G 6 +#define ASPEED_GPIO_PORT_H 7 +#define ASPEED_GPIO_PORT_I 8 +#define ASPEED_GPIO_PORT_J 9 +#define ASPEED_GPIO_PORT_K 10 +#define ASPEED_GPIO_PORT_L 11 +#define ASPEED_GPIO_PORT_M 12 +#define ASPEED_GPIO_PORT_N 13 +#define ASPEED_GPIO_PORT_O 14 +#define ASPEED_GPIO_PORT_P 15 +#define ASPEED_GPIO_PORT_Q 16 +#define ASPEED_GPIO_PORT_R 17 +#define ASPEED_GPIO_PORT_S 18 +#define ASPEED_GPIO_PORT_T 19 +#define ASPEED_GPIO_PORT_U 20 +#define ASPEED_GPIO_PORT_V 21 +#define ASPEED_GPIO_PORT_W 22 +#define ASPEED_GPIO_PORT_X 23 +#define ASPEED_GPIO_PORT_Y 24 +#define ASPEED_GPIO_PORT_Z 25 +#define ASPEED_GPIO_PORT_AA 26 +#define ASPEED_GPIO_PORT_AB 27 +#define ASPEED_GPIO_PORT_AC 28 + +#define ASPEED_GPIO(port, offset) \ + ((ASPEED_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h index dd549ff04295..2cc10ae4bbb7 100644 --- a/include/dt-bindings/gpio/gpio.h +++ b/include/dt-bindings/gpio/gpio.h @@ -29,8 +29,8 @@ #define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN) #define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE) -/* Bit 3 express GPIO suspend/resume persistence */ -#define GPIO_SLEEP_MAINTAIN_VALUE 0 -#define GPIO_SLEEP_MAY_LOSE_VALUE 8 +/* Bit 3 express GPIO suspend/resume and reset persistence */ +#define GPIO_PERSISTENT 0 +#define GPIO_TRANSITORY 8 #endif diff --git a/include/dt-bindings/gpio/meson-axg-gpio.h b/include/dt-bindings/gpio/meson-axg-gpio.h new file mode 100644 index 000000000000..25bb1fffa97a --- /dev/null +++ b/include/dt-bindings/gpio/meson-axg-gpio.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2017 Amlogic, Inc. All rights reserved. 
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com> + * + * SPDX-License-Identifier: GPL-2.0+ + */ + +#ifndef _DT_BINDINGS_MESON_AXG_GPIO_H +#define _DT_BINDINGS_MESON_AXG_GPIO_H + +/* First GPIO chip */ +#define GPIOAO_0 0 +#define GPIOAO_1 1 +#define GPIOAO_2 2 +#define GPIOAO_3 3 +#define GPIOAO_4 4 +#define GPIOAO_5 5 +#define GPIOAO_6 6 +#define GPIOAO_7 7 +#define GPIOAO_8 8 +#define GPIOAO_9 9 +#define GPIOAO_10 10 +#define GPIOAO_11 11 +#define GPIOAO_12 12 +#define GPIOAO_13 13 +#define GPIO_TEST_N 14 + +/* Second GPIO chip */ +#define GPIOZ_0 0 +#define GPIOZ_1 1 +#define GPIOZ_2 2 +#define GPIOZ_3 3 +#define GPIOZ_4 4 +#define GPIOZ_5 5 +#define GPIOZ_6 6 +#define GPIOZ_7 7 +#define GPIOZ_8 8 +#define GPIOZ_9 9 +#define GPIOZ_10 10 +#define BOOT_0 11 +#define BOOT_1 12 +#define BOOT_2 13 +#define BOOT_3 14 +#define BOOT_4 15 +#define BOOT_5 16 +#define BOOT_6 17 +#define BOOT_7 18 +#define BOOT_8 19 +#define BOOT_9 20 +#define BOOT_10 21 +#define BOOT_11 22 +#define BOOT_12 23 +#define BOOT_13 24 +#define BOOT_14 25 +#define GPIOA_0 26 +#define GPIOA_1 27 +#define GPIOA_2 28 +#define GPIOA_3 29 +#define GPIOA_4 30 +#define GPIOA_5 31 +#define GPIOA_6 32 +#define GPIOA_7 33 +#define GPIOA_8 34 +#define GPIOA_9 35 +#define GPIOA_10 36 +#define GPIOA_11 37 +#define GPIOA_12 38 +#define GPIOA_13 39 +#define GPIOA_14 40 +#define GPIOA_15 41 +#define GPIOA_16 42 +#define GPIOA_17 43 +#define GPIOA_18 44 +#define GPIOA_19 45 +#define GPIOA_20 46 +#define GPIOX_0 47 +#define GPIOX_1 48 +#define GPIOX_2 49 +#define GPIOX_3 50 +#define GPIOX_4 51 +#define GPIOX_5 52 +#define GPIOX_6 53 +#define GPIOX_7 54 +#define GPIOX_8 55 +#define GPIOX_9 56 +#define GPIOX_10 57 +#define GPIOX_11 58 +#define GPIOX_12 59 +#define GPIOX_13 60 +#define GPIOX_14 61 +#define GPIOX_15 62 +#define GPIOX_16 63 +#define GPIOX_17 64 +#define GPIOX_18 65 +#define GPIOX_19 66 +#define GPIOX_20 67 +#define GPIOX_21 68 +#define GPIOX_22 69 +#define GPIOY_0 70 +#define GPIOY_1 71 +#define GPIOY_2 72 +#define GPIOY_3 73 +#define GPIOY_4 74 +#define GPIOY_5 75 +#define GPIOY_6 76 +#define GPIOY_7 77 +#define GPIOY_8 78 +#define GPIOY_9 79 +#define GPIOY_10 80 +#define GPIOY_11 81 +#define GPIOY_12 82 +#define GPIOY_13 83 +#define GPIOY_14 84 +#define GPIOY_15 85 + +#endif /* _DT_BINDINGS_MESON_AXG_GPIO_H */ diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h new file mode 100644 index 000000000000..64813536aec9 --- /dev/null +++ b/include/dt-bindings/memory/tegra186-mc.h @@ -0,0 +1,111 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H +#define DT_BINDINGS_MEMORY_TEGRA186_MC_H + +/* special clients */ +#define TEGRA186_SID_INVALID 0x00 +#define TEGRA186_SID_PASSTHROUGH 0x7f + +/* host1x clients */ +#define TEGRA186_SID_HOST1X 0x01 +#define TEGRA186_SID_CSI 0x02 +#define TEGRA186_SID_VIC 0x03 +#define TEGRA186_SID_VI 0x04 +#define TEGRA186_SID_ISP 0x05 +#define TEGRA186_SID_NVDEC 0x06 +#define TEGRA186_SID_NVENC 0x07 +#define TEGRA186_SID_NVJPG 0x08 +#define TEGRA186_SID_NVDISPLAY 0x09 +#define TEGRA186_SID_TSEC 0x0a +#define TEGRA186_SID_TSECB 0x0b +#define TEGRA186_SID_SE 0x0c +#define TEGRA186_SID_SE1 0x0d +#define TEGRA186_SID_SE2 0x0e +#define TEGRA186_SID_SE3 0x0f + +/* GPU clients */ +#define TEGRA186_SID_GPU 0x10 + +/* other SoC clients */ +#define TEGRA186_SID_AFI 0x11 +#define TEGRA186_SID_HDA 0x12 +#define TEGRA186_SID_ETR 0x13 +#define TEGRA186_SID_EQOS 0x14 +#define TEGRA186_SID_UFSHC 0x15 +#define TEGRA186_SID_AON 0x16 +#define TEGRA186_SID_SDMMC4 0x17 +#define 
TEGRA186_SID_SDMMC3 0x18 +#define TEGRA186_SID_SDMMC2 0x19 +#define TEGRA186_SID_SDMMC1 0x1a +#define TEGRA186_SID_XUSB_HOST 0x1b +#define TEGRA186_SID_XUSB_DEV 0x1c +#define TEGRA186_SID_SATA 0x1d +#define TEGRA186_SID_APE 0x1e +#define TEGRA186_SID_SCE 0x1f + +/* GPC DMA clients */ +#define TEGRA186_SID_GPCDMA_0 0x20 +#define TEGRA186_SID_GPCDMA_1 0x21 +#define TEGRA186_SID_GPCDMA_2 0x22 +#define TEGRA186_SID_GPCDMA_3 0x23 +#define TEGRA186_SID_GPCDMA_4 0x24 +#define TEGRA186_SID_GPCDMA_5 0x25 +#define TEGRA186_SID_GPCDMA_6 0x26 +#define TEGRA186_SID_GPCDMA_7 0x27 + +/* APE DMA clients */ +#define TEGRA186_SID_APE_1 0x28 +#define TEGRA186_SID_APE_2 0x29 + +/* camera RTCPU */ +#define TEGRA186_SID_RCE 0x2a + +/* camera RTCPU on host1x address space */ +#define TEGRA186_SID_RCE_1X 0x2b + +/* APE DMA clients */ +#define TEGRA186_SID_APE_3 0x2c + +/* camera RTCPU running on APE */ +#define TEGRA186_SID_APE_CAM 0x2d +#define TEGRA186_SID_APE_CAM_1X 0x2e + +/* + * The BPMP has its SID value hardcoded in the firmware. Changing it requires + * considerable effort. + */ +#define TEGRA186_SID_BPMP 0x32 + +/* for SMMU tests */ +#define TEGRA186_SID_SMMU_TEST 0x33 + +/* host1x virtualization channels */ +#define TEGRA186_SID_HOST1X_CTX0 0x38 +#define TEGRA186_SID_HOST1X_CTX1 0x39 +#define TEGRA186_SID_HOST1X_CTX2 0x3a +#define TEGRA186_SID_HOST1X_CTX3 0x3b +#define TEGRA186_SID_HOST1X_CTX4 0x3c +#define TEGRA186_SID_HOST1X_CTX5 0x3d +#define TEGRA186_SID_HOST1X_CTX6 0x3e +#define TEGRA186_SID_HOST1X_CTX7 0x3f + +/* host1x command buffers */ +#define TEGRA186_SID_HOST1X_VM0 0x40 +#define TEGRA186_SID_HOST1X_VM1 0x41 +#define TEGRA186_SID_HOST1X_VM2 0x42 +#define TEGRA186_SID_HOST1X_VM3 0x43 +#define TEGRA186_SID_HOST1X_VM4 0x44 +#define TEGRA186_SID_HOST1X_VM5 0x45 +#define TEGRA186_SID_HOST1X_VM6 0x46 +#define TEGRA186_SID_HOST1X_VM7 0x47 + +/* SE data buffers */ +#define TEGRA186_SID_SE_VM0 0x48 +#define TEGRA186_SID_SE_VM1 0x49 +#define TEGRA186_SID_SE_VM2 0x4a +#define TEGRA186_SID_SE_VM3 0x4b +#define TEGRA186_SID_SE_VM4 0x4c +#define TEGRA186_SID_SE_VM5 0x4d +#define TEGRA186_SID_SE_VM6 0x4e +#define TEGRA186_SID_SE_VM7 0x4f + +#endif diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h index a69e310789c5..6ce4a32f77d4 100644 --- a/include/dt-bindings/pinctrl/am43xx.h +++ b/include/dt-bindings/pinctrl/am43xx.h @@ -25,7 +25,8 @@ #define DS0_FORCE_OFF_MODE (1 << 24) #define DS0_INPUT (1 << 25) #define DS0_FORCE_OUT_HIGH (1 << 26) -#define DS0_PULL_UP_DOWN_EN (1 << 27) +#define DS0_PULL_UP_DOWN_EN (0 << 27) +#define DS0_PULL_UP_DOWN_DIS (1 << 27) #define DS0_PULL_UP_SEL (1 << 28) #define WAKEUP_ENABLE (1 << 29) diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h index b8dfe31821e6..b5a2174a6386 100644 --- a/include/dt-bindings/pinctrl/stm32-pinfunc.h +++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (C) STMicroelectronics 2017 - All Rights Reserved + * Author: Torgue Alexandre <alexandre.torgue@st.com> for STMicroelectronics. + */ + #ifndef _DT_BINDINGS_STM32_PINFUNC_H #define _DT_BINDINGS_STM32_PINFUNC_H diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h new file mode 100644 index 000000000000..92b46d772fae --- /dev/null +++ b/include/dt-bindings/power/mt2712-power.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2017 MediaTek Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. + */ + +#ifndef _DT_BINDINGS_POWER_MT2712_POWER_H +#define _DT_BINDINGS_POWER_MT2712_POWER_H + +#define MT2712_POWER_DOMAIN_MM 0 +#define MT2712_POWER_DOMAIN_VDEC 1 +#define MT2712_POWER_DOMAIN_VENC 2 +#define MT2712_POWER_DOMAIN_ISP 3 +#define MT2712_POWER_DOMAIN_AUDIO 4 +#define MT2712_POWER_DOMAIN_USB 5 +#define MT2712_POWER_DOMAIN_USB2 6 +#define MT2712_POWER_DOMAIN_MFG 7 + +#endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */ diff --git a/include/dt-bindings/power/owl-s700-powergate.h b/include/dt-bindings/power/owl-s700-powergate.h new file mode 100644 index 000000000000..4cf1aefbf09c --- /dev/null +++ b/include/dt-bindings/power/owl-s700-powergate.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Actions Semi S700 SPS + * + * Copyright (c) 2017 Andreas Färber + */ +#ifndef DT_BINDINGS_POWER_OWL_S700_POWERGATE_H +#define DT_BINDINGS_POWER_OWL_S700_POWERGATE_H + +#define S700_PD_VDE 0 +#define S700_PD_VCE_SI 1 +#define S700_PD_USB2_1 2 +#define S700_PD_HDE 3 +#define S700_PD_DMA 4 +#define S700_PD_DS 5 +#define S700_PD_USB3 6 +#define S700_PD_USB2_0 7 + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h new file mode 100644 index 000000000000..ad6f55dabd6d --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -0,0 +1,124 @@ +/* + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Copyright (c) 2017 Amlogic, inc. 
+ * Author: Yixun Lan <yixun.lan@amlogic.com> + * + * SPDX-License-Identifier: (GPL-2.0+ OR BSD) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_PCIE_A 1 +#define RESET_PCIE_B 2 +#define RESET_DDR_TOP 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_PCIE_PHY 6 +#define RESET_PCIE_APB 7 +/* 8 */ +/* 9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +/* 12 */ +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18-21 */ +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +/* 28-31 */ +/* RESET1 */ +/* 32 */ +/* 33 */ +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +/* 37 */ +#define RESET_AHB_SRAM 38 +/* 39 */ +/* 40 */ +#define RESET_DMA 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +/* 44 */ +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61-63 */ +/* RESET2 */ +/* 64 */ +/* 65 */ +#define RESET_AUDIO 66 +/* 67 */ +#define RESET_MIPI_HOST 68 +#define RESET_AUDIO_LOCKER 69 +#define RESET_GE2D 70 +/* 71-76 */ +#define RESET_AO_CPU_RESET 77 +/* 78-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +/* 97-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +#define RESET_MIPI_PHY 130 +/* 131-140 */ +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 +/* 194 */ +/* 195 */ +#define RESET_PERIPHS_I2C_MASTER_0 196 +/* 197-200 */ +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +/* 203-204 */ +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_I2C_MASTER_3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +#define RESET_DMC_VPU_PIPEL 233 +/* 234-255 */ + +#endif diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b8f4c3c776e5..e6d41b65d396 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? 
\ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) +#define ACPI_HANDLE_FWNODE(fwnode) \ + acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { @@ -585,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); +void *acpi_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); @@ -627,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count) #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +#define ACPI_HANDLE_FWNODE(fwnode) (NULL) #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), struct fwnode_handle; @@ -762,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device( return NULL; } +static inline void *acpi_get_match_data(const struct device *dev) +{ + return NULL; +} + static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { @@ -985,6 +994,11 @@ struct acpi_gpio_mapping { const char *name; const struct acpi_gpio_params *data; unsigned int size; + +/* Ignore IoRestriction field */ +#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) + + unsigned int quirks; }; #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h new file mode 100644 index 000000000000..942afbd544b7 --- /dev/null +++ b/include/linux/arm_sdei.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2017 Arm Ltd. +#ifndef __LINUX_ARM_SDEI_H +#define __LINUX_ARM_SDEI_H + +#include <uapi/linux/arm_sdei.h> + +enum sdei_conduit_types { + CONDUIT_INVALID = 0, + CONDUIT_SMC, + CONDUIT_HVC, +}; + +#include <asm/sdei.h> + +/* Arch code should override this to set the entry point from firmware... */ +#ifndef sdei_arch_get_entry_point +#define sdei_arch_get_entry_point(conduit) (0) +#endif + +/* + * When an event occurs sdei_event_handler() will call a user-provided callback + * like this in NMI context on the CPU that received the event. + */ +typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); + +/* + * Register your callback to claim an event. The event must be described + * by firmware. + */ +int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); + +/* + * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling + * it until it succeeds. + */ +int sdei_event_unregister(u32 event_num); + +int sdei_event_enable(u32 event_num); +int sdei_event_disable(u32 event_num); + +#ifdef CONFIG_ARM_SDE_INTERFACE +/* For use by arch code when CPU hotplug notifiers are not appropriate. */ +int sdei_mask_local_cpu(void); +int sdei_unmask_local_cpu(void); +#else +static inline int sdei_mask_local_cpu(void) { return 0; } +static inline int sdei_unmask_local_cpu(void) { return 0; } +#endif /* CONFIG_ARM_SDE_INTERFACE */ + + +/* + * This struct represents an event that has been registered. The driver + * maintains a list of all events, and which ones are registered. (Private + * events have one entry in the list, but are registered on each CPU). + * A pointer to this struct is passed to firmware, and back to the event + * handler. 
The event handler can then use this to invoke the registered + * callback, without having to walk the list. + * + * For CPU private events, this structure is per-cpu. + */ +struct sdei_registered_event { + /* For use by arch code: */ + struct pt_regs interrupted_regs; + + sdei_event_callback *callback; + void *callback_arg; + u32 event_num; + u8 priority; +}; + +/* The arch code entry point should then call this when an event arrives. */ +int notrace sdei_event_handler(struct pt_regs *regs, + struct sdei_registered_event *arg); + +/* arch code may use this to retrieve the extra registers. */ +int sdei_api_event_context(u32 query, u64 *result); + +#endif /* __LINUX_ARM_SDEI_H */ diff --git a/include/linux/ata.h b/include/linux/ata.h index c7a353825450..40d150ad7e07 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -448,6 +448,8 @@ enum { ATA_SET_MAX_LOCK = 0x02, ATA_SET_MAX_UNLOCK = 0x03, ATA_SET_MAX_FREEZE_LOCK = 0x04, + ATA_SET_MAX_PASSWD_DMA = 0x05, + ATA_SET_MAX_UNLOCK_DMA = 0x06, /* feature values for DEVICE CONFIGURATION OVERLAY */ ATA_DCO_RESTORE = 0xC0, diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 1030651f8309..cf2588d81148 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -16,6 +16,7 @@ #define _LINUX_BITFIELD_H #include <linux/build_bug.h> +#include <asm/byteorder.h> /* * Bitfield access macros @@ -103,4 +104,49 @@ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ }) +extern void __compiletime_warning("value doesn't fit into mask") +__field_overflow(void); +extern void __compiletime_error("bad bitfield mask") +__bad_mask(void); +static __always_inline u64 field_multiplier(u64 field) +{ + if ((field | (field - 1)) & ((field | (field - 1)) + 1)) + __bad_mask(); + return field & -field; +} +static __always_inline u64 field_mask(u64 field) +{ + return field / field_multiplier(field); +} +#define ____MAKE_OP(type,base,to,from) \ +static __always_inline __##type type##_encode_bits(base v, base field) \ +{ \ + if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \ + __field_overflow(); \ + return to((v & field_mask(field)) * field_multiplier(field)); \ +} \ +static __always_inline __##type type##_replace_bits(__##type old, \ + base val, base field) \ +{ \ + return (old & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline void type##p_replace_bits(__##type *p, \ + base val, base field) \ +{ \ + *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ +} \ +static __always_inline base type##_get_bits(__##type v, base field) \ +{ \ + return (from(v) & field)/field_multiplier(field); \ +} +#define __MAKE_OP(size) \ + ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ + ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ + ____MAKE_OP(u##size,u##size,,) +__MAKE_OP(16) +__MAKE_OP(32) +__MAKE_OP(64) +#undef __MAKE_OP +#undef ____MAKE_OP + #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index c5d3db0d83f8..bf18b95ed92d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -39,6 +39,24 @@ typedef u8 __bitwise blk_status_t; #define BLK_STS_AGAIN ((__force blk_status_t)12) +/* + * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if + * device related resources are unavailable, but the driver can guarantee + * that the queue will be rerun in the future once resources become + * available again. This is typically the case for device specific + * resources that are consumed for IO. 
If the driver fails allocating these + * resources, we know that inflight (or pending) IO will free these + * resource upon completion. + * + * This is different from BLK_STS_RESOURCE in that it explicitly references + * a device specific resource. For resources of wider scope, allocation + * failure can happen without having pending IO. This means that we can't + * rely on request completions freeing these resources, as IO may not be in + * flight. Examples of that are kernel memory allocations, DMA mappings, or + * any other system wide resources. + */ +#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0b25cf87b6d6..66df387106de 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -17,6 +17,7 @@ #include <linux/numa.h> #include <linux/wait.h> +struct bpf_verifier_env; struct perf_event; struct bpf_prog; struct bpf_map; @@ -24,6 +25,7 @@ struct bpf_map; /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ + int (*map_alloc_check)(union bpf_attr *attr); struct bpf_map *(*map_alloc)(union bpf_attr *attr); void (*map_release)(struct bpf_map *map, struct file *map_file); void (*map_free)(struct bpf_map *map); @@ -72,6 +74,33 @@ struct bpf_map { char name[BPF_OBJ_NAME_LEN]; }; +struct bpf_offloaded_map; + +struct bpf_map_dev_ops { + int (*map_get_next_key)(struct bpf_offloaded_map *map, + void *key, void *next_key); + int (*map_lookup_elem)(struct bpf_offloaded_map *map, + void *key, void *value); + int (*map_update_elem)(struct bpf_offloaded_map *map, + void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); +}; + +struct bpf_offloaded_map { + struct bpf_map map; + struct net_device *netdev; + const struct bpf_map_dev_ops *dev_ops; + void *dev_priv; + struct list_head offloads; +}; + +static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) +{ + return container_of(map, struct bpf_offloaded_map, map); +} + +extern const struct bpf_map_ops bpf_map_offload_ops; + /* function argument constraints */ enum bpf_arg_type { ARG_DONTCARE = 0, /* unused argument in helper function */ @@ -193,14 +222,20 @@ struct bpf_verifier_ops { struct bpf_prog *prog, u32 *target_size); }; -struct bpf_dev_offload { +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); +}; + +struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; void *dev_priv; struct list_head offloads; bool dev_state; - bool verifier_running; - wait_queue_head_t verifier_done; + const struct bpf_prog_offload_ops *dev_ops; + void *jited_image; + u32 jited_len; }; struct bpf_prog_aux { @@ -209,6 +244,10 @@ struct bpf_prog_aux { u32 max_ctx_offset; u32 stack_depth; u32 id; + u32 func_cnt; + bool offload_requested; + struct bpf_prog **func; + void *jit_data; /* JIT specific data. 
arch dependent */ struct latch_tree_node ksym_tnode; struct list_head ksym_lnode; const struct bpf_prog_ops *ops; @@ -220,7 +259,7 @@ struct bpf_prog_aux { #ifdef CONFIG_SECURITY void *security; #endif - struct bpf_dev_offload *offload; + struct bpf_prog_offload *offload; union { struct work_struct work; struct rcu_head rcu; @@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, struct bpf_prog *old_prog); +int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, + __u32 __user *prog_ids, u32 request_cnt, + __u32 __user *prog_cnt); int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, @@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); +void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); + struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); @@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); +void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); extern int sysctl_unprivileged_bpf_disabled; @@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); @@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); int bpf_prog_offload_compile(struct bpf_prog *prog); void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); + +int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); + +int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); +int bpf_map_offload_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags); +int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); +int bpf_map_offload_get_next_key(struct bpf_map *map, + void *key, void *next_key); + +bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { - return aux->offload; + return aux->offload_requested; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return unlikely(map->ops == &bpf_map_offload_ops); +} + +struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); +void bpf_map_offload_map_free(struct bpf_map *map); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { return false; } + +static inline bool bpf_map_is_dev_bound(struct bpf_map *map) +{ + return false; +} + +static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) +{ + return 
ERR_PTR(-EOPNOTSUPP); +} + +static inline void bpf_map_offload_map_free(struct bpf_map *map) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); #else diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 978c1d9c9383..19b8349a3809 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#ifdef CONFIG_STREAM_PARSER +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1632bb13ad8a..6b66cd1aa0b9 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -76,6 +76,14 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* Inside the callee two registers can be both PTR_TO_STACK like + * R1=fp-8 and R2=fp-8, but one of them points to this function stack + * while another to the caller's stack. To differentiate them 'frameno' + * is used which is an index in bpf_verifier_state->frame[] array + * pointing to bpf_func_state. + * This field must be second to last, for states_equal() reasons. + */ + u32 frameno; /* This field must be last, for states_equal() reasons. */ enum bpf_reg_liveness live; }; @@ -83,7 +91,8 @@ struct bpf_reg_state { enum bpf_stack_slot_type { STACK_INVALID, /* nothing was stored in this stack slot */ STACK_SPILL, /* register spilled into stack */ - STACK_MISC /* BPF program wrote some data into this slot */ + STACK_MISC, /* BPF program wrote some data into this slot */ + STACK_ZERO, /* BPF program wrote constant zero */ }; #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ @@ -96,13 +105,34 @@ struct bpf_stack_state { /* state of the program: * type of all registers and stack info */ -struct bpf_verifier_state { +struct bpf_func_state { struct bpf_reg_state regs[MAX_BPF_REG]; struct bpf_verifier_state *parent; + /* index of call instruction that called into this func */ + int callsite; + /* stack frame number of this function state from pov of + * enclosing bpf_verifier_state. + * 0 = main function, 1 = first callee. + */ + u32 frameno; + /* subprog number == index within subprog_stack_depth + * zero == main subprog + */ + u32 subprogno; + + /* should be second to last. 
See copy_func_state() */ int allocated_stack; struct bpf_stack_state *stack; }; +#define MAX_CALL_FRAMES 8 +struct bpf_verifier_state { + /* call stack tracking */ + struct bpf_func_state *frame[MAX_CALL_FRAMES]; + struct bpf_verifier_state *parent; + u32 curframe; +}; + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; @@ -113,6 +143,7 @@ struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ + s32 call_imm; /* saved imm field of call insn */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ bool seen; /* this insn was processed by the verifier */ @@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) return log->len_used >= log->len_total - 1; } -struct bpf_verifier_env; -struct bpf_ext_analyzer_ops { - int (*insn_hook)(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); -}; +#define BPF_MAX_SUBPROGS 256 /* single container for all structs * one verifier_env per bpf_check() call @@ -152,29 +179,31 @@ struct bpf_verifier_env { bool strict_alignment; /* perform strict pointer alignment checks */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ - const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ - struct bpf_verifer_log log; + u32 subprog_starts[BPF_MAX_SUBPROGS]; + /* computes the stack depth of each bpf function */ + u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; + u32 subprog_cnt; }; +__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, + const char *fmt, ...); + static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) { - return env->cur_state->regs; + struct bpf_verifier_state *cur = env->cur_state; + + return cur->frame[cur->curframe]->regs; } -#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); -#else -static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) -{ - return -EOPNOTSUPP; -} -#endif +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 8ff86b4c1b8a..d3339dd48b1a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -14,6 +14,7 @@ #define PHY_ID_BCM5241 0x0143bc30 #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 +#define PHY_ID_BCM5395 0x0143bcf0 #define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 8b1bf8d3d4a2..894e5d125de6 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -81,11 +81,14 @@ struct buffer_head { /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. + * To avoid reset buffer flags that are already set, because that causes + * a costly cache line transition, check the flag first. 
*/ #define BUFFER_FNS(bit, name) \ static __always_inline void set_buffer_##name(struct buffer_head *bh) \ { \ - set_bit(BH_##bit, &(bh)->b_state); \ + if (!test_bit(BH_##bit, &(bh)->b_state)) \ + set_bit(BH_##bit, &(bh)->b_state); \ } \ static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ { \ @@ -151,7 +154,6 @@ void buffer_check_dirty_writeback(struct page *page, void mark_buffer_dirty(struct buffer_head *bh); void mark_buffer_write_io_error(struct buffer_head *bh); -void init_buffer(struct buffer_head *, bh_end_io_t *, void *); void touch_buffer(struct buffer_head *bh); void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset); diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 61f1cf2d9f44..055aaf5ed9af 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -46,6 +46,7 @@ struct can_priv { unsigned int bitrate_const_cnt; const u32 *data_bitrate_const; unsigned int data_bitrate_const_cnt; + u32 bitrate_max; struct can_clock clock; enum can_state state; @@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); +#ifdef CONFIG_OF +void of_can_transceiver(struct net_device *dev); +#else +static inline void of_can_transceiver(struct net_device *dev) { } +#endif + struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); struct sk_buff *alloc_canfd_skb(struct net_device *dev, struct canfd_frame **cfd); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8b7fd8eeccee..9f242b876fde 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -561,7 +561,7 @@ struct cftype { /* * Control Group subsystem type. - * See Documentation/cgroups/cgroups.txt for details + * See Documentation/cgroup-v1/cgroups.txt for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7c925e6211f1..f711be6e8c44 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -20,6 +20,8 @@ * flags used across common struct clk. these flags should only affect the * top-level framework. custom flags for dealing with hardware specifics * belong in struct clk_foo + * + * Please update clk_flags[] in drivers/clk/clk.c when making changes here! 
*/ #define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ @@ -412,7 +414,7 @@ extern const struct clk_ops clk_divider_ro_ops; unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, unsigned int val, const struct clk_div_table *table, - unsigned long flags); + unsigned long flags, unsigned long width); long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, unsigned long rate, unsigned long *prate, const struct clk_div_table *table, @@ -744,6 +746,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); bool clk_hw_is_prepared(const struct clk_hw *hw); +bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); @@ -806,6 +809,44 @@ extern struct of_device_id __clk_of_table; } \ OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) +#define CLK_HW_INIT(_name, _parent, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = (const char *[]) { _parent }, \ + .num_parents = 1, \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .ops = _ops, \ + }) + +#define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ + (&(struct clk_init_data) { \ + .flags = _flags, \ + .name = _name, \ + .parent_names = NULL, \ + .num_parents = 0, \ + .ops = _ops, \ + }) + +#define CLK_FIXED_FACTOR(_struct, _name, _parent, \ + _div, _mult, _flags) \ + struct clk_fixed_factor _struct = { \ + .div = _div, \ + .mult = _mult, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &clk_fixed_factor_ops, \ + _flags), \ + } + #ifdef CONFIG_OF int of_clk_add_provider(struct device_node *np, struct clk *(*clk_src_get)(struct of_phandle_args *args, diff --git a/include/linux/clk.h b/include/linux/clk.h index 12c96d94d1fa..4c4ef9f34db3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer to execute, even indirectly, + * opereation which could alter the rate of the provider or cause glitches + * + * If exlusivity is claimed more than once on clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity it previously got + * from clk_rate_exclusive_get() + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context. 
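As an illustrative aside (not part of this patch), a consumer might use the rate-exclusivity API documented above roughly as follows; the "ref" clock name and the my_* symbols are assumptions.

static int my_lock_refclk(struct device *dev, struct clk **out)
{
        struct clk *refclk;
        int ret;

        refclk = devm_clk_get(dev, "ref");
        if (IS_ERR(refclk))
                return PTR_ERR(refclk);

        ret = clk_rate_exclusive_get(refclk);
        if (ret)
                return ret;

        /* The rate is now locked until a balancing clk_rate_exclusive_put(). */
        *out = refclk;
        return 0;
}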
+ */ +void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. @@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate); int clk_set_rate(struct clk *clk, unsigned long rate); /** + * clk_set_rate_exclusive- set the clock rate and claim exclusivity over + * clock source + * @clk: clock source + * @rate: desired clock rate in Hz + * + * This helper function allows drivers to atomically set the rate of a producer + * and claim exclusivity over the rate control of the producer. + * + * It is essentially a combination of clk_set_rate() and + * clk_rate_exclusite_get(). Caller must balance this call with a call to + * clk_rate_exclusive_put() + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); + +/** * clk_has_parent - check if a clock is a possible parent for another * @clk: clock source * @parent: parent clock source @@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} + +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + static inline int clk_enable(struct clk *clk) { return 0; @@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate) return 0; } +static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) +{ + return 0; +} + static inline long clk_round_rate(struct clk *clk, unsigned long rate) { return 0; diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 2eabc862abdb..4890ff033220 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h @@ -12,7 +12,7 @@ #ifndef __CLKDEV_H #define __CLKDEV_H -#include <asm/clkdev.h> +#include <linux/slab.h> struct clk; struct clk_hw; @@ -52,9 +52,4 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); int clk_register_clkdev(struct clk *, const char *, const char *); int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); -#ifdef CONFIG_COMMON_CLK -int __clk_get(struct clk *clk); -void __clk_put(struct clk *clk); -#endif - #endif diff --git a/include/linux/compat.h b/include/linux/compat.h index 0fc36406f32c..8a9643857c4a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -157,6 +157,104 @@ struct compat_sigaction { compat_sigset_t sa_mask __packed; }; +typedef union compat_sigval { + compat_int_t sival_int; + compat_uptr_t sival_ptr; +} compat_sigval_t; + +typedef struct compat_siginfo { + int si_signo; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO + int si_errno; + int si_code; +#else + int si_code; + int si_errno; +#endif + + union { + int _pad[128/sizeof(int) - 3]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + compat_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval; /* same as below */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + +#ifdef CONFIG_X86_X32_ABI + /* SIGCHLD (x32 version) */ + 
struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_s64 _utime; + compat_s64 _stime; + } _sigchld_x32; +#endif + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ + struct { + compat_uptr_t _addr; /* faulting insn/memory ref. */ +#ifdef __ARCH_SI_TRAPNO + int _trapno; /* TRAP # which caused the signal */ +#endif + union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short int _addr_lsb; /* Valid LSB of the reported address. */ + /* used when si_code=SEGV_BNDERR */ + struct { + short _dummy_bnd; + compat_uptr_t _lower; + compat_uptr_t _upper; + } _addr_bnd; + /* used when si_code=SEGV_PKUERR */ + struct { + short _dummy_pkey; + u32 _pkey; + } _addr_pkey; + }; + } _sigfault; + + /* SIGPOLL */ + struct { + compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + + struct { + compat_uptr_t _call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + unsigned int _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; + } _sifields; +} compat_siginfo_t; + /* * These functions operate on 32- or 64-bit specs depending on * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. @@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); +int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 52e611ab9a6c..c2cc57a2f508 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -185,23 +185,21 @@ void __read_once_size(const volatile void *p, void *res, int size) #ifdef CONFIG_KASAN /* - * This function is not 'inline' because __no_sanitize_address confilcts + * We can't declare function 'inline' because __no_sanitize_address confilcts * with inlining. Attempt to inline it may cause a build failure. * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. */ -static __no_sanitize_address __maybe_unused -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} +# define __no_kasan_or_inline __no_sanitize_address __maybe_unused #else -static __always_inline +# define __no_kasan_or_inline __always_inline +#endif + +static __no_kasan_or_inline void __read_once_size_nocheck(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; } -#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -240,6 +238,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. 
*/ #include <asm/barrier.h> +#include <linux/kasan-checks.h> #define __READ_ONCE(x, check) \ ({ \ @@ -259,6 +258,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 1a32e558eb11..5172ad0daa7c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -59,6 +59,7 @@ enum cpuhp_state { CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_INTEL_DEAD, CPUHP_LUSTRE_CFS_DEAD, + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -109,6 +110,7 @@ enum cpuhp_state { CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + CPUHP_AP_ARM_SDEI_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, @@ -137,6 +139,7 @@ enum cpuhp_state { CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, + CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 8f7788d23b57..871f9e21810c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif -#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ -({ \ - int __ret; \ - \ - if (!idx) { \ - cpu_do_idle(); \ - return idx; \ - } \ - \ - __ret = cpu_pm_enter(); \ - if (!__ret) { \ - __ret = low_level_idle_enter(idx); \ - cpu_pm_exit(); \ - } \ - \ - __ret ? -1 : idx; \ +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +({ \ + int __ret = 0; \ + \ + if (!idx) { \ + cpu_do_idle(); \ + return idx; \ + } \ + \ + if (!is_retention) \ + __ret = cpu_pm_enter(); \ + if (!__ret) { \ + __ret = low_level_idle_enter(idx); \ + if (!is_retention) \ + cpu_pm_exit(); \ + } \ + \ + __ret ? -1 : idx; \ }) +#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 78508ca4b108..7e6e84cf6383 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -107,8 +107,16 @@ #define CRYPTO_ALG_INTERNAL 0x00002000 /* + * Set if the algorithm has a ->setkey() method but can be used without + * calling it first, i.e. there is a default key. + */ +#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 + +/* * Transform masks and values (for crt_flags). 
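As an illustrative aside (not part of this patch), an algorithm with a built-in default key might advertise the new CRYPTO_ALG_OPTIONAL_KEY flag roughly as below, assuming the usual <crypto/hash.h> shash definitions; the algorithm name and the my_crc_* callbacks are assumptions provided elsewhere.

static struct shash_alg my_crc_alg = {
        .digestsize     = 4,
        .descsize       = 4,
        .init           = my_crc_init,
        .update         = my_crc_update,
        .final          = my_crc_final,
        .setkey         = my_crc_setkey,        /* optional thanks to the flag */
        .base           = {
                .cra_name        = "mycrc32",
                .cra_driver_name = "mycrc32-generic",
                .cra_priority    = 100,
                .cra_flags       = CRYPTO_ALG_OPTIONAL_KEY,
                .cra_blocksize   = 1,
                .cra_module      = THIS_MODULE,
        },
};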
*/ +#define CRYPTO_TFM_NEED_KEY 0x00000001 + #define CRYPTO_TFM_REQ_MASK 0x000fff00 #define CRYPTO_TFM_RES_MASK 0xfff00000 @@ -447,7 +455,7 @@ struct crypto_alg { unsigned int cra_alignmask; int cra_priority; - atomic_t cra_refcnt; + refcount_t cra_refcnt; char cra_name[CRYPTO_MAX_ALG_NAME]; char cra_driver_name[CRYPTO_MAX_ALG_NAME]; diff --git a/include/linux/dax.h b/include/linux/dax.h index 5258346c558c..0185ecdae135 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, - pfn_t *pfnp, const struct iomap_ops *ops); + pfn_t *pfnp, int *errp, const struct iomap_ops *ops); int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, pfn_t pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 65cd8ab60b7a..82a99d366aec 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -227,6 +227,7 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); +extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); @@ -235,6 +236,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); +extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a5538433c927..da83f64952e7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -28,6 +28,7 @@ enum dm_queue_mode { DM_TYPE_REQUEST_BASED = 2, DM_TYPE_MQ_REQUEST_BASED = 3, DM_TYPE_DAX_BIO_BASED = 4, + DM_TYPE_NVME_BIO_BASED = 5, }; typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; @@ -221,14 +222,6 @@ struct target_type { #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) /* - * Some targets need to be sent the same WRITE bio severals times so - * that they can send copies of it to different devices. This function - * examines any supplied bio and returns the number of copies of it the - * target requires. - */ -typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio); - -/* * A target implements own bio data integrity. */ #define DM_TARGET_INTEGRITY 0x00000010 @@ -291,13 +284,6 @@ struct dm_target { */ unsigned per_io_data_size; - /* - * If defined, this function is called to find out how many - * duplicate bios should be sent to the target when writing - * data. - */ - dm_num_write_bios_fn num_write_bios; - /* target specific data */ void *private; @@ -329,35 +315,9 @@ struct dm_target_callbacks { int (*congested_fn) (struct dm_target_callbacks *, int); }; -/* - * For bio-based dm. - * One of these is allocated for each bio. - * This structure shouldn't be touched directly by target drivers. 
- * It is here so that we can inline dm_per_bio_data and - * dm_bio_from_per_bio_data - */ -struct dm_target_io { - struct dm_io *io; - struct dm_target *ti; - unsigned target_bio_nr; - unsigned *len_ptr; - struct bio clone; -}; - -static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) -{ - return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; -} - -static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) -{ - return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); -} - -static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) -{ - return container_of(bio, struct dm_target_io, clone)->target_bio_nr; -} +void *dm_per_bio_data(struct bio *bio, size_t data_size); +struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); +unsigned dm_bio_get_target_bio_nr(const struct bio *bio); int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); int dm_table_complete(struct dm_table *t); /* + * Destroy the table when finished. + */ +void dm_table_destroy(struct dm_table *t); + +/* * Target may require that it is never sent I/O larger than len. */ int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); @@ -585,6 +550,7 @@ do { \ #define DM_ENDIO_DONE 0 #define DM_ENDIO_INCOMPLETE 1 #define DM_ENDIO_REQUEUE 2 +#define DM_ENDIO_DELAY_REQUEUE 3 /* * Definitions of return values from target map function. @@ -592,7 +558,7 @@ do { \ #define DM_MAPIO_SUBMITTED 0 #define DM_MAPIO_REMAPPED 1 #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE -#define DM_MAPIO_DELAY_REQUEUE 3 +#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE #define DM_MAPIO_KILL 4 #define dm_sector_div64(x, y)( \ diff --git a/include/linux/device.h b/include/linux/device.h index 9d32000725da..b093405ed525 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * device.h - generic, centralized driver model * @@ -5,8 +6,6 @@ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2008-2009 Novell Inc. * - * This file is released under the GPLv2 - * * See Documentation/driver-model/ for more information. 
*/ @@ -21,7 +20,6 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/mutex.h> -#include <linux/pinctrl/devinfo.h> #include <linux/pm.h> #include <linux/atomic.h> #include <linux/ratelimit.h> @@ -42,6 +40,7 @@ struct fwnode_handle; struct iommu_ops; struct iommu_group; struct iommu_fwspec; +struct dev_pin_info; struct bus_attribute { struct attribute attr; @@ -288,6 +287,7 @@ struct device_driver { const struct attribute_group **groups; const struct dev_pm_ops *pm; + int (*coredump) (struct device *dev); struct driver_private *p; }; @@ -301,7 +301,6 @@ extern struct device_driver *driver_find(const char *name, extern int driver_probe_done(void); extern void wait_for_device_probe(void); - /* sysfs interface for exporting driver attributes */ struct driver_attribute { @@ -575,6 +574,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_##_name = \ + __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) #define DEVICE_ATTR_RO(_name) \ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 79f27d60ec66..085db2fee2d7 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -301,7 +301,7 @@ struct dma_buf { struct dma_fence_cb cb; wait_queue_head_t *poll; - unsigned long active; + __poll_t active; } cb_excl, cb_shared; }; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h new file mode 100644 index 000000000000..bcdb1a3e4b1f --- /dev/null +++ b/include/linux/dma-direct.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_DMA_DIRECT_H +#define _LINUX_DMA_DIRECT_H 1 + +#include <linux/dma-mapping.h> + +#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA +#include <asm/dma-direct.h> +#else +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + dma_addr_t dev_addr = (dma_addr_t)paddr; + + return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + phys_addr_t paddr = (phys_addr_t)dev_addr; + + return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev->dma_mask) + return false; + + return addr + size - 1 <= *dev->dma_mask; +} +#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ + +#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN +void dma_mark_clean(void *addr, size_t size); +#else +static inline void dma_mark_clean(void *addr, size_t size) +{ +} +#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ + +void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs); +void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); +int dma_direct_supported(struct device *dev, u64 mask); + +#endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 332a5420243c..bc8940ca280d 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -21,6 +21,7 @@ #define __LINUX_DMA_FENCE_ARRAY_H #include <linux/dma-fence.h> +#include <linux/irq_work.h> /** * struct dma_fence_array_cb - callback helper for fence array @@ -47,6 +48,8 @@ 
struct dma_fence_array { unsigned num_fences; atomic_t num_pending; struct dma_fence **fences; + + struct irq_work work; }; extern const struct dma_fence_ops dma_fence_array_ops; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index efdabbb64e3c..4c008170fe65 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) * The caller is required to hold the RCU read lock. */ static inline struct dma_fence * -dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) +dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) { do { struct dma_fence *fence; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 81ed9b2d84dc..34fe8463d10e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -136,7 +136,7 @@ struct dma_map_ops { int is_phys; }; -extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_direct_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, void *cpu_addr; BUG_ON(!ops); + WARN_ON_ONCE(dev && !dev->coherent_dma_mask); if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) return cpu_addr; + /* + * Let the implementation decide on the zone to allocate from, and + * decide on the way of zeroing the memory given that the memory + * returned should always be zeroed. + */ + flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO); + if (!arch_dma_alloc_attrs(&dev, &flag)) return NULL; if (!ops->alloc) @@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } +/* + * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please + * don't use this is new code. 
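Returning to the dma_alloc_attrs() changes above (the new coherent-mask WARN_ON_ONCE and the zone/zero gfp filtering), here is an illustrative probe-time sequence, not part of this patch, that sets the masks before allocating; the device specifics are assumptions.

static int my_setup_dma(struct device *dev, void **vaddr, dma_addr_t *handle)
{
        int ret;

        /* Set both streaming and coherent masks before any allocation. */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        /* The returned buffer is always zeroed; no need for __GFP_ZERO. */
        *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
        return *vaddr ? 0 : -ENOMEM;
}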
+ */ +#ifndef arch_dma_supported +#define arch_dma_supported(dev, mask) (1) +#endif + static inline void dma_check_mask(struct device *dev, u64 mask) { if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) @@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask) if (!ops) return 0; + if (!arch_dma_supported(dev, mask)) + return 0; + if (!ops->dma_supported) return 1; return ops->dma_supported(dev, mask); @@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) #ifndef dma_max_pfn static inline unsigned long dma_max_pfn(struct device *dev) { - return *dev->dma_mask >> PAGE_SHIFT; + return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; } #endif diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index f48a85c377de..b4f22112ba75 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -23,9 +23,10 @@ struct lan9303 { struct regmap_irq_chip_data *irq_data; struct gpio_desc *reset_gpio; u32 reset_duration; /* in [ms] */ - bool phy_addr_sel_strap; + int phy_addr_base; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + struct mutex alr_mutex; /* protect ALR access */ const struct lan9303_phy_ops *ops; bool is_bridged; /* true if port 1 and 2 are bridged */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 29fdf8029cf6..f5083aa72eae 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -475,6 +475,39 @@ typedef struct { u64 get_all; } apple_properties_protocol_64_t; +typedef struct { + u32 get_capability; + u32 get_event_log; + u32 hash_log_extend_event; + u32 submit_command; + u32 get_active_pcr_banks; + u32 set_active_pcr_banks; + u32 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_32_t; + +typedef struct { + u64 get_capability; + u64 get_event_log; + u64 hash_log_extend_event; + u64 submit_command; + u64 get_active_pcr_banks; + u64 set_active_pcr_banks; + u64 get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_64_t; + +typedef u32 efi_tcg2_event_log_format; + +typedef struct { + void *get_capability; + efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, + efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); + void *hash_log_extend_event; + void *submit_command; + void *get_active_pcr_banks; + void *set_active_pcr_banks; + void *get_result_of_set_active_pcr_banks; +} efi_tcg2_protocol_t; + /* * Types and defines for EFI ResetSystem */ @@ -625,6 +658,7 @@ void efi_native_runtime_setup(void); #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) +#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) @@ -637,6 +671,7 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 
0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) +#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) typedef struct { efi_guid_t guid; @@ -911,6 +946,7 @@ extern struct efi { unsigned long properties_table; /* properties table */ unsigned long mem_attr_table; /* memory attributes table */ unsigned long rng_seed; /* UEFI firmware random seed */ + unsigned long tpm_log; /* TPM2 Event Log table */ efi_get_time_t *get_time; efi_set_time_t *set_time; efi_get_wakeup_time_t *get_wakeup_time; @@ -1536,6 +1572,8 @@ static inline void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } #endif +void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); + /* * Arch code can implement the following three template macros, avoiding * reptition for the void/non-void return cases of {__,}efi_call_virt(): @@ -1603,4 +1641,12 @@ struct linux_efi_random_seed { u8 bits[]; }; +struct linux_efi_tpm_eventlog { + u32 size; + u8 version; + u8 log[]; +}; + +extern int efi_tpm_eventlog_init(void); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h new file mode 100644 index 000000000000..280c61ecbf20 --- /dev/null +++ b/include/linux/error-injection.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ERROR_INJECTION_H +#define _LINUX_ERROR_INJECTION_H + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + +#include <asm/error-injection.h> + +extern bool within_error_injection_list(unsigned long addr); +extern int get_injectable_error_type(unsigned long addr); + +#else /* !CONFIG_FUNCTION_ERROR_INJECTION */ + +#include <asm-generic/error-injection.h> +static inline bool within_error_injection_list(unsigned long addr) +{ + return false; +} + +static inline int get_injectable_error_type(unsigned long addr) +{ + return EI_ETYPE_NONE; +} + +#endif + +#endif /* _LINUX_ERROR_INJECTION_H */ diff --git a/include/linux/errseq.h b/include/linux/errseq.h index 6ffae9c5052d..fc2777770768 100644 --- a/include/linux/errseq.h +++ b/include/linux/errseq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * See Documentation/errseq.rst and lib/errseq.c + * See Documentation/core-api/errseq.rst and lib/errseq.c */ #ifndef _LINUX_ERRSEQ_H #define _LINUX_ERRSEQ_H diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 60b2985e8a18..7094718b653b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -26,18 +26,16 @@ #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) +struct eventfd_ctx; struct file; #ifdef CONFIG_EVENTFD -struct file *eventfd_file_create(unsigned int count, int flags); -struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); struct eventfd_ctx *eventfd_ctx_fdget(int fd); struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); -ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt); @@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w * Ugly ugly ugly error layer to support modules that uses eventfd but * pretend to work in 
!CONFIG_EVENTFD configurations. Namely, AIO. */ -static inline struct file *eventfd_file_create(unsigned int count, int flags) -{ - return ERR_PTR(-ENOSYS); -} static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) { @@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) } -static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, - __u64 *cnt) -{ - return -ENOSYS; -} - static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, __u64 *cnt) { diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 43e98d30d2df..58aecb60ea51 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -117,6 +117,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 #define CP_CRC_RECOVERY_FLAG 0x00000040 @@ -212,6 +213,7 @@ struct f2fs_extent { #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ +#define F2FS_PIN_FILE 0x40 /* file should not be gced */ struct f2fs_inode { __le16 i_mode; /* file mode */ @@ -229,7 +231,13 @@ struct f2fs_inode { __le32 i_ctime_nsec; /* change time in nano scale */ __le32 i_mtime_nsec; /* modification time in nano scale */ __le32 i_generation; /* file version (for NFS) */ - __le32 i_current_depth; /* only for directory depth */ + union { + __le32 i_current_depth; /* only for directory depth */ + __le16 i_gc_failures; /* + * # of gc failures on pinned file. + * only for regular files. + */ + }; __le32 i_xattr_nid; /* nid to save xattr */ __le32 i_flags; /* file attributes */ __le32 i_pino; /* parent inode number */ @@ -245,8 +253,10 @@ struct f2fs_inode { __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ __le32 i_projid; /* project id */ __le32 i_inode_checksum;/* inode meta checksum */ + __le64 i_crtime; /* creation time */ + __le32 i_crtime_nsec; /* creation time in nano scale */ __le32 i_extra_end[0]; /* for attribute size calculation */ - }; + } __packed; __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ }; __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), diff --git a/include/linux/fb.h b/include/linux/fb.h index bc24e48e396d..d1e5bed39140 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -465,6 +465,11 @@ struct fb_info { atomic_t count; int node; int flags; + /* + * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows + * a lcd is not mounted upright and fbcon should rotate to compensate. 
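As an illustrative aside (not part of this patch), a framebuffer driver could use the fbcon_rotate_hint field described above and declared just below like this; the panel orientation and function name are assumptions.

static void my_fb_hint_rotation(struct fb_info *info, bool panel_upside_down)
{
        /* Defaults to -1 ("no hint"); FB_ROTATE_* values ask fbcon to
         * compensate for how the panel is mounted. */
        info->fbcon_rotate_hint = panel_upside_down ? FB_ROTATE_UD : -1;
}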
+ */ + int fbcon_rotate_hint; struct mutex lock; /* Lock for open/release/ioctl funcs */ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ struct fb_var_screeninfo var; /* Current var */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 1c65817673db..41615f38bcff 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <linux/nospec.h> #include <linux/types.h> #include <linux/init.h> #include <linux/fs.h> @@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i { struct fdtable *fdt = rcu_dereference_raw(files->fdt); - if (fd < fdt->max_fds) + if (fd < fdt->max_fds) { + fd = array_index_nospec(fd, fdt->max_fds); return rcu_dereference_raw(fdt->fd[fd]); + } return NULL; } diff --git a/include/linux/filter.h b/include/linux/filter.h index 80b5b482cb46..276932d75975 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -18,7 +18,9 @@ #include <linux/capability.h> #include <linux/cryptohash.h> #include <linux/set_memory.h> +#include <linux/kallsyms.h> +#include <net/xdp.h> #include <net/sch_generic.h> #include <uapi/linux/filter.h> @@ -58,6 +60,9 @@ struct bpf_prog_aux; /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 +/* unused opcode to mark call to interpreter with arguments */ +#define BPF_CALL_ARGS 0xe0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -455,10 +460,14 @@ struct bpf_binary_header { struct bpf_prog { u16 pages; /* Number of allocated pages */ u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ locked:1, /* Program image locked? */ gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ - dst_needed:1; /* Do we need dst entry? */ + dst_needed:1, /* Do we need dst entry? */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ + kprobe_override:1; /* Do we override a kprobe? */ enum bpf_prog_type type; /* Type of BPF program */ u32 len; /* Number of filter blocks */ u32 jited_len; /* Size of jited insns in bytes */ @@ -495,6 +504,7 @@ struct xdp_buff { void *data_end; void *data_meta; void *data_hard_start; + struct xdp_rxq_info *rxq; }; /* Compute the linear packet data range [data, data_end) which @@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb) struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); +bool bpf_opcode_in_insntable(u8 code); + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); @@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +#define __bpf_call_base_args \ + ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ + __bpf_call_base) struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); +static inline bool bpf_dump_raw_ok(void) +{ + /* Reconstruction of call-sites is dependent on kallsyms, + * thus make dump the same restriction. 
+ */ + return kallsyms_show_value() == 1; +} + struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); @@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) return fp->jited && bpf_jit_is_ebpf(); } -static inline bool bpf_jit_blinding_enabled(void) +static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) { /* These are the prerequisites, should someone ever have the * idea to call blinding outside of them, we make sure to @@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void) */ if (!bpf_jit_is_ebpf()) return false; - if (!bpf_jit_enable) + if (!prog->jit_requested) return false; if (!bpf_jit_harden) return false; @@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern { struct sock *sk; u32 op; union { + u32 args[4]; u32 reply; u32 replylong[4]; }; + u32 is_fullsock; + u64 temp; /* temp and everything after is not + * initialized to 0 before calling + * the BPF program. New fields that + * should be initialized to 0 should + * be inserted before temp. + * temp is scratch storage used by + * sock_ops_convert_ctx_access + * as temporary storage of a register. + */ }; #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index aa66c87c120b..3694821a6d2d 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -1,10 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include <linux/device.h> -#include <linux/fpga/fpga-mgr.h> #ifndef _LINUX_FPGA_BRIDGE_H #define _LINUX_FPGA_BRIDGE_H +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> + struct fpga_bridge; /** @@ -12,11 +13,13 @@ struct fpga_bridge; * @enable_show: returns the FPGA bridge's status * @enable_set: set a FPGA bridge as enabled or disabled * @fpga_bridge_remove: set FPGA into a specific state during driver remove + * @groups: optional attribute groups. 
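bpf_jit_blinding_enabled() now takes the program itself and keys off prog->jit_requested instead of the global bpf_jit_enable knob, so blinding decisions become per program. A rough, simplified sketch of how a caller might combine it with the existing constant-blinding helper; this is loosely modelled on how in-tree JITs use the API and is not taken from this patch:

#include <linux/filter.h>
#include <linux/err.h>

/* Sketch: blind constants only when this particular prog asked for a JIT. */
static struct bpf_prog *example_maybe_blind(struct bpf_prog *prog)
{
	struct bpf_prog *blinded;

	if (!bpf_jit_blinding_enabled(prog))
		return prog;

	blinded = bpf_jit_blind_constants(prog);
	return IS_ERR(blinded) ? prog : blinded;
}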
*/ struct fpga_bridge_ops { int (*enable_show)(struct fpga_bridge *bridge); int (*enable_set)(struct fpga_bridge *bridge, bool enable); void (*fpga_bridge_remove)(struct fpga_bridge *bridge); + const struct attribute_group **groups; }; /** @@ -43,6 +46,8 @@ struct fpga_bridge { struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, struct fpga_image_info *info); +struct fpga_bridge *fpga_bridge_get(struct device *dev, + struct fpga_image_info *info); void fpga_bridge_put(struct fpga_bridge *bridge); int fpga_bridge_enable(struct fpga_bridge *bridge); int fpga_bridge_disable(struct fpga_bridge *bridge); @@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge); int fpga_bridges_enable(struct list_head *bridge_list); int fpga_bridges_disable(struct list_head *bridge_list); void fpga_bridges_put(struct list_head *bridge_list); -int fpga_bridge_get_to_list(struct device_node *np, +int fpga_bridge_get_to_list(struct device *dev, struct fpga_image_info *info, struct list_head *bridge_list); +int of_fpga_bridge_get_to_list(struct device_node *np, + struct fpga_image_info *info, + struct list_head *bridge_list); int fpga_bridge_register(struct device *dev, const char *name, const struct fpga_bridge_ops *br_ops, void *priv); diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index bfa14bc023fb..3c6de23aabdf 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -1,7 +1,8 @@ /* * FPGA Framework * - * Copyright (C) 2013-2015 Altera Corporation + * Copyright (C) 2013-2016 Altera Corporation + * Copyright (C) 2017 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -15,12 +16,12 @@ * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ -#include <linux/mutex.h> -#include <linux/platform_device.h> - #ifndef _LINUX_FPGA_MGR_H #define _LINUX_FPGA_MGR_H +#include <linux/mutex.h> +#include <linux/platform_device.h> + struct fpga_manager; struct sg_table; @@ -83,12 +84,26 @@ enum fpga_mgr_states { * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) * @config_complete_timeout_us: maximum time for FPGA to switch to operating * status in the write_complete op. + * @firmware_name: name of FPGA image firmware file + * @sgt: scatter/gather table containing FPGA image + * @buf: contiguous buffer containing FPGA image + * @count: size of buf + * @dev: device that owns this + * @overlay: Device Tree overlay */ struct fpga_image_info { u32 flags; u32 enable_timeout_us; u32 disable_timeout_us; u32 config_complete_timeout_us; + char *firmware_name; + struct sg_table *sgt; + const char *buf; + size_t count; + struct device *dev; +#ifdef CONFIG_OF + struct device_node *overlay; +#endif }; /** @@ -100,6 +115,7 @@ struct fpga_image_info { * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done * @fpga_remove: optional: Set FPGA into a specific state during driver remove + * @groups: optional attribute groups. * * fpga_manager_ops are the low level functions implemented by a specific * fpga manager driver. 
The optional ones are tested for NULL before being @@ -116,6 +132,7 @@ struct fpga_manager_ops { int (*write_complete)(struct fpga_manager *mgr, struct fpga_image_info *info); void (*fpga_remove)(struct fpga_manager *mgr); + const struct attribute_group **groups; }; /** @@ -138,14 +155,14 @@ struct fpga_manager { #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) -int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, - const char *buf, size_t count); -int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info, - struct sg_table *sgt); +struct fpga_image_info *fpga_image_info_alloc(struct device *dev); + +void fpga_image_info_free(struct fpga_image_info *info); + +int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); -int fpga_mgr_firmware_load(struct fpga_manager *mgr, - struct fpga_image_info *info, - const char *image_name); +int fpga_mgr_lock(struct fpga_manager *mgr); +void fpga_mgr_unlock(struct fpga_manager *mgr); struct fpga_manager *of_fpga_mgr_get(struct device_node *node); diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h new file mode 100644 index 000000000000..b6520318ab9c --- /dev/null +++ b/include/linux/fpga/fpga-region.h @@ -0,0 +1,40 @@ +#ifndef _FPGA_REGION_H +#define _FPGA_REGION_H + +#include <linux/device.h> +#include <linux/fpga/fpga-mgr.h> +#include <linux/fpga/fpga-bridge.h> + +/** + * struct fpga_region - FPGA Region structure + * @dev: FPGA Region device + * @mutex: enforces exclusive reference to region + * @bridge_list: list of FPGA bridges specified in region + * @mgr: FPGA manager + * @info: FPGA image info + * @priv: private data + * @get_bridges: optional function to get bridges to a list + * @groups: optional attribute groups. 
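The fpga-mgr.h changes above fold the three former load entry points into a single fpga_mgr_load() driven by an extended struct fpga_image_info, with explicit fpga_mgr_lock()/fpga_mgr_unlock() and alloc/free helpers. A hedged sketch of the resulting consumer flow; the firmware file name and the function are made up for illustration, and real users normally go through an FPGA region rather than calling the manager directly:

#include <linux/device.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/slab.h>

static int example_program_fpga(struct fpga_manager *mgr)
{
	struct fpga_image_info *info;
	int ret;

	info = fpga_image_info_alloc(&mgr->dev);
	if (!info)
		return -ENOMEM;

	/* Ask the manager to fetch and program a firmware image. */
	info->firmware_name = devm_kstrdup(&mgr->dev, "example-image.rbf",
					   GFP_KERNEL);
	if (!info->firmware_name) {
		ret = -ENOMEM;
		goto out;
	}

	ret = fpga_mgr_lock(mgr);
	if (ret)
		goto out;
	ret = fpga_mgr_load(mgr, info);
	fpga_mgr_unlock(mgr);
out:
	fpga_image_info_free(info);
	return ret;
}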
+ */ +struct fpga_region { + struct device dev; + struct mutex mutex; /* for exclusive reference to region */ + struct list_head bridge_list; + struct fpga_manager *mgr; + struct fpga_image_info *info; + void *priv; + int (*get_bridges)(struct fpga_region *region); + const struct attribute_group **groups; +}; + +#define to_fpga_region(d) container_of(d, struct fpga_region, dev) + +struct fpga_region *fpga_region_class_find( + struct device *start, const void *data, + int (*match)(struct device *, const void *)); + +int fpga_region_program_fpga(struct fpga_region *region); +int fpga_region_register(struct device *dev, struct fpga_region *region); +int fpga_region_unregister(struct fpga_region *region); + +#endif /* _FPGA_REGION_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6804d075933e..2a815560fda0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass) down_write_nested(&inode->i_rwsem, subclass); } +static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) +{ + down_read_nested(&inode->i_rwsem, subclass); +} + void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); @@ -1359,7 +1364,7 @@ struct super_block { const struct fscrypt_operations *s_cop; - struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ + struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ struct block_device *s_bdev; struct backing_dev_info *s_bdi; @@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *); extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag); +int vfs_mkobj(struct dentry *, umode_t, + int (*f)(struct dentry *, umode_t, void *), + void *); + /* * VFS file helper functions. 
*/ @@ -1698,7 +1707,7 @@ struct file_operations { ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); - unsigned int (*poll) (struct file *, struct poll_table_struct *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); @@ -2684,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *, struct inode **); extern int inode_permission(struct inode *, int); -extern int __inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int); extern int __check_sticky(struct inode *dir, struct inode *inode); @@ -2977,6 +2985,7 @@ enum { }; void dio_end_io(struct bio *bio); +void dio_warn_stale_pagecache(struct file *filp); ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, @@ -3224,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) ki->ki_flags |= IOCB_DSYNC; if (flags & RWF_SYNC) ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); + if (flags & RWF_APPEND) + ki->ki_flags |= IOCB_APPEND; return 0; } diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 08b4b40c5aa8..952ab97af325 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -14,42 +14,13 @@ #ifndef _LINUX_FSCRYPT_H #define _LINUX_FSCRYPT_H -#include <linux/key.h> #include <linux/fs.h> -#include <linux/mm.h> -#include <linux/bio.h> -#include <linux/dcache.h> -#include <crypto/skcipher.h> -#include <uapi/linux/fs.h> #define FS_CRYPTO_BLOCK_SIZE 16 +struct fscrypt_ctx; struct fscrypt_info; -struct fscrypt_ctx { - union { - struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { - struct bio *bio; - struct work_struct work; - } r; - struct list_head free_list; /* Free list */ - }; - u8 flags; /* Flags */ -}; - -/** - * For encrypted symlinks, the ciphertext length is stored at the beginning - * of the string in little-endian format. - */ -struct fscrypt_symlink_data { - __le16 len; - char encrypted_path[1]; -} __packed; - struct fscrypt_str { unsigned char *name; u32 len; @@ -68,89 +39,14 @@ struct fscrypt_name { #define fname_name(p) ((p)->disk_name.name) #define fname_len(p) ((p)->disk_name.len) -/* - * fscrypt superblock flags - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - -/* - * crypto opertions for filesystems - */ -struct fscrypt_operations { - unsigned int flags; - const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); - unsigned (*max_namelen)(struct inode *); -}; - /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
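The file_operations hunk above switches ->poll() from unsigned int to the new __poll_t type, which lets sparse catch return values that are not EPOLL* bits. A minimal hypothetical driver method against the new prototype; the wait queue and ready flag are stand-ins, not from this patch:

#include <linux/fs.h>
#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical */
static bool example_data_ready;			/* hypothetical */

static __poll_t example_poll(struct file *file, struct poll_table_struct *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &example_wq, wait);
	if (example_data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}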
*/ #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) -{ - if (inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode)) - return true; - return false; -} - -static inline bool fscrypt_valid_enc_modes(u32 contents_mode, - u32 filenames_mode) -{ - if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && - filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) - return true; - - if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && - filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) - return true; - - return false; -} - -static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) -{ - if (str->len == 1 && str->name[0] == '.') - return true; - - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') - return true; - - return false; -} - #if __FS_HAS_ENCRYPTION - -static inline struct page *fscrypt_control_page(struct page *page) -{ - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return (inode->i_crypt_info != NULL); -} - #include <linux/fscrypt_supp.h> - -#else /* !__FS_HAS_ENCRYPTION */ - -static inline struct page *fscrypt_control_page(struct page *page) -{ - WARN_ON_ONCE(1); - return ERR_PTR(-EINVAL); -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return 0; -} - +#else #include <linux/fscrypt_notsupp.h> -#endif /* __FS_HAS_ENCRYPTION */ +#endif /** * fscrypt_require_key - require an inode's encryption key @@ -291,4 +187,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, return 0; } +/** + * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * @dir: directory in which the symlink is being created + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @max_len: space the filesystem has available to store the symlink target + * @disk_link: (out) the on-disk symlink target being prepared + * + * This function computes the size the symlink target will require on-disk, + * stores it in @disk_link->len, and validates it against @max_len. An + * encrypted symlink may be longer than the original. + * + * Additionally, @disk_link->name is set to @target if the symlink will be + * unencrypted, but left NULL if the symlink will be encrypted. For encrypted + * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the + * on-disk target later. (The reason for the two-step process is that some + * filesystems need to know the size of the symlink target before creating the + * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) + * + * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, + * -ENOKEY if the encryption key is missing, or another -errno code if a problem + * occurred while setting up the encryption key. 
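A sketch of the two-step symlink flow that fscrypt_prepare_symlink() (documented above, defined just below) and fscrypt_encrypt_symlink() further down are designed for, as seen from a filesystem's ->symlink() path. Everything here is illustrative: EXAMPLE_MAX_LEN, the elided inode allocation and the final write-out are placeholders, not part of this patch:

#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/string.h>

#define EXAMPLE_MAX_LEN 4096	/* hypothetical on-disk limit */

/* "inode" is the freshly allocated symlink inode; allocation elided. */
static int example_symlink_step(struct inode *dir, struct inode *inode,
				const char *target)
{
	struct fscrypt_str disk_link;
	int err;

	err = fscrypt_prepare_symlink(dir, target, strlen(target),
				      EXAMPLE_MAX_LEN, &disk_link);
	if (err)
		return err;	/* e.g. -ENAMETOOLONG or -ENOKEY */

	err = fscrypt_encrypt_symlink(inode, target, strlen(target),
				      &disk_link);
	if (err)
		return err;

	/* ...store disk_link.name / disk_link.len on disk, then free
	 * disk_link.name if the fs did not supply the buffer itself... */
	return 0;
}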
+ */ +static inline int fscrypt_prepare_symlink(struct inode *dir, + const char *target, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); + + disk_link->name = (unsigned char *)target; + disk_link->len = len + 1; + if (disk_link->len > max_len) + return -ENAMETOOLONG; + return 0; +} + +/** + * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * @inode: symlink inode + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @disk_link: (in/out) the on-disk symlink target being prepared + * + * If the symlink target needs to be encrypted, then this function encrypts it + * into @disk_link->name. fscrypt_prepare_symlink() must have been called + * previously to compute @disk_link->len. If the filesystem did not allocate a + * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one + * will be kmalloc()'ed and the filesystem will be responsible for freeing it. + * + * Return: 0 on success, -errno on failure + */ +static inline int fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(inode)) + return __fscrypt_encrypt_symlink(inode, target, len, disk_link); + return 0; +} + #endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 63e58808519a..44b50c04bae9 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -14,6 +14,16 @@ #ifndef _LINUX_FSCRYPT_NOTSUPP_H #define _LINUX_FSCRYPT_NOTSUPP_H +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return false; +} + /* crypto.c */ static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) @@ -43,6 +53,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode, return -EOPNOTSUPP; } +static inline struct page *fscrypt_control_page(struct page *page) +{ + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +} static inline void fscrypt_restore_control_page(struct page *page) { @@ -90,8 +105,7 @@ static inline int fscrypt_get_encryption_info(struct inode *inode) return -EOPNOTSUPP; } -static inline void fscrypt_put_encryption_info(struct inode *inode, - struct fscrypt_info *ci) +static inline void fscrypt_put_encryption_info(struct inode *inode) { return; } @@ -116,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) return; } -static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode, - u32 ilen) -{ - /* never happens */ - WARN_ON(1); - return 0; -} - static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, - u32 ilen, + u32 max_encrypted_len, struct fscrypt_str *crypto_str) { return -EOPNOTSUPP; @@ -144,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode, return -EOPNOTSUPP; } -static inline int fscrypt_fname_usr_to_disk(struct inode *inode, - const struct qstr *iname, - struct fscrypt_str *oname) -{ - return -EOPNOTSUPP; -} - static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { @@ -208,4 +207,28 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } +static inline int __fscrypt_prepare_symlink(struct inode *dir, + 
unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline int __fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline const char *fscrypt_get_symlink(struct inode *inode, + const void *caddr, + unsigned int max_size, + struct delayed_call *done) +{ + return ERR_PTR(-EOPNOTSUPP); +} + #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index cf9e9fc02f0a..477a7a6504d2 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -11,8 +11,54 @@ #ifndef _LINUX_FSCRYPT_SUPP_H #define _LINUX_FSCRYPT_SUPP_H +#include <linux/mm.h> +#include <linux/slab.h> + +/* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* + * crypto operations for filesystems + */ +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + bool (*dummy_context)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); +}; + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ +}; + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return (inode->i_crypt_info != NULL); +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode); +} + /* crypto.c */ -extern struct kmem_cache *fscrypt_info_cachep; extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, @@ -20,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, u64, gfp_t); extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, unsigned int, u64); + +static inline struct page *fscrypt_control_page(struct page *page) +{ + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +} + extern void fscrypt_restore_control_page(struct page *); extern const struct dentry_operations fscrypt_d_ops; @@ -44,7 +96,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); /* keyinfo.c */ extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); +extern void fscrypt_put_encryption_info(struct inode *); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, @@ -55,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) kfree(fname->crypto_buf.name); } -extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, const struct fscrypt_str *, struct fscrypt_str *); -extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, - struct fscrypt_str *); #define 
FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 @@ -153,5 +202,14 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *new_dentry, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); +extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, + struct fscrypt_str *disk_link); +extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); #endif /* _LINUX_FSCRYPT_SUPP_H */ diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 411a84c6c400..4fa1a489efe4 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -15,6 +15,7 @@ #include <linux/types.h> struct fwnode_operations; +struct device; struct fwnode_handle { struct fwnode_handle *secondary; @@ -51,6 +52,7 @@ struct fwnode_reference_args { * struct fwnode_operations - Operations for fwnode interface * @get: Get a reference to an fwnode. * @put: Put a reference to an fwnode. + * @device_get_match_data: Return the device driver match data. * @property_present: Return true if a property is present. * @property_read_integer_array: Read an array of integer properties. Return * zero on success, a negative error code @@ -71,6 +73,8 @@ struct fwnode_operations { struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); void (*put)(struct fwnode_handle *fwnode); bool (*device_is_available)(const struct fwnode_handle *fwnode); + void *(*device_get_match_data)(const struct fwnode_handle *fwnode, + const struct device *dev); bool (*property_present)(const struct fwnode_handle *fwnode, const char *propname); int (*property_read_int_array)(const struct fwnode_handle *fwnode, diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 8ef7fc0ce0f0..91ed23468530 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -1,4 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * <linux/gpio.h> + * + * This is the LEGACY GPIO bulk include file, including legacy APIs. It is + * used for GPIO drivers still referencing the global GPIO numberspace, + * and should not be included in new code. 
+ * + * If you're implementing a GPIO driver, only include <linux/gpio/driver.h> + * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h> + */ #ifndef __LINUX_GPIO_H #define __LINUX_GPIO_H diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 7447d85dbe2f..dbd065963296 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size, int *value_array); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); +int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); @@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); /* Child properties interface */ +struct device_node; struct fwnode_handle; +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) return -ENOSYS; } +static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} + static inline int gpiod_is_active_low(const struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) } /* Child properties interface */ +struct device_node; struct fwnode_handle; static inline +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 7258cd676df4..1ba9a331ec51 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, struct lock_class_key *lock_key, struct lock_class_key *request_key); +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, + unsigned int offset); + #ifdef CONFIG_LOCKDEP /* diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 846be7c69a52..b2f2dc638463 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -10,8 +10,8 @@ enum gpio_lookup_flags { GPIO_ACTIVE_LOW = (1 << 0), GPIO_OPEN_DRAIN = (1 << 1), GPIO_OPEN_SOURCE = (1 << 2), - GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3), - GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3), + GPIO_PERSISTENT = (0 << 3), + GPIO_TRANSITORY = (1 << 3), }; /** diff --git a/include/linux/hid.h b/include/linux/hid.h index d491027a7c22..091a81cf330f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -281,6 +281,7 @@ struct hid_item { #define HID_DG_DEVICECONFIG 0x000d000e #define HID_DG_DEVICESETTINGS 0x000d0023 +#define HID_DG_AZIMUTH 0x000d003f #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -342,6 +343,7 @@ struct hid_item { #define HID_QUIRK_SKIP_OUTPUT_REPORTS 
0x00010000 #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 +#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000 #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 #define HID_QUIRK_NO_IGNORE 0x40000000 @@ -671,6 +673,7 @@ struct hid_usage_id { * to be called) * @dyn_list: list of dynamically added device ids * @dyn_lock: lock protecting @dyn_list + * @match: check if the given device is handled by this driver * @probe: new device inserted * @remove: device removed (NULL if not a hot-plug capable driver) * @report_table: on which reports to call raw_event (NULL means all) @@ -683,6 +686,8 @@ struct hid_usage_id { * @input_mapped: invoked on input registering after mapping an usage * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering + * @bus_add_driver: invoked when a HID driver is about to be added + * @bus_removed_driver: invoked when a HID driver has been removed * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) * @reset_resume: invoked on resume if device was reset (NULL means nop) @@ -711,6 +716,7 @@ struct hid_driver { struct list_head dyn_list; spinlock_t dyn_lock; + bool (*match)(struct hid_device *dev, bool ignore_special_driver); int (*probe)(struct hid_device *dev, const struct hid_device_id *id); void (*remove)(struct hid_device *dev); @@ -736,6 +742,8 @@ struct hid_driver { void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); + void (*bus_add_driver)(struct hid_driver *driver); + void (*bus_removed_driver)(struct hid_driver *driver); #ifdef CONFIG_PM int (*suspend)(struct hid_device *hdev, pm_message_t message); int (*resume)(struct hid_device *hdev); @@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *); extern int hid_add_device(struct hid_device *); extern void hid_destroy_device(struct hid_device *); +extern struct bus_type hid_bus_type; + extern int __must_check __hid_register_driver(struct hid_driver *, struct module *, const char *mod_name); @@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); -const struct hid_device_id *hid_match_id(struct hid_device *hdev, +bool hid_match_one_id(const struct hid_device *hdev, + const struct hid_device_id *id); +const struct hid_device_id *hid_match_id(const struct hid_device *hdev, const struct hid_device_id *id); +const struct hid_device_id *hid_match_device(struct hid_device *hdev, + struct hid_driver *hdrv); s32 hid_snto32(__u32 value, unsigned n); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); @@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); /* HID quirks API */ -u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); -int usbhid_quirks_init(char **quirks_param); -void usbhid_quirks_exit(void); +unsigned long hid_lookup_quirk(const struct hid_device *hdev); +int hid_quirks_init(char **quirks_param, __u16 bus, int count); +void hid_quirks_exit(__u16 bus); #ifdef CONFIG_HID_PID int hid_pidff_init(struct hid_device *hid); diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h index 394a8405dd74..774f7d3b8f6a 100644 --- 
a/include/linux/hil_mlc.h +++ b/include/linux/hil_mlc.h @@ -144,12 +144,12 @@ struct hil_mlc { hil_packet ipacket[16]; hil_packet imatch; int icount; - struct timeval instart; - suseconds_t intimeout; + unsigned long instart; + unsigned long intimeout; int ddi; /* Last operational device id */ int lcv; /* LCV to throttle loops */ - struct timeval lcv_tv; /* Time loop was started */ + time64_t lcv_time; /* Time loop was started */ int di_map[7]; /* Maps below items to live devs */ struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h index d392975d8887..6f1dee7e67e0 100644 --- a/include/linux/hp_sdc.h +++ b/include/linux/hp_sdc.h @@ -281,7 +281,7 @@ typedef struct { hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ int rcurr, rqty; /* Current read transact in process */ - struct timeval rtv; /* Time when current read started */ + ktime_t rtime; /* Time when current read started */ int wcurr; /* Current write transact in process */ int dev_err; /* carries status from registration */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 82a25880714a..36fa6a2a82e3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); +void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); void free_huge_page(struct page *page); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; @@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); -extern int hugepages_treat_as_movable; extern int sysctl_hugetlb_shm_group; extern struct list_head huge_boot_pages; @@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot); bool is_hugetlb_entry_migration(pte_t pte); + #else /* !CONFIG_HUGETLB_PAGE */ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) @@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list) return false; } #define putback_active_hugepage(p) do {} while (0) +#define move_hugetlb_state(old, new, reason) do {} while (0) static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) @@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) return sb->s_fs_info; } +struct hugetlbfs_inode_info { + struct shared_policy policy; + struct inode vfs_inode; + unsigned int seals; +}; + +static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) +{ + return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); +} + extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, @@ -343,10 +356,10 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct page *alloc_huge_page_node(struct hstate *h, int nid); -struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve); struct page 
*alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask); +struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, + unsigned long address); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); @@ -524,7 +537,7 @@ struct hstate {}; #define alloc_huge_page(v, a, r) NULL #define alloc_huge_page_node(h, nid) NULL #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL -#define alloc_huge_page_noerr(v, a, r) NULL +#define alloc_huge_page_vma(h, vma, address) NULL #define alloc_bootmem_huge_page(h) NULL #define hstate_file(f) NULL #define hstate_sizelog(s) NULL diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6c9336626592..93bd6fcd6e62 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -127,28 +127,6 @@ struct hv_ring_buffer_info { u32 priv_read_index; }; -/* - * - * hv_get_ringbuffer_availbytes() - * - * Get number of bytes available to read and to write to - * for the specified ring buffer - */ -static inline void -hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, - u32 *read, u32 *write) -{ - u32 read_loc, write_loc, dsize; - - /* Capture the read/write indices before they changed */ - read_loc = rbi->ring_buffer->read_index; - write_loc = rbi->ring_buffer->write_index; - dsize = rbi->ring_datasize; - - *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : - read_loc - write_loc; - *read = dsize - *write; -} static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 0f774406fad0..419a38e7c315 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -55,7 +55,7 @@ typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); struct module; struct property_entry; -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to @@ -63,10 +63,68 @@ struct property_entry; * transmit an arbitrary number of messages without interruption. * @count must be be less than 64k since msg.len is u16. */ -extern int i2c_master_send(const struct i2c_client *client, const char *buf, - int count); -extern int i2c_master_recv(const struct i2c_client *client, char *buf, - int count); +extern int i2c_transfer_buffer_flags(const struct i2c_client *client, + char *buf, int count, u16 flags); + +/** + * i2c_master_recv - issue a single I2C message in master receive mode + * @client: Handle to slave device + * @buf: Where to store data read from slave + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. + */ +static inline int i2c_master_recv(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD); +}; + +/** + * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Where to store data read from slave, must be safe to use with DMA + * @count: How many bytes to read, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes read. 
+ */ +static inline int i2c_master_recv_dmasafe(const struct i2c_client *client, + char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, buf, count, + I2C_M_RD | I2C_M_DMA_SAFE); +}; + +/** + * i2c_master_send - issue a single I2C message in master transmit mode + * @client: Handle to slave device + * @buf: Data that will be written to the slave + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. + */ +static inline int i2c_master_send(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, 0); +}; + +/** + * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode + * using a DMA safe buffer + * @client: Handle to slave device + * @buf: Data that will be written to the slave, must be safe to use with DMA + * @count: How many bytes to write, must be less than 64k since msg.len is u16 + * + * Returns negative errno, or else the number of bytes written. + */ +static inline int i2c_master_send_dmasafe(const struct i2c_client *client, + const char *buf, int count) +{ + return i2c_transfer_buffer_flags(client, (char *)buf, count, + I2C_M_DMA_SAFE); +}; /* Transfer num messages. */ @@ -354,7 +412,7 @@ struct i2c_board_info { .type = dev_type, .addr = (dev_addr) -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) /* Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. @@ -485,40 +543,43 @@ struct i2c_timings { /** * struct i2c_bus_recovery_info - I2C bus recovery information * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or - * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). + * i2c_generic_scl_recovery(). * @get_scl: This gets current value of SCL line. Mandatory for generic SCL - * recovery. Used internally for generic GPIO recovery. - * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used - * internally for generic GPIO recovery. + * recovery. Populated internally for generic GPIO recovery. + * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery. + * Populated internally for generic GPIO recovery. * @get_sda: This gets current value of SDA line. Optional for generic SCL - * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO - * recovery. + * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic + * GPIO recovery. + * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery. + * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO + * recovery. * @prepare_recovery: This will be called before starting recovery. Platform may * configure padmux here for SDA/SCL line or something else they want. * @unprepare_recovery: This will be called after completing recovery. Platform * may configure padmux here for SDA/SCL line or something else they want. - * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. - * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. + * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. + * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. 
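With i2c_master_send()/i2c_master_recv() now thin wrappers around i2c_transfer_buffer_flags(), the new *_dmasafe variants simply add I2C_M_DMA_SAFE to promise the adapter that the buffer may be handed to DMA directly. A small hypothetical client write using the DMA-safe path; the function and variable names are ours:

#include <linux/i2c.h>
#include <linux/slab.h>

/* The payload is copied into a kmalloc() buffer, which, unlike stack
 * storage, is safe to map for DMA. */
static int example_dmasafe_write(struct i2c_client *client,
				 const u8 *payload, int len)
{
	char *buf = kmemdup(payload, len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = i2c_master_send_dmasafe(client, buf, len);
	kfree(buf);
	return ret < 0 ? ret : 0;
}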
*/ struct i2c_bus_recovery_info { - int (*recover_bus)(struct i2c_adapter *); + int (*recover_bus)(struct i2c_adapter *adap); - int (*get_scl)(struct i2c_adapter *); - void (*set_scl)(struct i2c_adapter *, int val); - int (*get_sda)(struct i2c_adapter *); + int (*get_scl)(struct i2c_adapter *adap); + void (*set_scl)(struct i2c_adapter *adap, int val); + int (*get_sda)(struct i2c_adapter *adap); + void (*set_sda)(struct i2c_adapter *adap, int val); - void (*prepare_recovery)(struct i2c_adapter *); - void (*unprepare_recovery)(struct i2c_adapter *); + void (*prepare_recovery)(struct i2c_adapter *adap); + void (*unprepare_recovery)(struct i2c_adapter *adap); /* gpio recovery */ - int scl_gpio; - int sda_gpio; + struct gpio_desc *scl_gpiod; + struct gpio_desc *sda_gpiod; }; int i2c_recover_bus(struct i2c_adapter *adap); /* Generic recovery routines */ -int i2c_generic_gpio_recovery(struct i2c_adapter *adap); int i2c_generic_scl_recovery(struct i2c_adapter *adap); /** @@ -706,7 +767,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter) /* administration... */ -#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) +#if IS_ENABLED(CONFIG_I2C) extern int i2c_add_adapter(struct i2c_adapter *); extern void i2c_del_adapter(struct i2c_adapter *); extern int i2c_add_numbered_adapter(struct i2c_adapter *); @@ -769,6 +830,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); } +u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); +void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); + int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); /** * module_i2c_driver() - Helper macro for registering a modular I2C driver diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h deleted file mode 100644 index 4dbe651f71f5..000000000000 --- a/include/linux/i7300_idle.h +++ /dev/null @@ -1,84 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef I7300_IDLE_H -#define I7300_IDLE_H - -#include <linux/pci.h> - -/* - * I/O AT controls (PCI bus 0 device 8 function 0) - * DIMM controls (PCI bus 0 device 16 function 1) - */ -#define IOAT_BUS 0 -#define IOAT_DEVFN PCI_DEVFN(8, 0) -#define MEMCTL_BUS 0 -#define MEMCTL_DEVFN PCI_DEVFN(16, 1) - -struct fbd_ioat { - unsigned int vendor; - unsigned int ioat_dev; - unsigned int enabled; -}; - -/* - * The i5000 chip-set has the same hooks as the i7300 - * but it is not enabled by default and must be manually - * manually enabled with "forceload=1" because it is - * only lightly validated. 
- */ - -static const struct fbd_ioat fbd_ioat_list[] = { - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, - {0, 0} -}; - -/* table of devices that work with this driver */ -static const struct pci_device_id pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, - { } /* Terminating entry */ -}; - -/* Check for known platforms with I/O-AT */ -static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, - struct pci_dev **ioat_dev, - int enable_all) -{ - int i; - struct pci_dev *memdev, *dmadev; - - memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); - if (!memdev) - return -ENODEV; - - for (i = 0; pci_tbl[i].vendor != 0; i++) { - if (memdev->vendor == pci_tbl[i].vendor && - memdev->device == pci_tbl[i].device) { - break; - } - } - if (pci_tbl[i].vendor == 0) - return -ENODEV; - - dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); - if (!dmadev) - return -ENODEV; - - for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { - if (dmadev->vendor == fbd_ioat_list[i].vendor && - dmadev->device == fbd_ioat_list[i].ioat_dev) { - if (!(fbd_ioat_list[i].enabled || enable_all)) - continue; - if (fbd_dev) - *fbd_dev = memdev; - if (ioat_dev) - *ioat_dev = dmadev; - - return 0; - } - } - return -ENODEV; -} - -#endif diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4c54611e03e9..622658dfbf0a 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -13,6 +13,8 @@ struct ifla_vf_stats { __u64 tx_bytes; __u64 broadcast; __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; }; struct ifla_vf_info { diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index bedf54b6f943..4cb7aeeafce0 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -30,10 +30,10 @@ struct macvlan_dev { enum macvlan_mode mode; u16 flags; int nest_level; + unsigned int macaddr_count; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif - unsigned int macaddr_count; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3ecef57c31e3..8e66866c11be 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -4,7 +4,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); -struct skb_array *tap_get_skb_array(struct file *file); +struct ptr_ring *tap_get_ptr_ring(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tap_get_skb_array(struct file *f) +static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } @@ -70,7 +70,7 @@ struct tap_queue { u16 queue_index; bool enabled; struct list_head next; - struct skb_array skb_array; + struct ptr_ring ring; }; rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bf9bdf42d577..c5b0a75a7812 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -17,9 +17,14 @@ #include <uapi/linux/if_tun.h> +#define TUN_XDP_FLAG 0x1UL + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct skb_array *tun_get_skb_array(struct file *file); +struct ptr_ring *tun_get_tx_ring(struct file *file); +bool tun_is_xdp_buff(void *ptr); +void 
*tun_xdp_to_ptr(void *ptr); +void *tun_ptr_to_xdp(void *ptr); #else #include <linux/err.h> #include <linux/errno.h> @@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tun_get_skb_array(struct file *f) +static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline bool tun_is_xdp_buff(void *ptr) +{ + return false; +} +static inline void *tun_xdp_to_ptr(void *ptr) +{ + return NULL; +} +static inline void *tun_ptr_to_xdp(void *ptr) +{ + return NULL; +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f12a61be1ede..11579fd4126e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -578,8 +578,8 @@ const struct iio_chan_spec * iio_device_register() - register a device with the IIO subsystem * @indio_dev: Device structure filled by the device driver **/ -#define iio_device_register(iio_dev) \ - __iio_device_register((iio_dev), THIS_MODULE) +#define iio_device_register(indio_dev) \ + __iio_device_register((indio_dev), THIS_MODULE) int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); void iio_device_unregister(struct iio_dev *indio_dev); /** diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h index 1601a2a63a72..5e1cfa75f652 100644 --- a/include/linux/iio/machine.h +++ b/include/linux/iio/machine.h @@ -28,4 +28,11 @@ struct iio_map { void *consumer_data; }; +#define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ +{ \ + .adc_channel_label = _provider_channel, \ + .consumer_dev_name = _consumer_dev_name, \ + .consumer_channel = _consumer_channel, \ +} + #endif diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 7d5e44518379..b19b7204ef84 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -43,12 +43,13 @@ struct iio_trigger_ops { /** * struct iio_trigger - industrial I/O trigger device * @ops: [DRIVER] operations structure + * @owner: [INTERN] owner of this driver module * @id: [INTERN] unique id number * @name: [DRIVER] unique name * @dev: [DRIVER] associated device (if relevant) * @list: [INTERN] used in maintenance of global trigger list * @alloc_list: [DRIVER] used for driver specific trigger list - * @use_count: use count for the trigger + * @use_count: [INTERN] use count for the trigger. * @subirq_chip: [INTERN] associate 'virtual' irq chip. * @subirq_base: [INTERN] base number for irqs provided by trigger. * @subirqs: [INTERN] information about the 'child' irqs. 
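The new IIO_MAP() initializer above trims the boilerplate of static channel maps in board or driver code. A made-up example (the provider and consumer names are illustrative), of the kind typically handed to iio_map_array_register():

#include <linux/iio/machine.h>

static struct iio_map example_adc_maps[] = {
	IIO_MAP("channel0", "example-battery", "voltage"),
	IIO_MAP("channel1", "example-battery", "temperature"),
	{ },	/* sentinel */
};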
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 1ac5bf95bfdd..e16fe7d44a71 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) } int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); -int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); +int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/init.h b/include/linux/init.h index ea1b31101d9e..506a98151131 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -5,6 +5,13 @@ #include <linux/compiler.h> #include <linux/types.h> +/* Built-in __init functions needn't be compiled with retpoline */ +#if defined(RETPOLINE) && !defined(MODULE) +#define __noretpoline __attribute__((indirect_branch("keep"))) +#else +#define __noretpoline +#endif + /* These macros are used to mark some functions or * initialized data (doesn't apply to uninitialized data) * as `initialization' functions. The kernel can take this @@ -40,7 +47,7 @@ /* These are for everybody (although not all archs will actually discard it in modules) */ -#define __init __section(.init.text) __cold __latent_entropy +#define __init __section(.init.text) __cold __latent_entropy __noretpoline #define __initdata __section(.init.data) #define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h deleted file mode 100644 index f9d932476a80..000000000000 --- a/include/linux/input/gpio_tilt.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _INPUT_GPIO_TILT_H -#define _INPUT_GPIO_TILT_H - -/** - * struct gpio_tilt_axis - Axis used by the tilt switch - * @axis: Constant describing the axis, e.g. ABS_X - * @min: minimum value for abs_param - * @max: maximum value for abs_param - * @fuzz: fuzz value for abs_param - * @flat: flat value for abs_param - */ -struct gpio_tilt_axis { - int axis; - int min; - int max; - int fuzz; - int flat; -}; - -/** - * struct gpio_tilt_state - state description - * @gpios: bitfield of gpio target-states for the value - * @axes: array containing the axes settings for the gpio state - * The array indizes must correspond to the axes defined - * in platform_data - * - * This structure describes a supported axis settings - * and the necessary gpio-state which represent it. - * - * The n-th bit in the bitfield describes the state of the n-th GPIO - * from the gpios-array defined in gpio_regulator_config below. 
- */ -struct gpio_tilt_state { - int gpios; - int *axes; -}; - -/** - * struct gpio_tilt_platform_data - * @gpios: Array containing the gpios determining the tilt state - * @nr_gpios: Number of gpios - * @axes: Array of gpio_tilt_axis descriptions - * @nr_axes: Number of axes - * @states: Array of gpio_tilt_state entries describing - * the gpio state for specific tilts - * @nr_states: Number of states available - * @debounce_interval: debounce ticks interval in msecs - * @poll_interval: polling interval in msecs - for polling driver only - * @enable: callback to enable the tilt switch - * @disable: callback to disable the tilt switch - * - * This structure contains gpio-tilt-switch configuration - * information that must be passed by platform code to the - * gpio-tilt input driver. - */ -struct gpio_tilt_platform_data { - struct gpio *gpios; - int nr_gpios; - - struct gpio_tilt_axis *axes; - int nr_axes; - - struct gpio_tilt_state *states; - int nr_states; - - int debounce_interval; - - unsigned int poll_interval; - int (*enable)(struct device *dev); - void (*disable)(struct device *dev); -}; - -#endif diff --git a/include/linux/integrity.h b/include/linux/integrity.h index c2d6082a1a4c..858d3f4a2241 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -14,6 +14,7 @@ enum integrity_status { INTEGRITY_PASS = 0, + INTEGRITY_PASS_IMMUTABLE, INTEGRITY_FAIL, INTEGRITY_NOLABEL, INTEGRITY_NOXATTRS, diff --git a/include/linux/iversion.h b/include/linux/iversion.h index 858463fca249..3d2fd06495ec 100644 --- a/include/linux/iversion.h +++ b/include/linux/iversion.h @@ -309,13 +309,13 @@ inode_query_iversion(struct inode *inode) * @inode: inode to check * @old: old value to check against its i_version * - * Compare the current raw i_version counter with a previous one. Returns 0 if - * they are the same or non-zero if they are different. + * Compare the current raw i_version counter with a previous one. Returns false + * if they are the same or true if they are different. */ -static inline s64 +static inline bool inode_cmp_iversion_raw(const struct inode *inode, u64 old) { - return (s64)inode_peek_iversion_raw(inode) - (s64)old; + return inode_peek_iversion_raw(inode) != old; } /** @@ -323,19 +323,15 @@ inode_cmp_iversion_raw(const struct inode *inode, u64 old) * @inode: inode to check * @old: old value to check against its i_version * - * Compare an i_version counter with a previous one. Returns 0 if they are - * the same, a positive value if the one in the inode appears newer than @old, - * and a negative value if @old appears to be newer than the one in the - * inode. + * Compare an i_version counter with a previous one. Returns false if they are + * the same, and true if they are different. * * Note that we don't need to set the QUERIED flag in this case, as the value * in the inode is not being recorded for later use. 
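With the iversion comparison helpers now returning bool (inode_cmp_iversion_raw() above and inode_cmp_iversion() just below), the intended caller pattern is simply "remember a value, later ask whether it changed". A small sketch of our own, not taken from the patch:

#include <linux/iversion.h>

/* Returns true and refreshes the cached value when the inode's
 * i_version has moved on since *cached was recorded. */
static bool example_inode_changed(struct inode *inode, u64 *cached)
{
	if (!inode_cmp_iversion(inode, *cached))
		return false;

	*cached = inode_query_iversion(inode);
	return true;
}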
*/ - -static inline s64 +static inline bool inode_cmp_iversion(const struct inode *inode, u64 old) { - return (s64)(inode_peek_iversion_raw(inode) & ~I_VERSION_QUERIED) - - (s64)(old << I_VERSION_QUERIED_SHIFT); + return inode_peek_iversion(inode) != old; } #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 296d1e0ea87b..b708e5169d1d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) /** - * struct jbd_inode is the structure linking inodes in ordered mode - * present in a transaction so that we can sync them during commit. + * struct jbd_inode - The jbd_inode type is the structure linking inodes in + * ordered mode present in a transaction so that we can sync them during commit. */ struct jbd2_inode { - /* Which transaction does this inode belong to? Either the running - * transaction or the committing one. [j_list_lock] */ + /** + * @i_transaction: + * + * Which transaction does this inode belong to? Either the running + * transaction or the committing one. [j_list_lock] + */ transaction_t *i_transaction; - /* Pointer to the running transaction modifying inode's data in case - * there is already a committing transaction touching it. [j_list_lock] */ + /** + * @i_next_transaction: + * + * Pointer to the running transaction modifying inode's data in case + * there is already a committing transaction touching it. [j_list_lock] + */ transaction_t *i_next_transaction; - /* List of inodes in the i_transaction [j_list_lock] */ + /** + * @i_list: List of inodes in the i_transaction [j_list_lock] + */ struct list_head i_list; - /* VFS inode this inode belongs to [constant during the lifetime - * of the structure] */ + /** + * @i_vfs_inode: + * + * VFS inode this inode belongs to [constant for lifetime of structure] + */ struct inode *i_vfs_inode; - /* Flags of inode [j_list_lock] */ + /** + * @i_flags: Flags of inode [j_list_lock] + */ unsigned long i_flags; }; @@ -447,12 +462,20 @@ struct jbd2_revoke_table_s; * struct handle_s - The handle_s type is the concrete type associated with * handle_t. * @h_transaction: Which compound transaction is this update a part of? + * @h_journal: Which journal handle belongs to - used iff h_reserved set. + * @h_rsv_handle: Handle reserved for finishing the logical operation. * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. - * @h_ref: Reference count on this handle - * @h_err: Field for caller's use to track errors through large fs operations - * @h_sync: flag for sync-on-close - * @h_jdata: flag to force data journaling - * @h_aborted: flag indicating fatal error on handle + * @h_ref: Reference count on this handle. + * @h_err: Field for caller's use to track errors through large fs operations. + * @h_sync: Flag for sync-on-close. + * @h_jdata: Flag to force data journaling. + * @h_reserved: Flag for handle for reserved credits. + * @h_aborted: Flag indicating fatal error on handle. + * @h_type: For handle statistics. + * @h_line_no: For handle statistics. + * @h_start_jiffies: Handle Start time. + * @h_requested_credits: Holds @h_buffer_credits after handle is started. + * @saved_alloc_context: Saved context while transaction is open. **/ /* Docbook can't yet cope with the bit fields, but will leave the documentation @@ -462,32 +485,23 @@ struct jbd2_revoke_table_s; struct jbd2_journal_handle { union { - /* Which compound transaction is this update a part of? 
*/ transaction_t *h_transaction; /* Which journal handle belongs to - used iff h_reserved set */ journal_t *h_journal; }; - /* Handle reserved for finishing the logical operation */ handle_t *h_rsv_handle; - - /* Number of remaining buffers we are allowed to dirty: */ int h_buffer_credits; - - /* Reference count on this handle */ int h_ref; - - /* Field for caller's use to track errors through large fs */ - /* operations */ int h_err; /* Flags [no locking] */ - unsigned int h_sync: 1; /* sync-on-close */ - unsigned int h_jdata: 1; /* force data journaling */ - unsigned int h_reserved: 1; /* handle with reserved credits */ - unsigned int h_aborted: 1; /* fatal error on handle */ - unsigned int h_type: 8; /* for handle statistics */ - unsigned int h_line_no: 16; /* for handle statistics */ + unsigned int h_sync: 1; + unsigned int h_jdata: 1; + unsigned int h_reserved: 1; + unsigned int h_aborted: 1; + unsigned int h_type: 8; + unsigned int h_line_no: 16; unsigned long h_start_jiffies; unsigned int h_requested_credits; @@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end) /** * struct journal_s - The journal_s type is the concrete type associated with * journal_t. - * @j_flags: General journaling state flags - * @j_errno: Is there an outstanding uncleared error on the journal (from a - * prior abort)? - * @j_sb_buffer: First part of superblock buffer - * @j_superblock: Second part of superblock buffer - * @j_format_version: Version of the superblock format - * @j_state_lock: Protect the various scalars in the journal - * @j_barrier_count: Number of processes waiting to create a barrier lock - * @j_barrier: The barrier lock itself - * @j_running_transaction: The current running transaction.. - * @j_committing_transaction: the transaction we are pushing to disk - * @j_checkpoint_transactions: a linked circular list of all transactions - * waiting for checkpointing - * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction - * to start committing, or for a barrier lock to be released - * @j_wait_done_commit: Wait queue for waiting for commit to complete - * @j_wait_commit: Wait queue to trigger commit - * @j_wait_updates: Wait queue to wait for updates to complete - * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop - * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints - * @j_head: Journal head - identifies the first unused block in the journal - * @j_tail: Journal tail - identifies the oldest still-used block in the - * journal. - * @j_free: Journal free - how many free blocks are there in the journal? - * @j_first: The block number of the first usable block - * @j_last: The block number one beyond the last usable block - * @j_dev: Device where we store the journal - * @j_blocksize: blocksize for the location where we store the journal. - * @j_blk_offset: starting block offset for into the device where we store the - * journal - * @j_fs_dev: Device which holds the client fs. For internal journal this will - * be equal to j_dev - * @j_reserved_credits: Number of buffers reserved from the running transaction - * @j_maxlen: Total maximum capacity of the journal region on disk. - * @j_list_lock: Protects the buffer lists and internal buffer state. - * @j_inode: Optional inode where we store the journal. If present, all journal - * block numbers are mapped into this inode via bmap(). 
- * @j_tail_sequence: Sequence number of the oldest transaction in the log - * @j_transaction_sequence: Sequence number of the next transaction to grant - * @j_commit_sequence: Sequence number of the most recently committed - * transaction - * @j_commit_request: Sequence number of the most recent transaction wanting - * commit - * @j_uuid: Uuid of client object. - * @j_task: Pointer to the current commit thread for this journal - * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a - * single compound commit transaction - * @j_commit_interval: What is the maximum transaction lifetime before we begin - * a commit? - * @j_commit_timer: The timer used to wakeup the commit thread - * @j_revoke_lock: Protect the revoke table - * @j_revoke: The revoke table - maintains the list of revoked blocks in the - * current transaction. - * @j_revoke_table: alternate revoke tables for j_revoke - * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction - * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the - * number that will fit in j_blocksize - * @j_last_sync_writer: most recent pid which did a synchronous write - * @j_history_lock: Protect the transactions statistics history - * @j_proc_entry: procfs entry for the jbd statistics directory - * @j_stats: Overall statistics - * @j_private: An opaque pointer to fs-private information. - * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies */ - struct journal_s { - /* General journaling state flags [j_state_lock] */ + /** + * @j_flags: General journaling state flags [j_state_lock] + */ unsigned long j_flags; - /* + /** + * @j_errno: + * * Is there an outstanding uncleared error on the journal (from a prior * abort)? [j_state_lock] */ int j_errno; - /* The superblock buffer */ + /** + * @j_sb_buffer: The first part of the superblock buffer. + */ struct buffer_head *j_sb_buffer; + + /** + * @j_superblock: The second part of the superblock buffer. + */ journal_superblock_t *j_superblock; - /* Version of the superblock format */ + /** + * @j_format_version: Version of the superblock format. + */ int j_format_version; - /* - * Protect the various scalars in the journal + /** + * @j_state_lock: Protect the various scalars in the journal. */ rwlock_t j_state_lock; - /* + /** + * @j_barrier_count: + * * Number of processes waiting to create a barrier lock [j_state_lock] */ int j_barrier_count; - /* The barrier lock itself */ + /** + * @j_barrier: The barrier lock itself. + */ struct mutex j_barrier; - /* + /** + * @j_running_transaction: + * * Transactions: The current running transaction... * [j_state_lock] [caller holding open handle] */ transaction_t *j_running_transaction; - /* + /** + * @j_committing_transaction: + * * the transaction we are pushing to disk * [j_state_lock] [caller holding open handle] */ transaction_t *j_committing_transaction; - /* + /** + * @j_checkpoint_transactions: + * * ... and a linked circular list of all transactions waiting for * checkpointing. [j_list_lock] */ transaction_t *j_checkpoint_transactions; - /* + /** + * @j_wait_transaction_locked: + * * Wait queue for waiting for a locked transaction to start committing, - * or for a barrier lock to be released + * or for a barrier lock to be released. */ wait_queue_head_t j_wait_transaction_locked; - /* Wait queue for waiting for commit to complete */ + /** + * @j_wait_done_commit: Wait queue for waiting for commit to complete. 
+ */ wait_queue_head_t j_wait_done_commit; - /* Wait queue to trigger commit */ + /** + * @j_wait_commit: Wait queue to trigger commit. + */ wait_queue_head_t j_wait_commit; - /* Wait queue to wait for updates to complete */ + /** + * @j_wait_updates: Wait queue to wait for updates to complete. + */ wait_queue_head_t j_wait_updates; - /* Wait queue to wait for reserved buffer credits to drop */ + /** + * @j_wait_reserved: + * + * Wait queue to wait for reserved buffer credits to drop. + */ wait_queue_head_t j_wait_reserved; - /* Semaphore for locking against concurrent checkpoints */ + /** + * @j_checkpoint_mutex: + * + * Semaphore for locking against concurrent checkpoints. + */ struct mutex j_checkpoint_mutex; - /* + /** + * @j_chkpt_bhs: + * * List of buffer heads used by the checkpoint routine. This * was moved from jbd2_log_do_checkpoint() to reduce stack * usage. Access to this array is controlled by the - * j_checkpoint_mutex. [j_checkpoint_mutex] + * @j_checkpoint_mutex. [j_checkpoint_mutex] */ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; - - /* + + /** + * @j_head: + * * Journal head: identifies the first unused block in the journal. * [j_state_lock] */ unsigned long j_head; - /* + /** + * @j_tail: + * * Journal tail: identifies the oldest still-used block in the journal. * [j_state_lock] */ unsigned long j_tail; - /* + /** + * @j_free: + * * Journal free: how many free blocks are there in the journal? * [j_state_lock] */ unsigned long j_free; - /* - * Journal start and end: the block numbers of the first usable block - * and one beyond the last usable block in the journal. [j_state_lock] + /** + * @j_first: + * + * The block number of the first usable block in the journal + * [j_state_lock]. */ unsigned long j_first; + + /** + * @j_last: + * + * The block number one beyond the last usable block in the journal + * [j_state_lock]. + */ unsigned long j_last; - /* - * Device, blocksize and starting block offset for the location where we - * store the journal. + /** + * @j_dev: Device where we store the journal. */ struct block_device *j_dev; + + /** + * @j_blocksize: Block size for the location where we store the journal. + */ int j_blocksize; + + /** + * @j_blk_offset: + * + * Starting block offset into the device where we store the journal. + */ unsigned long long j_blk_offset; + + /** + * @j_devname: Journal device name. + */ char j_devname[BDEVNAME_SIZE+24]; - /* + /** + * @j_fs_dev: + * * Device which holds the client fs. For internal journal this will be * equal to j_dev. */ struct block_device *j_fs_dev; - /* Total maximum capacity of the journal region on disk. */ + /** + * @j_maxlen: Total maximum capacity of the journal region on disk. + */ unsigned int j_maxlen; - /* Number of buffers reserved from the running transaction */ + /** + * @j_reserved_credits: + * + * Number of buffers reserved from the running transaction. + */ atomic_t j_reserved_credits; - /* - * Protects the buffer lists and internal buffer state. + /** + * @j_list_lock: Protects the buffer lists and internal buffer state. */ spinlock_t j_list_lock; - /* Optional inode where we store the journal. If present, all */ - /* journal block numbers are mapped into this inode via */ - /* bmap(). */ + /** + * @j_inode: + * + * Optional inode where we store the journal. If present, all + * journal block numbers are mapped into this inode via bmap(). 
+ */ struct inode *j_inode; - /* + /** + * @j_tail_sequence: + * * Sequence number of the oldest transaction in the log [j_state_lock] */ tid_t j_tail_sequence; - /* + /** + * @j_transaction_sequence: + * * Sequence number of the next transaction to grant [j_state_lock] */ tid_t j_transaction_sequence; - /* + /** + * @j_commit_sequence: + * * Sequence number of the most recently committed transaction * [j_state_lock]. */ tid_t j_commit_sequence; - /* + /** + * @j_commit_request: + * * Sequence number of the most recent transaction wanting commit * [j_state_lock] */ tid_t j_commit_request; - /* + /** + * @j_uuid: + * * Journal uuid: identifies the object (filesystem, LVM volume etc) * backed by this journal. This will eventually be replaced by an array * of uuids, allowing us to index multiple devices within a single @@ -958,85 +997,151 @@ struct journal_s */ __u8 j_uuid[16]; - /* Pointer to the current commit thread for this journal */ + /** + * @j_task: Pointer to the current commit thread for this journal. + */ struct task_struct *j_task; - /* + /** + * @j_max_transaction_buffers: + * * Maximum number of metadata buffers to allow in a single compound - * commit transaction + * commit transaction. */ int j_max_transaction_buffers; - /* + /** + * @j_commit_interval: + * * What is the maximum transaction lifetime before we begin a commit? */ unsigned long j_commit_interval; - /* The timer used to wakeup the commit thread: */ + /** + * @j_commit_timer: The timer used to wakeup the commit thread. + */ struct timer_list j_commit_timer; - /* - * The revoke table: maintains the list of revoked blocks in the - * current transaction. [j_revoke_lock] + /** + * @j_revoke_lock: Protect the revoke table. */ spinlock_t j_revoke_lock; + + /** + * @j_revoke: + * + * The revoke table - maintains the list of revoked blocks in the + * current transaction. + */ struct jbd2_revoke_table_s *j_revoke; + + /** + * @j_revoke_table: Alternate revoke tables for j_revoke. + */ struct jbd2_revoke_table_s *j_revoke_table[2]; - /* - * array of bhs for jbd2_journal_commit_transaction + /** + * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. */ struct buffer_head **j_wbuf; + + /** + * @j_wbufsize: + * + * Size of @j_wbuf array. + */ int j_wbufsize; - /* - * this is the pid of hte last person to run a synchronous operation - * through the journal + /** + * @j_last_sync_writer: + * + * The pid of the last person to run a synchronous operation + * through the journal. */ pid_t j_last_sync_writer; - /* - * the average amount of time in nanoseconds it takes to commit a + /** + * @j_average_commit_time: + * + * The average amount of time in nanoseconds it takes to commit a * transaction to disk. [j_state_lock] */ u64 j_average_commit_time; - /* - * minimum and maximum times that we should wait for - * additional filesystem operations to get batched into a - * synchronous handle in microseconds + /** + * @j_min_batch_time: + * + * Minimum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. */ u32 j_min_batch_time; + + /** + * @j_max_batch_time: + * + * Maximum time that we should wait for additional filesystem operations + * to get batched into a synchronous handle in microseconds. + */ u32 j_max_batch_time; - /* This function is called when a transaction is closed */ + /** + * @j_commit_callback: + * + * This function is called when a transaction is closed. 
+ */ void (*j_commit_callback)(journal_t *, transaction_t *); /* * Journal statistics */ + + /** + * @j_history_lock: Protect the transactions statistics history. + */ spinlock_t j_history_lock; + + /** + * @j_proc_entry: procfs entry for the jbd statistics directory. + */ struct proc_dir_entry *j_proc_entry; + + /** + * @j_stats: Overall statistics. + */ struct transaction_stats_s j_stats; - /* Failed journal commit ID */ + /** + * @j_failed_commit: Failed journal commit ID. + */ unsigned int j_failed_commit; - /* + /** + * @j_private: + * * An opaque pointer to fs-private information. ext3 puts its - * superblock pointer here + * superblock pointer here. */ void *j_private; - /* Reference to checksum algorithm driver via cryptoapi */ + /** + * @j_chksum_driver: + * + * Reference to checksum algorithm driver via cryptoapi. + */ struct crypto_shash *j_chksum_driver; - /* Precomputed journal UUID checksum for seeding other checksums */ + /** + * @j_csum_seed: + * + * Precomputed journal UUID checksum for seeding other checksums. + */ __u32 j_csum_seed; #ifdef CONFIG_DEBUG_LOCK_ALLOC - /* + /** + * @j_trans_commit_map: + * * Lockdep entity to track transaction commit dependencies. Handles * hold this "lock" for read, when we wait for commit, we acquire the * "lock" for writing. This matches the properties of jbd2 journalling diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index bd118a6c60cb..d79d1e7486bd 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -9,6 +9,10 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/stddef.h> +#include <linux/mm.h> +#include <linux/module.h> + +#include <asm/sections.h> #define KSYM_NAME_LEN 128 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ @@ -16,6 +20,56 @@ struct module; +static inline int is_kernel_inittext(unsigned long addr) +{ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; +} + +static inline int is_kernel_text(unsigned long addr) +{ + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || + arch_is_kernel_text(addr)) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline int is_kernel(unsigned long addr) +{ + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) + return 1; + return in_gate_area_no_mm(addr); +} + +static inline int is_ksym_addr(unsigned long addr) +{ + if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) + return is_kernel(addr); + + return is_kernel_text(addr) || is_kernel_inittext(addr); +} + +static inline void *dereference_symbol_descriptor(void *ptr) +{ +#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR + struct module *mod; + + ptr = dereference_kernel_function_descriptor(ptr); + if (is_ksym_addr((unsigned long)ptr)) + return ptr; + + preempt_disable(); + mod = __module_address((unsigned long)ptr); + preempt_enable(); + + if (mod) + ptr = dereference_module_function_descriptor(mod, ptr); +#endif + return ptr; +} + #ifdef CONFIG_KALLSYMS /* Lookup the address for a symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name); @@ -40,9 +94,6 @@ extern int sprint_symbol(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); -/* Look up a kernel symbol and print it to the kernel messages. 
*/ -extern void __print_symbol(const char *fmt, unsigned long address); - int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); @@ -112,23 +163,8 @@ static inline int kallsyms_show_value(void) return false; } -/* Stupid that this does nothing, but I didn't create this mess. */ -#define __print_symbol(fmt, addr) #endif /*CONFIG_KALLSYMS*/ -/* This macro allows us to keep printk typechecking */ -static __printf(1, 2) -void __check_printsym_format(const char *fmt, ...) -{ -} - -static inline void print_symbol(const char *fmt, unsigned long addr) -{ - __check_printsym_format(fmt, ""); - __print_symbol(fmt, (unsigned long) - __builtin_extract_return_addr((void *)addr)); -} - static inline void print_ip_sym(unsigned long ip) { printk("[<%p>] %pS\n", (void *) ip, (void *) ip); diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 7b45959ebd92..e251533a5939 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -113,7 +113,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); * array is a part of the structure and the fifo type where the array is * outside of the fifo structure. */ -#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) +#define __is_kfifo_ptr(fifo) \ + (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type)))) /** * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e0a6205caa71..7f6f93c3df9c 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * kobject.h - generic kernel object infrastructure. * @@ -6,8 +7,6 @@ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2008 Novell Inc. * - * This file is released under the GPLv2. - * * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index df32d2508290..069aa2ebef90 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* Kernel object name space definitions * * Copyright (c) 2002-2003 Patrick Mochel @@ -7,8 +8,6 @@ * * Split from kobject.h by David Howells (dhowells@redhat.com) * - * This file is released under the GPLv2. - * * Please read Documentation/kobject.txt before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. 
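The kallsyms.h hunks above drop __print_symbol()/print_symbol() entirely; callers are expected to move to the %pS printk format, which is exactly what the surviving print_ip_sym() helper already does. A minimal conversion sketch, not part of this patch (the function name report_caller() is invented for illustration):

#include <linux/kernel.h>
#include <linux/printk.h>

static void report_caller(void)
{
	/* Old style, now removed: print_symbol("entered from %s\n", addr); */
	/* New style: printk() resolves the address itself via the %pS specifier */
	printk(KERN_DEBUG "entered from %pS\n", __builtin_return_address(0));
}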
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index e97966d1fb8d..700efaa9e115 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -121,6 +121,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev); static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, bool state) { + if (!fled_cdev) + return -EINVAL; return fled_cdev->ops->strobe_set(fled_cdev, state); } @@ -136,6 +138,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, bool *state) { + if (!fled_cdev) + return -EINVAL; if (fled_cdev->ops->strobe_get) return fled_cdev->ops->strobe_get(fled_cdev, state); diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h index 27ba06e5d117..90ed4ebfa692 100644 --- a/include/linux/libfdt.h +++ b/include/linux/libfdt.h @@ -3,7 +3,6 @@ #define _INCLUDE_LIBFDT_H_ #include <linux/libfdt_env.h> -#include "../../scripts/dtc/libfdt/fdt.h" #include "../../scripts/dtc/libfdt/libfdt.h" #endif /* _INCLUDE_LIBFDT_H_ */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index f8109ddb5ef1..ff855ed965fb 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -47,6 +47,17 @@ enum { /* region flag indicating to direct-map persistent memory by default */ ND_REGION_PAGEMAP = 0, + /* + * Platform ensures entire CPU store data path is flushed to pmem on + * system power loss. + */ + ND_REGION_PERSIST_CACHE = 1, + /* + * Platform provides mechanisms to automatically flush outstanding + * write data from memory controller to pmem on system power loss. + * (ADR) + */ + ND_REGION_PERSIST_MEMCTRL = 2, /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index fc5c1be3f6f4..4754f01c1abb 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -40,7 +40,6 @@ * @new_func: pointer to the patched function code * @old_sympos: a hint indicating which symbol position the old function * can be found (optional) - * @immediate: patch the func immediately, bypassing safety mechanisms * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources * @stack_node: list node for klp_ops func_stack list @@ -76,7 +75,6 @@ struct klp_func { * in kallsyms for the given object is used.
*/ unsigned long old_sympos; - bool immediate; /* internal */ unsigned long old_addr; @@ -137,7 +135,6 @@ struct klp_object { * struct klp_patch - patch structure for live patching * @mod: reference to the live patch module * @objs: object entries for kernel objects to be patched - * @immediate: patch all funcs immediately, bypassing safety mechanisms * @list: list node for global list of registered patches * @kobj: kobject for sysfs resources * @enabled: the patch is enabled (but operation may be incomplete) @@ -147,7 +144,6 @@ struct klp_patch { /* external */ struct module *mod; struct klp_object *objs; - bool immediate; /* internal */ struct list_head list; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index d7d313fb9cd4..4fd95dbeb52f 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -17,6 +17,7 @@ #include <net/ipv6.h> #include <linux/fs.h> #include <linux/kref.h> +#include <linux/refcount.h> #include <linux/utsname.h> #include <linux/lockd/bind.h> #include <linux/lockd/xdr.h> @@ -58,7 +59,7 @@ struct nlm_host { u32 h_state; /* pseudo-state counter */ u32 h_nsmstate; /* true remote NSM state */ u32 h_pidcount; /* Pseudopids */ - atomic_t h_count; /* reference count */ + refcount_t h_count; /* reference count */ struct mutex h_mutex; /* mutex for pmap binding */ unsigned long h_nextrebind; /* next portmap call */ unsigned long h_expires; /* eligible for GC */ @@ -83,7 +84,7 @@ struct nlm_host { struct nsm_handle { struct list_head sm_link; - atomic_t sm_count; + refcount_t sm_count; char *sm_mon_name; char *sm_name; struct sockaddr_storage sm_addr; @@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) */ struct nlm_lockowner { struct list_head list; - atomic_t count; + refcount_t count; struct nlm_host *host; fl_owner_t owner; @@ -136,7 +137,7 @@ struct nlm_wait; */ #define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) struct nlm_rqst { - atomic_t a_count; + refcount_t a_count; unsigned int a_flags; /* initial RPC task flags */ struct nlm_host * a_host; /* host handle */ struct nlm_args a_args; /* arguments */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index ca08ab16ecdc..2cfffe586885 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -12,6 +12,7 @@ #include <uapi/linux/mdio.h> #include <linux/mod_devicetable.h> +struct gpio_desc; struct mii_bus; /* Multiple levels of nesting are possible. 
However typically this is @@ -39,6 +40,9 @@ struct mdio_device { /* Bus address of the MDIO device (0-31) */ int addr; int flags; + struct gpio_desc *reset; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; }; #define to_mdio_device(d) container_of(d, struct mdio_device, dev) @@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev); struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); int mdio_device_register(struct mdio_device *mdiodev); void mdio_device_remove(struct mdio_device *mdiodev); +void mdio_device_reset(struct mdio_device *mdiodev, int value); int mdio_driver_register(struct mdio_driver *drv); void mdio_driver_unregister(struct mdio_driver *drv); int mdio_device_bus_match(struct device *dev, struct device_driver *drv); @@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); + int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 69966c461d1c..882046863581 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -108,7 +108,10 @@ struct lruvec_stat { */ struct mem_cgroup_per_node { struct lruvec lruvec; - struct lruvec_stat __percpu *lruvec_stat; + + struct lruvec_stat __percpu *lruvec_stat_cpu; + atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; + unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; @@ -227,10 +230,10 @@ struct mem_cgroup { spinlock_t move_lock; struct task_struct *move_lock_task; unsigned long move_lock_flags; - /* - * percpu counter. - */ - struct mem_cgroup_stat_cpu __percpu *stat; + + struct mem_cgroup_stat_cpu __percpu *stat_cpu; + atomic_long_t stat[MEMCG_NR_STAT]; + atomic_long_t events[MEMCG_NR_EVENTS]; unsigned long socket_pressure; @@ -265,6 +268,12 @@ struct mem_cgroup { /* WARNING: nodeinfo must be the last member here */ }; +/* + * size of first charge trial. "32" comes from vmscan.c's magic value. + * TODO: maybe necessary to use big numbers in big irons. 
+ */ +#define MEMCG_CHARGE_BATCH 32U + extern struct mem_cgroup *root_mem_cgroup; static inline bool mem_cgroup_disabled(void) @@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline void mem_cgroup_event(struct mem_cgroup *memcg, - enum memcg_event_item event) -{ - this_cpu_inc(memcg->stat->events[event]); - cgroup_file_notify(&memcg->events_file); -} - bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, @@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page); static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) { - long val = 0; - int cpu; - - for_each_possible_cpu(cpu) - val += per_cpu(memcg->stat->count[idx], cpu); - - if (val < 0) - val = 0; - - return val; + long x = atomic_long_read(&memcg->stat[idx]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; } /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) { - if (!mem_cgroup_disabled()) - __this_cpu_add(memcg->stat->count[idx], val); + long x; + + if (mem_cgroup_disabled()) + return; + + x = val + __this_cpu_read(memcg->stat_cpu->count[idx]); + if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &memcg->stat[idx]); + x = 0; + } + __this_cpu_write(memcg->stat_cpu->count[idx], x); } /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) { - if (!mem_cgroup_disabled()) - this_cpu_add(memcg->stat->count[idx], val); + preempt_disable(); + __mod_memcg_state(memcg, idx, val); + preempt_enable(); } /** @@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { struct mem_cgroup_per_node *pn; - long val = 0; - int cpu; + long x; if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx); pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - for_each_possible_cpu(cpu) - val += per_cpu(pn->lruvec_stat->count[idx], cpu); - - if (val < 0) - val = 0; - - return val; + x = atomic_long_read(&pn->lruvec_stat[idx]); +#ifdef CONFIG_SMP + if (x < 0) + x = 0; +#endif + return x; } static inline void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { struct mem_cgroup_per_node *pn; + long x; + /* Update node */ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); + if (mem_cgroup_disabled()) return; + pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); + + /* Update memcg */ __mod_memcg_state(pn->memcg, idx, val); - __this_cpu_add(pn->lruvec_stat->count[idx], val); + + /* Update lruvec */ + x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); + if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &pn->lruvec_stat[idx]); + x = 0; + } + __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); } static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { - struct mem_cgroup_per_node *pn; - - mod_node_page_state(lruvec_pgdat(lruvec), idx, val); - if (mem_cgroup_disabled()) - return; - pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - mod_memcg_state(pn->memcg, idx, val); - this_cpu_add(pn->lruvec_stat->count[idx], val); + preempt_disable(); + __mod_lruvec_state(lruvec, idx, val); + preempt_enable(); } static inline void __mod_lruvec_page_state(struct page *page, enum 
node_stat_item idx, int val) { - struct mem_cgroup_per_node *pn; + pg_data_t *pgdat = page_pgdat(page); + struct lruvec *lruvec; - __mod_node_page_state(page_pgdat(page), idx, val); - if (mem_cgroup_disabled() || !page->mem_cgroup) + /* Untracked pages have no memcg, no lruvec. Update only the node */ + if (!page->mem_cgroup) { + __mod_node_page_state(pgdat, idx, val); return; - __mod_memcg_state(page->mem_cgroup, idx, val); - pn = page->mem_cgroup->nodeinfo[page_to_nid(page)]; - __this_cpu_add(pn->lruvec_stat->count[idx], val); + } + + lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup); + __mod_lruvec_state(lruvec, idx, val); } static inline void mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { - struct mem_cgroup_per_node *pn; - - mod_node_page_state(page_pgdat(page), idx, val); - if (mem_cgroup_disabled() || !page->mem_cgroup) - return; - mod_memcg_state(page->mem_cgroup, idx, val); - pn = page->mem_cgroup->nodeinfo[page_to_nid(page)]; - this_cpu_add(pn->lruvec_stat->count[idx], val); + preempt_disable(); + __mod_lruvec_page_state(page, idx, val); + preempt_enable(); } unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +/* idx can be of type enum memcg_event_item or vm_event_item */ +static inline void __count_memcg_events(struct mem_cgroup *memcg, + int idx, unsigned long count) +{ + unsigned long x; + + if (mem_cgroup_disabled()) + return; + + x = count + __this_cpu_read(memcg->stat_cpu->events[idx]); + if (unlikely(x > MEMCG_CHARGE_BATCH)) { + atomic_long_add(x, &memcg->events[idx]); + x = 0; + } + __this_cpu_write(memcg->stat_cpu->events[idx], x); +} + static inline void count_memcg_events(struct mem_cgroup *memcg, - enum vm_event_item idx, - unsigned long count) + int idx, unsigned long count) { - if (!mem_cgroup_disabled()) - this_cpu_add(memcg->stat->events[idx], count); + preempt_disable(); + __count_memcg_events(memcg, idx, count); + preempt_enable(); } -/* idx can be of type enum memcg_stat_item or node_stat_item */ +/* idx can be of type enum memcg_event_item or vm_event_item */ static inline void count_memcg_page_event(struct page *page, int idx) { @@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (likely(memcg)) { - this_cpu_inc(memcg->stat->events[idx]); + count_memcg_events(memcg, idx, 1); if (idx == OOM_KILL) cgroup_file_notify(&memcg->events_file); } rcu_read_unlock(); } + +static inline void mem_cgroup_event(struct mem_cgroup *memcg, + enum memcg_event_item event) +{ + count_memcg_events(memcg, event, 1); + cgroup_file_notify(&memcg->events_file); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE void mem_cgroup_split_huge_fixup(struct page *head); #endif diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 58e110aee7ab..aba5f86eb038 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -13,6 +13,7 @@ struct pglist_data; struct mem_section; struct memory_block; struct resource; +struct vmem_altmap; #ifdef CONFIG_MEMORY_HOTPLUG /* @@ -125,24 +126,26 @@ static inline bool movable_node_is_enabled(void) #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_pageblock_removable_nolock(struct page *page); -extern int arch_remove_memory(u64 start, u64 size); +extern int arch_remove_memory(u64 start, u64 size, + struct vmem_altmap *altmap); extern int __remove_pages(struct zone *zone, unsigned long start_pfn, - unsigned long 
nr_pages); + unsigned long nr_pages, struct vmem_altmap *altmap); #endif /* CONFIG_MEMORY_HOTREMOVE */ /* reasonably generic interface to expand the physical pages */ -extern int __add_pages(int nid, unsigned long start_pfn, - unsigned long nr_pages, bool want_memblock); +extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap, bool want_memblock); #ifndef CONFIG_ARCH_HAS_ADD_PAGES static inline int add_pages(int nid, unsigned long start_pfn, - unsigned long nr_pages, bool want_memblock) + unsigned long nr_pages, struct vmem_altmap *altmap, + bool want_memblock) { - return __add_pages(nid, start_pfn, nr_pages, want_memblock); + return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); } #else /* ARCH_HAS_ADD_PAGES */ -int add_pages(int nid, unsigned long start_pfn, - unsigned long nr_pages, bool want_memblock); +int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, + struct vmem_altmap *altmap, bool want_memblock); #endif /* ARCH_HAS_ADD_PAGES */ #ifdef CONFIG_NUMA @@ -318,15 +321,17 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource, bool online); -extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock); +extern int arch_add_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap, bool want_memblock); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, - unsigned long nr_pages); + unsigned long nr_pages, struct vmem_altmap *altmap); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); extern void remove_memory(int nid, u64 start, u64 size); -extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn); +extern int sparse_add_one_section(struct pglist_data *pgdat, + unsigned long start_pfn, struct vmem_altmap *altmap); extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, - unsigned long map_offset); + unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 10d23c367048..7b4899c06f49 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -26,18 +26,6 @@ struct vmem_altmap { unsigned long alloc; }; -unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); -void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); - -#ifdef CONFIG_ZONE_DEVICE -struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); -#else -static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) -{ - return NULL; -} -#endif - /* * Specialize ZONE_DEVICE memory into multiple types each having differents * usage. 
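The memremap.h changes here and in the hunks that follow collapse devm_memremap_pages()'s separate resource/percpu-ref/altmap arguments into a single struct dev_pagemap whose res and altmap members are embedded by value. A hedged caller sketch under the new interface — only the dev_pagemap fields and the one-argument devm_memremap_pages() signature come from the diff; the surrounding function and names are invented for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/memremap.h>

/* Hypothetical probe step: map a device-backed range as ZONE_DEVICE pages */
static void *example_map_pmem(struct device *dev, struct resource *res,
			      struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap;

	pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return ERR_PTR(-ENOMEM);

	pgmap->res = *res;		/* struct resource is embedded, no longer a pointer */
	pgmap->ref = ref;		/* per-cpu ref is still supplied by the caller */
	pgmap->altmap_valid = false;	/* no vmem_altmap carve-out in this sketch */

	/* single-argument form introduced by these hunks */
	return devm_memremap_pages(dev, pgmap);
}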
@@ -125,8 +113,9 @@ typedef void (*dev_page_free_t)(struct page *page, void *data); struct dev_pagemap { dev_page_fault_t page_fault; dev_page_free_t page_free; - struct vmem_altmap *altmap; - const struct resource *res; + struct vmem_altmap altmap; + bool altmap_valid; + struct resource res; struct percpu_ref *ref; struct device *dev; void *data; @@ -134,15 +123,17 @@ struct dev_pagemap { }; #ifdef CONFIG_ZONE_DEVICE -void *devm_memremap_pages(struct device *dev, struct resource *res, - struct percpu_ref *ref, struct vmem_altmap *altmap); -struct dev_pagemap *find_dev_pagemap(resource_size_t phys); +void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); +struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap); + +unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); +void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); static inline bool is_zone_device_page(const struct page *page); #else static inline void *devm_memremap_pages(struct device *dev, - struct resource *res, struct percpu_ref *ref, - struct vmem_altmap *altmap) + struct dev_pagemap *pgmap) { /* * Fail attempts to call devm_memremap_pages() without @@ -153,11 +144,22 @@ static inline void *devm_memremap_pages(struct device *dev, return ERR_PTR(-ENXIO); } -static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) +static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, + struct dev_pagemap *pgmap) { return NULL; } -#endif + +static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) +{ + return 0; +} + +static inline void vmem_altmap_free(struct vmem_altmap *altmap, + unsigned long nr_pfns) +{ +} +#endif /* CONFIG_ZONE_DEVICE */ #if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC) static inline bool is_device_private_page(const struct page *page) @@ -173,39 +175,6 @@ static inline bool is_device_public_page(const struct page *page) } #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -/** - * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn - * @pfn: page frame number to lookup page_map - * @pgmap: optional known pgmap that already has a reference - * - * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the - * same mapping. - */ -static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, - struct dev_pagemap *pgmap) -{ - const struct resource *res = pgmap ? pgmap->res : NULL; - resource_size_t phys = PFN_PHYS(pfn); - - /* - * In the cached case we're already holding a live reference so - * we can simply do a blind increment - */ - if (res && phys >= res->start && phys <= res->end) { - percpu_ref_get(pgmap->ref); - return pgmap; - } - - /* fall back to slow path lookup */ - rcu_read_lock(); - pgmap = find_dev_pagemap(phys); - if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) - pgmap = NULL; - rcu_read_unlock(); - - return pgmap; -} - static inline void put_dev_pagemap(struct dev_pagemap *pgmap) { if (pgmap) diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index a83f6498b95e..2b96e630e3b6 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h @@ -291,6 +291,9 @@ enum host_event_code { /* EC desires to change state of host-controlled USB mux */ EC_HOST_EVENT_USB_MUX = 28, + /* EC RTC event occurred */ + EC_HOST_EVENT_RTC = 26, + /* * The high bit of the event mask is not used as a host event code. 
If * it reads back as set, then the entire event mask should be @@ -799,6 +802,8 @@ enum ec_feature_code { EC_FEATURE_USB_MUX = 23, /* Motion Sensor code has an internal software FIFO */ EC_FEATURE_MOTION_SENSE_FIFO = 24, + /* EC has RTC feature that can be controlled by host commands */ + EC_FEATURE_RTC = 27, }; #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) @@ -1709,6 +1714,9 @@ struct ec_response_rtc { #define EC_CMD_RTC_SET_VALUE 0x46 #define EC_CMD_RTC_SET_ALARM 0x47 +/* Pass as param to SET_ALARM to clear the current alarm */ +#define EC_RTC_ALARM_CLEAR 0 + /*****************************************************************************/ /* Port80 log access */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 409ffb14298a..e5258ee4e38b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -79,6 +79,11 @@ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) +#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \ + BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \ + MLX5_SET(typ, p, fld[idx], v); \ +} while (0) + #define MLX5_SET_TO_ONES(typ, p, fld) do { \ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ @@ -244,6 +249,8 @@ enum { MLX5_NON_FP_BFREGS_PER_UAR, MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, + MLX5_MIN_DYN_BFREGS = 512, + MLX5_MAX_DYN_BFREGS = 1024, }; enum { @@ -284,6 +291,7 @@ enum { MLX5_EVENT_QUEUE_TYPE_QP = 0, MLX5_EVENT_QUEUE_TYPE_RQ = 1, MLX5_EVENT_QUEUE_TYPE_SQ = 2, + MLX5_EVENT_QUEUE_TYPE_DCT = 6, }; enum mlx5_event { @@ -319,6 +327,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, + MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, + MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, }; @@ -611,6 +621,11 @@ struct mlx5_eqe_pps { u8 rsvd2[12]; } __packed; +struct mlx5_eqe_dct { + __be32 reserved[6]; + __be32 dctn; +}; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -626,6 +641,7 @@ union ev_data { struct mlx5_eqe_vport_change vport_change; struct mlx5_eqe_port_module port_module; struct mlx5_eqe_pps pps; + struct mlx5_eqe_dct dct; } __packed; struct mlx5_eqe { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a0610427e168..6ed79a8a8318 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode { MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, }; +enum mlx5_dct_atomic_mode { + MLX5_ATOMIC_MODE_DCT_OFF = 20, + MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, + MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, + MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, +}; + enum { MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, @@ -231,6 +238,9 @@ struct mlx5_bfreg_info { u32 ver; bool lib_uar_4k; u32 num_sys_pages; + u32 num_static_sys_pages; + u32 total_num_bfregs; + u32 num_dyn_bfregs; }; struct mlx5_cmd_first { @@ -430,6 +440,7 @@ enum mlx5_res_type { MLX5_RES_SRQ = 3, MLX5_RES_XSRQ = 4, MLX5_RES_XRQ = 5, + MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, }; struct mlx5_core_rsc_common { @@ -788,6 +799,7 @@ struct mlx5_clock { u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; + struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; @@ -826,7 +838,7 @@ struct mlx5_core_dev { struct mlx5e_resources mlx5e_res; struct { struct mlx5_rsvd_gids 
reserved_gids; - atomic_t roce_en; + u32 roce_en; } roce; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; @@ -835,6 +847,8 @@ struct mlx5_core_dev { struct cpu_rmap *rmap; #endif struct mlx5_clock clock; + struct mlx5_ib_clock_info *clock_info; + struct page *clock_info_page; }; struct mlx5_db { @@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, - const u8 *mac, bool vlan, u16 vlan_id); + const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); static inline int fw_initializing(struct mlx5_core_dev *dev) { @@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) return !!(dev->priv.rl_table.max_size); } +static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && + MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; +} + +static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) +{ + return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; +} + +static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_mp_slave(dev) || + mlx5_core_is_mp_master(dev); +} + +static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_mp_enabled(dev)) + return 1; + + return MLX5_CAP_GEN(dev, native_port_num); +} + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; @@ -1238,7 +1277,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) int eqn; int err; - err = mlx5_vector2eqn(dev, vector, &eqn, &irq); + err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); if (err) return NULL; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b25e7baa273e..a0b48afcb422 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -95,6 +95,10 @@ struct mlx5_flow_destination { struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); +struct mlx5_flow_namespace * +mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport); struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1391a82da98e..f4e417686f62 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits { u8 dei_cfi[0x1]; u8 eth_prio[0x3]; u8 sl[0x4]; - u8 port[0x8]; + u8 vhca_port_num[0x8]; u8 rmac_47_32[0x10]; u8 rmac_31_0[0x20]; @@ -794,7 +794,10 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x80]; + u8 reserved_at_0[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_40[0x40]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; @@ -1023,13 +1026,21 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3c0[0x1b]; + u8 hairpin[0x1]; + u8 reserved_at_3c1[0x2]; + u8 log_max_hairpin_queues[0x5]; + u8 reserved_at_3c8[0x3]; + u8 log_max_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3d0[0x3]; + u8 log_max_hairpin_num_packets[0x5]; + u8 reserved_at_3d8[0x3]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb_uc[0x1]; u8 disable_local_lb_mc[0x1]; - u8 reserved_at_3e3[0x8]; + u8 log_min_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3e8[0x3]; u8 
log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; @@ -1067,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_5f8[0x3]; u8 log_max_xrq[0x5]; - u8 reserved_at_600[0x200]; + u8 affiliate_nic_vport_criteria[0x8]; + u8 native_port_num[0x8]; + u8 num_vhca_ports[0x8]; + u8 reserved_at_618[0x6]; + u8 sw_owner_id[0x1]; + u8 reserved_at_61f[0x1e1]; }; enum mlx5_flow_destination_type { @@ -1163,7 +1179,12 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x15]; + u8 reserved_at_120[0x3]; + u8 log_hairpin_num_packets[0x5]; + u8 reserved_at_128[0x3]; + u8 log_hairpin_data_sz[0x5]; + u8 reserved_at_130[0x5]; + u8 log_wqe_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; @@ -2483,7 +2504,8 @@ struct mlx5_ifc_sqc_bits { u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2491,7 +2513,13 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_at_60[0x90]; + u8 reserved_at_60[0x8]; + u8 hairpin_peer_rq[0x18]; + + u8 reserved_at_80[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_a0[0x50]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; @@ -2563,7 +2591,8 @@ struct mlx5_ifc_rqc_bits { u8 state[0x4]; u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2577,7 +2606,13 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_80[0x8]; u8 rmpn[0x18]; - u8 reserved_at_a0[0xe0]; + u8 reserved_at_a0[0x8]; + u8 hairpin_peer_sq[0x18]; + + u8 reserved_at_c0[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_e0[0xa0]; struct mlx5_ifc_wq_bits wq; }; @@ -2616,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits { u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; - u8 reserved_at_40[0xf0]; + u8 reserved_at_40[0xc]; + + u8 affiliation_criteria[0x4]; + u8 affiliated_vhca_id[0x10]; + + u8 reserved_at_60[0xd0]; u8 mtu[0x10]; @@ -3259,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits { u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_at_50[0x10]; + u8 reserved_at_50[0xc]; + u8 vhca_port_num[0x4]; u8 reserved_at_60[0x20]; @@ -3879,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits { u8 op_mod[0x10]; u8 roce_address_index[0x10]; - u8 reserved_at_50[0x10]; + u8 reserved_at_50[0xc]; + u8 vhca_port_num[0x4]; u8 reserved_at_60[0x20]; }; @@ -5311,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_at_0[0x14]; + u8 reserved_at_0[0x12]; + u8 affiliation[0x1]; + u8 reserved_at_e[0x1]; u8 disable_uc_local_lb[0x1]; u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; @@ -5532,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits { u8 op_mod[0x10]; u8 reserved_at_40[0x40]; + u8 sw_owner_id[4][0x20]; }; struct mlx5_ifc_init2rtr_qp_out_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 62af7512dabb..4778d41085d4 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -473,6 +473,11 @@ struct mlx5_core_qp { int pid; }; +struct mlx5_core_dct { + struct mlx5_core_qp mqp; + struct completion drained; +}; + struct mlx5_qp_path { u8 fl_free_ar; u8 rsvd3; @@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, return radix_tree_lookup(&dev->priv.mkey_table.tree, key); } +int 
mlx5_core_create_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *qp, + u32 *in, int inlen); int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *in, @@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, struct mlx5_core_qp *qp); int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, + struct mlx5_core_dct *dct); int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, u32 *out, int outlen); +int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, + u32 *out, int outlen); int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, u32 timeout_usec); diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 88441f5ece25..7e8f281f8c00 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen); void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); +struct mlx5_hairpin_params { + u8 log_data_size; + u8 log_num_packets; + u16 q_counter; + int num_channels; +}; + +struct mlx5_hairpin { + struct mlx5_core_dev *func_mdev; + struct mlx5_core_dev *peer_mdev; + + int num_channels; + + u32 *rqn; + u32 *sqn; +}; + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params); + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index aaa0bb9e7655..64e193e87394 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, struct mlx5_hca_vport_context *req); int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); + +int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, + struct mlx5_core_dev *port_mdev); +int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index ea818ff739cd..ad06d42adb1a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); -void unmap_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen, int even_cows); int follow_pte_pmd(struct mm_struct *mm, unsigned long address, unsigned long *start, unsigned long *end, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); @@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address, int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); -static inline void unmap_shared_mapping_range(struct address_space *mapping, - loff_t const holebegin, loff_t const holelen) -{ - unmap_mapping_range(mapping, holebegin, holelen, 0); -} - extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); @@ -1344,6 +1336,10 @@ extern 
int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); +void unmap_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t nr, bool even_cows); +void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows); #else static inline int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) @@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk, BUG(); return -EFAULT; } +static inline void unmap_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t nr, bool even_cows) { } +static inline void unmap_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen, int even_cows) { } #endif -extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, - unsigned int gup_flags); +static inline void unmap_shared_mapping_range(struct address_space *mapping, + loff_t const holebegin, loff_t const holelen) +{ + unmap_mapping_range(mapping, holebegin, holelen, 0); +} + +extern int access_process_vm(struct task_struct *tsk, unsigned long addr, + void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, @@ -2069,8 +2075,8 @@ static inline void zero_resv_unavail(void) {} #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_zone(unsigned long, int, unsigned long, - unsigned long, enum memmap_context); +extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, + enum memmap_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2538,7 +2544,8 @@ void sparse_mem_maps_populate_node(struct page **map_map, unsigned long map_count, int nodeid); -struct page *sparse_mem_map_populate(unsigned long pnum, int nid); +struct page *sparse_mem_map_populate(unsigned long pnum, int nid, + struct vmem_altmap *altmap); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -2546,20 +2553,17 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; -void *__vmemmap_alloc_block_buf(unsigned long size, int node, - struct vmem_altmap *altmap); -static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) -{ - return __vmemmap_alloc_block_buf(size, node, NULL); -} - +void *vmemmap_alloc_block_buf(unsigned long size, int node); +void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, int node); -int vmemmap_populate(unsigned long start, unsigned long end, int node); +int vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap); void vmemmap_populate_print_last(void); #ifdef CONFIG_MEMORY_HOTPLUG -void vmemmap_free(unsigned 
long start, unsigned long end); +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap); #endif void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, unsigned long nr_pages); @@ -2570,8 +2574,8 @@ enum mf_flags { MF_MUST_KILL = 1 << 2, MF_SOFT_OFFLINE = 1 << 3, }; -extern int memory_failure(unsigned long pfn, int trapno, int flags); -extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); +extern int memory_failure(unsigned long pfn, int flags); +extern void memory_failure_queue(unsigned long pfn, int flags); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); #define put_hwpoison_page(page) put_page(page) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cfd0ac4e5e0e..fd1af6b9591d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -31,28 +31,56 @@ struct hmm; * it to keep track of whatever it is we are using the page for at the * moment. Note that we have no way to track which tasks are using * a page, though if it is a pagecache page, rmap structures can tell us - * who is mapping it. + * who is mapping it. If you allocate the page using alloc_pages(), you + * can use some of the space in struct page for your own purposes. * - * The objects in struct page are organized in double word blocks in - * order to allows us to use atomic double word operations on portions - * of struct page. That is currently only used by slub but the arrangement - * allows the use of atomic double word operations on the flags/mapping - * and lru list pointers also. + * Pages that were once in the page cache may be found under the RCU lock + * even after they have been recycled to a different purpose. The page + * cache reads and writes some of the fields in struct page to pin the + * page before checking that it's still in the page cache. It is vital + * that all users of struct page: + * 1. Use the first word as PageFlags. + * 2. Clear or preserve bit 0 of page->compound_head. It is used as + * PageTail for compound pages, and the page cache must not see false + * positives. Some users put a pointer here (guaranteed to be at least + * 4-byte aligned), other users avoid using the field altogether. + * 3. page->_refcount must either not be used, or must be used in such a + * way that other CPUs temporarily incrementing and then decrementing the + * refcount does not cause problems. On receiving the page from + * alloc_pages(), the refcount will be positive. + * 4. Either preserve page->_mapcount or restore it to -1 before freeing it. + * + * If you allocate pages of order > 0, you can use the fields in the struct + * page associated with each page, but bear in mind that the pages may have + * been inserted individually into the page cache, so you must use the above + * four fields in a compatible way for each struct page. + * + * SLUB uses cmpxchg_double() to atomically update its freelist and + * counters. That requires that freelist & counters be adjacent and + * double-word aligned. We align all struct pages to double-word + * boundaries, and ensure that 'freelist' is aligned within the + * struct. 
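The constraint spelled out in the last paragraph of this comment is what the _struct_page_alignment macro defined just below encodes: the SLUB freelist/counters pair must be adjacent and the whole page descriptor double-word aligned so that cmpxchg_double() (cmpxchg16b on x86-64) can update both words atomically. The following is a minimal userspace sketch of that layout contract, not kernel code; mock_page is an invented type and a plain GCC aligned attribute stands in for the kernel's __aligned() macro.

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: a cut-down page descriptor that keeps the
 * freelist pointer and its counters word adjacent, with the pair
 * starting on a double-word boundary as cmpxchg_double requires. */
struct mock_page {
	unsigned long flags;		/* word 0 */
	void *mapping;			/* word 1 */
	void *freelist;			/* word 2: first half of the pair */
	unsigned long counters;		/* word 3: second half of the pair */
} __attribute__((aligned(2 * sizeof(unsigned long))));

_Static_assert(offsetof(struct mock_page, counters) ==
	       offsetof(struct mock_page, freelist) + sizeof(void *),
	       "freelist and counters must be adjacent");
_Static_assert(offsetof(struct mock_page, freelist) %
	       (2 * sizeof(unsigned long)) == 0,
	       "the pair must start on a double-word boundary");

int main(void)
{
	printf("sizeof=%zu alignof=%zu\n",
	       sizeof(struct mock_page), alignof(struct mock_page));
	return 0;
}

The real struct page packs many more unions into those words, but any rearrangement still has to satisfy both assertions.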
*/ +#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE +#define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) +#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) +#define _slub_counter_t unsigned long +#else +#define _slub_counter_t unsigned int +#endif +#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ +#define _struct_page_alignment +#define _slub_counter_t unsigned int +#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ + struct page { /* First double word block */ unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ union { - struct address_space *mapping; /* If low bit clear, points to - * inode address_space, or NULL. - * If page mapped as anonymous - * memory, low bit is set, and - * it points to anon_vma object - * or KSM private structure. See - * PAGE_MAPPING_ANON and - * PAGE_MAPPING_KSM. - */ + /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */ + struct address_space *mapping; + void *s_mem; /* slab first object */ atomic_t compound_mapcount; /* first tail page */ /* page_deferred_list().next -- second tail page */ @@ -66,40 +94,27 @@ struct page { }; union { -#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ - defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - /* Used for cmpxchg_double in slub */ - unsigned long counters; -#else - /* - * Keep _refcount separate from slub cmpxchg_double data. - * As the rest of the double word is protected by slab_lock - * but _refcount is not. - */ - unsigned counters; -#endif - struct { + _slub_counter_t counters; + unsigned int active; /* SLAB */ + struct { /* SLUB */ + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + int units; /* SLOB */ + + struct { /* Page cache */ + /* + * Count of ptes mapped in mms, to show when + * page is mapped & limit reverse map searches. + * + * Extra information about page type may be + * stored here for pages that are never mapped, + * in which case the value MUST BE <= -2. + * See page-flags.h for more details. + */ + atomic_t _mapcount; - union { - /* - * Count of ptes mapped in mms, to show when - * page is mapped & limit reverse map searches. - * - * Extra information about page type may be - * stored here for pages that are never mapped, - * in which case the value MUST BE <= -2. - * See page-flags.h for more details. - */ - atomic_t _mapcount; - - unsigned int active; /* SLAB */ - struct { /* SLUB */ - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; - }; - int units; /* SLOB */ - }; /* * Usage count, *USE WRAPPER FUNCTION* when manual * accounting. See page_ref.h @@ -109,8 +124,6 @@ struct page { }; /* - * Third double word block - * * WARNING: bit 0 of the first word encode PageTail(). That means * the rest users of the storage space MUST NOT use the bit to * avoid collision and false-positive PageTail(). @@ -145,19 +158,9 @@ struct page { unsigned long compound_head; /* If bit zero is set */ /* First tail page only */ -#ifdef CONFIG_64BIT - /* - * On 64 bit system we have enough space in struct page - * to encode compound_dtor and compound_order with - * unsigned int. It can help compiler generate better or - * smaller code on some archtectures. 
- */ - unsigned int compound_dtor; - unsigned int compound_order; -#else - unsigned short int compound_dtor; - unsigned short int compound_order; -#endif + unsigned char compound_dtor; + unsigned char compound_order; + /* two/six bytes available here */ }; #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS @@ -171,15 +174,14 @@ struct page { #endif }; - /* Remainder is not double word aligned */ union { - unsigned long private; /* Mapping-private opaque data: - * usually used for buffer_heads - * if PagePrivate set; used for - * swp_entry_t if PageSwapCache; - * indicates order in the buddy - * system if PG_buddy is set. - */ + /* + * Mapping-private opaque data: + * Usually used for buffer_heads if PagePrivate + * Used for swp_entry_t if PageSwapCache + * Indicates order in the buddy system if PageBuddy + */ + unsigned long private; #if USE_SPLIT_PTE_PTLOCKS #if ALLOC_SPLIT_PTLOCKS spinlock_t *ptl; @@ -212,15 +214,7 @@ struct page { #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS int _last_cpupid; #endif -} -/* - * The struct page can be forced to be double word aligned so that atomic ops - * on double words work. The SLUB allocator can make use of such a feature. - */ -#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE - __aligned(2 * sizeof(unsigned long)) -#endif -; +} _struct_page_alignment; #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b25dc9db19fc..2d07a1ed5a31 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -2,6 +2,7 @@ #ifndef _LINUX_MMU_NOTIFIER_H #define _LINUX_MMU_NOTIFIER_H +#include <linux/types.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> @@ -10,6 +11,9 @@ struct mmu_notifier; struct mmu_notifier_ops; +/* mmu_notifier_ops flags */ +#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01) + #ifdef CONFIG_MMU_NOTIFIER /* @@ -27,6 +31,15 @@ struct mmu_notifier_mm { struct mmu_notifier_ops { /* + * Flags to specify behavior of callbacks for this MMU notifier. + * Used to determine which context an operation may be called. + * + * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not + * block + */ + int flags; + + /* * Called either by mmu_notifier_unregister or when the mm is * being destroyed by exit_mmap, always before all pages are * freed. This can run concurrently with other mmu notifier @@ -137,6 +150,10 @@ struct mmu_notifier_ops { * page. Pages will no longer be referenced by the linux * address space but may still be referenced by sptes until * the last refcount is dropped. + * + * If both of these callbacks cannot block, and invalidate_range + * cannot block, mmu_notifier_ops.flags should have + * MMU_INVALIDATE_DOES_NOT_BLOCK set. */ void (*invalidate_range_start)(struct mmu_notifier *mn, struct mm_struct *mm, @@ -159,12 +176,13 @@ struct mmu_notifier_ops { * external TLB range needs to be flushed. For more in depth * discussion on this see Documentation/vm/mmu_notifier.txt * - * The invalidate_range() function is called under the ptl - * spin-lock and not allowed to sleep. - * * Note that this function might be called with just a sub-range * of what was passed to invalidate_range_start()/end(), if * called between those functions. + * + * If this callback cannot block, and invalidate_range_{start,end} + * cannot block, mmu_notifier_ops.flags should have + * MMU_INVALIDATE_DOES_NOT_BLOCK set. 
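MMU_INVALIDATE_DOES_NOT_BLOCK, added a few hunks up, is a promise recorded in mmu_notifier_ops.flags that none of the invalidate callbacks sleep; the new mm_has_blockable_invalidate_notifiers() declared just below lets callers ask whether any registered notifier did not make that promise. Below is a rough userspace sketch of that check; the mock_* types are invented stand-ins for the real mmu_notifier structures, and the kernel walks an RCU-protected list rather than a plain singly linked one.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)

/* Stand-ins for struct mmu_notifier_ops / struct mmu_notifier; only
 * the fields needed for the flag check are modelled. */
struct mock_ops {
	int flags;
};

struct mock_notifier {
	const struct mock_ops *ops;
	struct mock_notifier *next;
};

/* Returns true if any registered notifier has not promised that its
 * invalidate callbacks are non-blocking. */
static bool has_blockable_invalidate(const struct mock_notifier *head)
{
	for (; head; head = head->next)
		if (!(head->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK))
			return true;
	return false;
}

int main(void)
{
	const struct mock_ops nonblocking = { .flags = MMU_INVALIDATE_DOES_NOT_BLOCK };
	const struct mock_ops blocking = { .flags = 0 };
	struct mock_notifier b = { .ops = &blocking, .next = NULL };
	struct mock_notifier a = { .ops = &nonblocking, .next = &b };

	printf("%d\n", has_blockable_invalidate(&a));	/* prints 1 */
	return 0;
}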
*/ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); @@ -218,6 +236,7 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, bool only_end); extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end); +extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm); static inline void mmu_notifier_release(struct mm_struct *mm) { @@ -457,6 +476,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, { } +static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm) +{ + return false; +} + static inline void mmu_notifier_mm_init(struct mm_struct *mm) { } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 67f2e3c38939..7522a6987595 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1166,8 +1166,16 @@ extern unsigned long usemap_size(void); /* * We use the lower bits of the mem_map pointer to store - * a little bit of information. There should be at least - * 3 bits here due to 32-bit alignment. + * a little bit of information. The pointer is calculated + * as mem_map - section_nr_to_pfn(pnum). The result is + * aligned to the minimum alignment of the two values: + * 1. All mem_map arrays are page-aligned. + * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT + * lowest bits. PFN_SECTION_SHIFT is arch-specific + * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the + * worst combination is powerpc with 256k pages, + * which results in PFN_SECTION_SHIFT equal 6. + * To sum it up, at least 6 bits are available. */ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index abb6dc2ebbf8..48fb2b43c35a 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -229,6 +229,12 @@ struct hda_device_id { unsigned long driver_data; }; +struct sdw_device_id { + __u16 mfg_id; + __u16 part_id; + kernel_ulong_t driver_data; +}; + /* * Struct used for matching a device */ @@ -452,6 +458,19 @@ struct spi_device_id { kernel_ulong_t driver_data; /* Data private to the driver */ }; +/* SLIMbus */ + +#define SLIMBUS_NAME_SIZE 32 +#define SLIMBUS_MODULE_PREFIX "slim:" + +struct slim_device_id { + __u16 manf_id, prod_code; + __u16 dev_index, instance; + + /* Data private to the driver */ + kernel_ulong_t driver_data; +}; + #define SPMI_NAME_SIZE 32 #define SPMI_MODULE_PREFIX "spmi:" diff --git a/include/linux/module.h b/include/linux/module.h index 1d8f245967be..8dc7065d904d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -19,6 +19,7 @@ #include <linux/jump_label.h> #include <linux/export.h> #include <linux/rbtree_latch.h> +#include <linux/error-injection.h> #include <linux/percpu.h> #include <asm/module.h> @@ -475,6 +476,11 @@ struct module { ctor_fn_t *ctors; unsigned int num_ctors; #endif + +#ifdef CONFIG_FUNCTION_ERROR_INJECTION + struct error_injection_entry *ei_funcs; + unsigned int num_ei_funcs; +#endif } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} @@ -606,6 +612,9 @@ int ref_module(struct module *a, struct module *b); __mod ? __mod->name : "kernel"; \ }) +/* Dereference module function descriptor */ +void *dereference_module_function_descriptor(struct module *mod, void *ptr); + /* For kallsyms to ask for address resolution. 
namebuf should be at * least KSYM_NAME_LEN long: a pointer to namebuf is returned if * found, otherwise NULL. */ @@ -760,6 +769,13 @@ static inline bool is_module_sig_enforced(void) return false; } +/* Dereference module function descriptor */ +static inline +void *dereference_module_function_descriptor(struct module *mod, void *ptr) +{ + return ptr; +} + #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h index ea96d4c82be7..5fc6bb2fefad 100644 --- a/include/linux/mux/consumer.h +++ b/include/linux/mux/consumer.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * mux/consumer.h - definitions for the multiplexer consumer interface * * Copyright (C) 2017 Axentia Technologies AB * * Author: Peter Rosin <peda@axentia.se> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _LINUX_MUX_CONSUMER_H diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h index 35c3579c3304..627a2c6bc02d 100644 --- a/include/linux/mux/driver.h +++ b/include/linux/mux/driver.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * mux/driver.h - definitions for the multiplexer driver interface * * Copyright (C) 2017 Axentia Technologies AB * * Author: Peter Rosin <peda@axentia.se> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _LINUX_MUX_DRIVER_H diff --git a/include/linux/net.h b/include/linux/net.h index caeb159abda5..91216b16feb7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -147,7 +147,7 @@ struct proto_ops { int (*getname) (struct socket *sock, struct sockaddr *addr, int *sockaddr_len, int peer); - unsigned int (*poll) (struct file *file, struct socket *sock, + __poll_t (*poll) (struct file *file, struct socket *sock, struct poll_table_struct *wait); int (*ioctl) (struct socket *sock, unsigned int cmd, unsigned long arg); @@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); -int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); /* Routine returns the IP overhead imposed by a (caller-protected) socket. */ diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h new file mode 100644 index 000000000000..bebeaad897cc --- /dev/null +++ b/include/linux/net_dim.h @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include <linux/module.h> + +struct net_dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct net_dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct net_dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct net_dim { /* Adaptive Moderation */ + u8 state; + struct net_dim_stats prev_stats; + struct net_dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + NET_DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + NET_DIM_START_MEASURE, + NET_DIM_MEASURE_IN_PROGRESS, + NET_DIM_APPLY_NEW_PROFILE, +}; + +enum { + NET_DIM_PARKING_ON_TOP, + NET_DIM_PARKING_TIRED, + NET_DIM_GOING_RIGHT, + NET_DIM_GOING_LEFT, +}; + +enum { + NET_DIM_STATS_WORSE, + NET_DIM_STATS_SAME, + NET_DIM_STATS_BETTER, +}; + +enum { + NET_DIM_STEPPED, + NET_DIM_TOO_TIRED, + NET_DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ +#define NET_DIM_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct net_dim_cq_moder +profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_EQE_PROFILES, + NET_DIM_CQE_PROFILES, +}; + +static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode, + int ix) +{ + struct net_dim_cq_moder cq_moder; + + cq_moder = profile[cq_period_mode][ix]; + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = NET_DIM_DEF_PROFILE_CQE; + else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_profile(rx_cq_period_mode, default_profile_ix); +} + +static inline bool net_dim_on_top(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: 
+ return true; + case NET_DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* NET_DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void net_dim_turn(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + dim->tune_state = NET_DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case NET_DIM_GOING_LEFT: + dim->tune_state = NET_DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct net_dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return NET_DIM_TOO_TIRED; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return NET_DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case NET_DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return NET_DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return NET_DIM_STEPPED; +} + +static inline void net_dim_park_on_top(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = NET_DIM_PARKING_ON_TOP; +} + +static inline void net_dim_park_tired(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = NET_DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct net_dim *dim) +{ + dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : + NET_DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct net_dim_stats *curr, + struct net_dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (!prev->epms) + return NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + return NET_DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct net_dim_stats *curr_stats, + struct net_dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case NET_DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case NET_DIM_GOING_RIGHT: + case NET_DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_BETTER) + net_dim_turn(dim); + + if (net_dim_on_top(dim)) { + net_dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case NET_DIM_ON_EDGE: + net_dim_park_on_top(dim); + break; + case NET_DIM_TOO_TIRED: + net_dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != NET_DIM_PARKING_ON_TOP) || + (dim->tune_state != NET_DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void net_dim_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct net_dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define NET_DIM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void net_dim_calc_stats(struct net_dim_sample *start, + struct net_dim_sample *end, + struct net_dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +static inline void net_dim(struct net_dim *dim, + struct net_dim_sample end_sample) +{ + struct net_dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case NET_DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < NET_DIM_NEVENTS) + break; + net_dim_calc_stats(&dim->start_sample, &end_sample, + &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = NET_DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + /* fall through */ + case NET_DIM_START_MEASURE: + dim->state = NET_DIM_MEASURE_IN_PROGRESS; + break; + case NET_DIM_APPLY_NEW_PROFILE: + break; + } +} + +#endif /* NET_DIM_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index b1b0ca7ccb2b..db84c516bcfb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -78,6 +78,8 @@ enum { NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ + NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ + /* * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe @@ -97,6 +99,7 @@ enum { 
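Stepping back to the net_dim.h header added above: its samples carry free-running, narrow counters (a u16 event counter and u32 packet/byte counters), so net_dim_calc_stats() and net_dim() compute every delta with BIT_GAP(), i.e. modulo-2^bits arithmetic that stays correct across a counter wrap. The small standalone demonstration below reuses the macro definitions from the header; everything else is userspace scaffolding for the demo.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same macros as in net_dim.h, restated for userspace. */
#define BIT_ULL(n)		(1ULL << (n))
#define BITS_PER_TYPE(type)	(sizeof(type) * 8)
#define BIT_GAP(bits, end, start) \
	((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

int main(void)
{
	uint32_t start = 0xfffffff0u;	/* counter just before wrapping */
	uint32_t end   = 0x00000010u;	/* counter after wrapping */

	/* 0x10 increments reach zero, 0x10 more reach 0x10 */
	uint64_t pkts = BIT_GAP(BITS_PER_TYPE(uint32_t), end, start);
	assert(pkts == 0x20);

	printf("packets since last sample: %llu\n",
	       (unsigned long long)pkts);
	return 0;
}

The same trick is used on the u16 event counter in net_dim() when deciding whether NET_DIM_NEVENTS events have elapsed since the last sample.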
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) #define NETIF_F_FSO __NETIF_F(FSO) #define NETIF_F_GRO __NETIF_F(GRO) +#define NETIF_F_GRO_HW __NETIF_F(GRO_HW) #define NETIF_F_GSO __NETIF_F(GSO) #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef789e1d679e..4c77f39ebd65 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -44,6 +44,7 @@ #include <net/dcbnl.h> #endif #include <net/netprio_cgroup.h> +#include <net/xdp.h> #include <linux/netdev_features.h> #include <linux/neighbour.h> @@ -686,6 +687,7 @@ struct netdev_rx_queue { #endif struct kobject kobj; struct net_device *dev; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; /* @@ -778,6 +780,7 @@ enum tc_setup_type { TC_SETUP_BLOCK, TC_SETUP_QDISC_CBS, TC_SETUP_QDISC_RED, + TC_SETUP_QDISC_PRIO, }; /* These structures hold the attributes of bpf state that are being passed @@ -802,9 +805,11 @@ enum bpf_netdev_command { BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY, + BPF_OFFLOAD_MAP_ALLOC, + BPF_OFFLOAD_MAP_FREE, }; -struct bpf_ext_analyzer_ops; +struct bpf_prog_offload_ops; struct netlink_ext_ack; struct netdev_bpf { @@ -820,16 +825,22 @@ struct netdev_bpf { struct { u8 prog_attached; u32 prog_id; + /* flags with which program was installed */ + u32 prog_flags; }; /* BPF_OFFLOAD_VERIFIER_PREP */ struct { struct bpf_prog *prog; - const struct bpf_ext_analyzer_ops *ops; /* callee set */ + const struct bpf_prog_offload_ops *ops; /* callee set */ } verifier; /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ struct { struct bpf_prog *prog; } offload; + /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ + struct { + struct bpf_offloaded_map *offmap; + }; }; }; @@ -840,6 +851,7 @@ struct xfrmdev_ops { void (*xdo_dev_state_free) (struct xfrm_state *x); bool (*xdo_dev_offload_ok) (struct sk_buff *skb, struct xfrm_state *x); + void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); }; #endif @@ -1458,8 +1470,6 @@ enum netdev_priv_flags { * @base_addr: Device I/O address * @irq: Device IRQ number * - * @carrier_changes: Stats to monitor carrier on<->off transitions - * * @state: Generic network queuing layer state, see netdev_state_t * @dev_list: The global list of network devices * @napi_list: List entry used for polling NAPI devices @@ -1495,6 +1505,8 @@ enum netdev_priv_flags { * do not use this in drivers * @rx_nohandler: nohandler dropped packets by core network on * inactive devices, do not use this in drivers + * @carrier_up_count: Number of times the carrier has been up + * @carrier_down_count: Number of times the carrier has been down * * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, @@ -1669,8 +1681,6 @@ struct net_device { unsigned long base_addr; int irq; - atomic_t carrier_changes; - /* * Some hardware also needs these fields (state,dev_list, * napi_list,unreg_list,close_list) but they are not @@ -1708,6 +1718,10 @@ struct net_device { atomic_long_t tx_dropped; atomic_long_t rx_nohandler; + /* Stats to monitor link on/off, flapping */ + atomic_t carrier_up_count; + atomic_t carrier_down_count; + #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; struct iw_public_data *wireless_data; @@ -1724,7 +1738,7 @@ struct net_device { const struct ndisc_ops *ndisc_ops; #endif -#ifdef CONFIG_XFRM +#ifdef CONFIG_XFRM_OFFLOAD const struct xfrmdev_ops *xfrmdev_ops; #endif @@ -1801,12 +1815,9 @@ struct net_device { /* 
Interface address info used in eth_type_trans() */ unsigned char *dev_addr; -#ifdef CONFIG_SYSFS struct netdev_rx_queue *_rx; - unsigned int num_rx_queues; unsigned int real_num_rx_queues; -#endif struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; @@ -2751,7 +2762,8 @@ static inline bool dev_validate_header(const struct net_device *dev, return false; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); +typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, + int len, int size); int register_gifconf(unsigned int family, gifconf_func_t *gifconf); static inline int unregister_gifconf(unsigned int family) { @@ -2791,7 +2803,9 @@ struct softnet_data { struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; - +#ifdef CONFIG_XFRM_OFFLOAD + struct sk_buff_head xfrm_backlog; +#endif #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. @@ -3302,7 +3316,9 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); -int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, + bool *need_copyout); +int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *, unsigned int flags); @@ -3315,6 +3331,7 @@ int dev_get_alias(const struct net_device *, char *, size_t); int dev_change_net_namespace(struct net_device *, struct net *, const char *); int __dev_set_mtu(struct net_device *, int); int dev_set_mtu(struct net_device *, int); +int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); @@ -3323,14 +3340,15 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id); +void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, + struct netdev_bpf *xdp); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -4399,11 +4417,11 @@ do { \ * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) \ - WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ + WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) -#define netdev_WARN_ONCE(dev, condition, format, arg...) \ - WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \ +#define netdev_WARN_ONCE(dev, format, args...) 
\ + WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b24e9b101651..85a1a0b32c66 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -67,6 +67,7 @@ struct nf_hook_ops { struct net_device *dev; void *priv; u_int8_t pf; + bool nat_hook; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; @@ -77,17 +78,28 @@ struct nf_hook_entry { void *priv; }; +struct nf_hook_entries_rcu_head { + struct rcu_head head; + void *allocation; +}; + struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; - /* trailer: pointers to original orig_ops of each hook. - * - * This is not part of struct nf_hook_entry since its only - * needed in slow path (hook register/unregister). + /* trailer: pointers to original orig_ops of each hook, + * followed by rcu_head and scratch space used for freeing + * the structure via call_rcu. * + * This is not part of struct nf_hook_entry since its only + * needed in slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] + * + * For the same reason, we store this at end -- its + * only needed when a hook is deleted, not during + * packet path processing: + * struct nf_hook_entries_rcu_head head */ }; @@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - struct nf_hook_entries *hook_head; + struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef HAVE_JUMP_LABEL @@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, #endif rcu_read_lock(); - hook_head = rcu_dereference(net->nf.hooks[pf][hook]); + switch (pf) { + case NFPROTO_IPV4: + hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); + break; + case NFPROTO_IPV6: + hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); + break; + case NFPROTO_ARP: +#ifdef CONFIG_NETFILTER_FAMILY_ARP + hook_head = rcu_dereference(net->nf.hooks_arp[hook]); +#endif + break; + case NFPROTO_BRIDGE: +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); +#endif + break; +#if IS_ENABLED(CONFIG_DECNET) + case NFPROTO_DECNET: + hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); + break; +#endif + default: + WARN_ON_ONCE(1); + break; + } + if (hook_head) { struct nf_hook_state state; @@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); struct flowi; struct nf_queue_entry; -struct nf_afinfo { - unsigned short family; - __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); - __sum16 (*checksum_partial)(struct sk_buff *skb, - unsigned int hook, - unsigned int dataoff, - unsigned int len, - u_int8_t protocol); - int (*route)(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict); - void (*saveroute)(const struct sk_buff *skb, - struct nf_queue_entry *entry); - int (*reroute)(struct net *net, struct sk_buff *skb, - const struct nf_queue_entry *entry); - int route_key_size; -}; - -extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; -static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) -{ - return rcu_dereference(nf_afinfo[family]); -} - -static inline __sum16 -nf_checksum(struct sk_buff *skb, unsigned 
int hook, unsigned int dataoff, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum(skb, hook, dataoff, protocol); - rcu_read_unlock(); - return csum; -} - -static inline __sum16 -nf_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum_partial(skb, hook, dataoff, len, - protocol); - rcu_read_unlock(); - return csum; -} +__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol, + unsigned short family); -int nf_register_afinfo(const struct nf_afinfo *afinfo); -void nf_unregister_afinfo(const struct nf_afinfo *afinfo); +__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family); +int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict, unsigned short family); +int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); #include <net/flow.h> extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 8e42253e5d4d..34fc80f3eb90 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -122,6 +122,8 @@ struct ip_set_ext { u64 bytes; char *comment; u32 timeout; + u8 packets_op; + u8 bytes_op; }; struct ip_set; @@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, const void *e, bool active); +extern bool ip_set_match_extensions(struct ip_set *set, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, + u32 flags, void *data); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h index bb6fba480118..3d33a2c3f39f 100644 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter) return (u64)atomic64_read(&(counter)->packets); } +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + static inline void ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) + const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, counter); ip_set_add_packets(ext->packets, counter); } - if (flags & IPSET_FLAG_MATCH_COUNTERS) { - mext->packets = ip_set_get_packets(counter); - mext->bytes = ip_set_get_bytes(counter); - } } static inline bool diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 
33f7530f96b9..1313b35c3ab7 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); +struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, + const char *name); void xt_table_unlock(struct xt_table *t); int xt_proto_init(struct net *net, u_int8_t af); diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index dc6111adea06..8dddfb151f00 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h @@ -4,7 +4,17 @@ #include <uapi/linux/netfilter.h> +/* in/out/forward only */ +#define NF_ARP_NUMHOOKS 3 + +/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ +#define NF_DN_NUMHOOKS 7 + +#if IS_ENABLED(CONFIG_DECNET) /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ -#define NF_MAX_HOOKS 8 +#define NF_MAX_HOOKS NF_DN_NUMHOOKS +#else +#define NF_MAX_HOOKS NF_INET_NUMHOOKS +#endif #endif diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 98c03b2462b5..b31dabfdb453 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,7 +6,53 @@ #include <uapi/linux/netfilter_ipv4.h> +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); + +struct nf_queue_entry; + +#ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); +__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); +int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); +int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); +#else +static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol) +{ + return 0; +} +static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, + unsigned int hook, + unsigned int dataoff, + unsigned int len, + u_int8_t protocol) +{ + return 0; +} +static inline int nf_ip_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict) +{ + return -EOPNOTSUPP; +} +static inline int nf_ip_reroute(struct sk_buff *skb, + const struct nf_queue_entry *entry) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_INET */ + #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 47c6b04c28c0..288c597e75b3 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -9,6 +9,17 @@ #include <uapi/linux/netfilter_ipv6.h> +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_queue_entry; + /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. 
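The netfilter refactor running through these hunks drops the RCU-managed struct nf_afinfo and exposes per-family helpers directly: nf_checksum(), nf_checksum_partial(), nf_route() and nf_reroute() now take a family argument, IPv4 gets plain functions (with static inline stubs when CONFIG_INET is off), and IPv6 keeps an ops structure (nf_ipv6_ops, extended in the next hunk) because ipv6 may be modular. The obvious shape for such a dispatcher is a switch on the family; the sketch below is a hedged userspace illustration of that shape, not the kernel's actual implementation, and all names in it (pkt, ipv6_stub_ops, nf_checksum_demo) are invented.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t sum16_t;

enum { FAM_IPV4 = 2, FAM_IPV6 = 10 };	/* like NFPROTO_IPV4 / NFPROTO_IPV6 */

struct pkt { int dummy; };		/* stand-in for struct sk_buff */

static sum16_t ipv4_checksum(const struct pkt *p)
{
	(void)p;
	return 0x1234;			/* IPv4 path is always built in */
}

struct ipv6_stub_ops {
	sum16_t (*checksum)(const struct pkt *p);
};

static const struct ipv6_stub_ops *v6_ops;	/* NULL while ipv6 is not loaded */

static sum16_t nf_checksum_demo(const struct pkt *p, unsigned short family)
{
	switch (family) {
	case FAM_IPV4:
		return ipv4_checksum(p);	/* direct call, no afinfo lookup */
	case FAM_IPV6:
		return v6_ops ? v6_ops->checksum(p) : 0;
	default:
		return 0;
	}
}

int main(void)
{
	struct pkt p = { 0 };

	printf("v4 csum=%#x, v6 csum=%#x (ipv6 not registered)\n",
	       nf_checksum_demo(&p, FAM_IPV4), nf_checksum_demo(&p, FAM_IPV6));
	return 0;
}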
@@ -19,6 +30,14 @@ struct nf_ipv6_ops { void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); + int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); }; #ifdef CONFIG_NETFILTER diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 47adac640191..57ffaa20d564 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -457,7 +457,12 @@ enum lock_type4 { #define NFS4_DEBUG 1 -/* Index of predefined Linux client operations */ +/* + * Index of predefined Linux client operations + * + * To ensure that /proc/net/rpc/nfs remains correctly ordered, please + * append only to this enum when adding new client operations. + */ enum { NFSPROC4_CLNT_NULL = 0, /* Unused */ @@ -480,7 +485,6 @@ enum { NFSPROC4_CLNT_ACCESS, NFSPROC4_CLNT_GETATTR, NFSPROC4_CLNT_LOOKUP, - NFSPROC4_CLNT_LOOKUPP, NFSPROC4_CLNT_LOOKUP_ROOT, NFSPROC4_CLNT_REMOVE, NFSPROC4_CLNT_RENAME, @@ -500,7 +504,6 @@ enum { NFSPROC4_CLNT_SECINFO, NFSPROC4_CLNT_FSID_PRESENT, - /* nfs41 */ NFSPROC4_CLNT_EXCHANGE_ID, NFSPROC4_CLNT_CREATE_SESSION, NFSPROC4_CLNT_DESTROY_SESSION, @@ -518,13 +521,14 @@ enum { NFSPROC4_CLNT_BIND_CONN_TO_SESSION, NFSPROC4_CLNT_DESTROY_CLIENTID, - /* nfs42 */ NFSPROC4_CLNT_SEEK, NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, NFSPROC4_CLNT_CLONE, NFSPROC4_CLNT_COPY, + + NFSPROC4_CLNT_LOOKUPP, }; /* nfs41 types */ diff --git a/include/linux/nospec.h b/include/linux/nospec.h new file mode 100644 index 000000000000..b99bced39ac2 --- /dev/null +++ b/include/linux/nospec.h @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2018 Linus Torvalds. All rights reserved. +// Copyright(c) 2018 Alexei Starovoitov. All rights reserved. +// Copyright(c) 2018 Intel Corporation. All rights reserved. + +#ifndef _LINUX_NOSPEC_H +#define _LINUX_NOSPEC_H + +/** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise + * @index: array element index + * @size: number of elements in array + * + * When @index is out of bounds (@index >= @size), the sign bit will be + * set. Extend the sign bit to all bits and invert, giving a result of + * zero for an out of bounds index, or ~0 if within bounds [0, @size). + */ +#ifndef array_index_mask_nospec +static inline unsigned long array_index_mask_nospec(unsigned long index, + unsigned long size) +{ + /* + * Warn developers about inappropriate array_index_nospec() usage. + * + * Even if the CPU speculates past the WARN_ONCE branch, the + * sign bit of @index is taken into account when generating the + * mask. + * + * This warning is compiled out when the compiler can infer that + * @index and @size are less than LONG_MAX. + */ + if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, + "array_index_nospec() limited to range of [0, LONG_MAX]\n")) + return 0; + + /* + * Always calculate and emit the mask even if the compiler + * thinks the mask is not needed. The compiler does not take + * into account the value of @index under speculation. 
+ */ + OPTIMIZER_HIDE_VAR(index); + return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); +} +#endif + +/* + * array_index_nospec - sanitize an array index after a bounds check + * + * For a code sequence like: + * + * if (index < size) { + * index = array_index_nospec(index, size); + * val = array[index]; + * } + * + * ...if the CPU speculates past the bounds check then + * array_index_nospec() will clamp the index within the range of [0, + * size). + */ +#define array_index_nospec(index, size) \ +({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ + unsigned long _mask = array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ + \ + _i &= _mask; \ + _i; \ +}) +#endif /* _LINUX_NOSPEC_H */ diff --git a/include/linux/ntb.h b/include/linux/ntb.h index c308964777eb..181d16601dd9 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -71,6 +71,7 @@ struct pci_dev; * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. + * @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs */ enum ntb_topo { NTB_TOPO_NONE = -1, @@ -79,6 +80,7 @@ enum ntb_topo { NTB_TOPO_B2B_USD, NTB_TOPO_B2B_DSD, NTB_TOPO_SWITCH, + NTB_TOPO_CROSSLINK, }; static inline int ntb_topo_is_b2b(enum ntb_topo topo) @@ -94,12 +96,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo) static inline char *ntb_topo_string(enum ntb_topo topo) { switch (topo) { - case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; - case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; - case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; - case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; - case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; - case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; + case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; + case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; + case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; + case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; + case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; + case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; + case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK"; } return "NTB_TOPO_INVALID"; } @@ -250,7 +253,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) * @msg_set_mask: See ntb_msg_set_mask(). * @msg_clear_mask: See ntb_msg_clear_mask(). * @msg_read: See ntb_msg_read(). - * @msg_write: See ntb_msg_write(). + * @peer_msg_write: See ntb_peer_msg_write(). 
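The generic array_index_mask_nospec() added in nospec.h above is pure integer arithmetic: for index and size both at most LONG_MAX, the sign bit of (index | (size - 1 - index)) is clear exactly when index < size, so inverting and arithmetically shifting that bit across the word yields ~0 for an in-bounds index and 0 otherwise; array_index_nospec() then ANDs the index with the mask. The userspace re-statement below uses an invented name (index_mask) and, like the kernel, relies on arithmetic right shift of negative values.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

/* Same expression as the generic array_index_mask_nospec(); the sign
 * bit is extended across the word by an arithmetic right shift. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	assert(index_mask(3, 8) == ~0UL);	/* in bounds: index preserved */
	assert(index_mask(8, 8) == 0);		/* one past the end: clamped */
	assert(index_mask(ULONG_MAX, 8) == 0);	/* wildly out of bounds */

	printf("3 & mask = %lu, 8 & mask = %lu\n",
	       3UL & index_mask(3, 8), 8UL & index_mask(8, 8));
	return 0;
}

With the mask in hand, a mispredicted bounds check can at worst make the speculated load hit element 0 instead of attacker-controlled memory.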
*/ struct ntb_dev_ops { int (*port_number)(struct ntb_dev *ntb); @@ -321,8 +324,8 @@ struct ntb_dev_ops { int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); - int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg); - int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg); + u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx); + int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg); }; static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) @@ -384,7 +387,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) /* !ops->msg_set_mask == !ops->msg_count && */ /* !ops->msg_clear_mask == !ops->msg_count && */ !ops->msg_read == !ops->msg_count && - !ops->msg_write == !ops->msg_count && + !ops->peer_msg_write == !ops->msg_count && 1; } @@ -764,7 +767,7 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, resource_size_t *size_align, resource_size_t *size_max) { - if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx))) + if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx))) return -ENOTCONN; return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, @@ -1459,31 +1462,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits) } /** - * ntb_msg_read() - read message register with specified index + * ntb_msg_read() - read inbound message register with specified index * @ntb: NTB device context. - * @midx: Message register index * @pidx: OUT - Port index of peer device a message retrieved from - * @msg: OUT - Data + * @midx: Message register index * * Read data from the specified message register. Source port index of a * message is retrieved as well. * - * Return: Zero on success, otherwise a negative error number. + * Return: The value of the inbound message register. */ -static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, - u32 *msg) +static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx) { if (!ntb->ops->msg_read) - return -EINVAL; + return ~(u32)0; - return ntb->ops->msg_read(ntb, midx, pidx, msg); + return ntb->ops->msg_read(ntb, pidx, midx); } /** - * ntb_msg_write() - write data to the specified message register + * ntb_peer_msg_write() - write data to the specified peer message register * @ntb: NTB device context. - * @midx: Message register index * @pidx: Port index of peer device a message being sent to + * @midx: Message register index * @msg: Data to send * * Send data to a specified peer device using the defined message register. @@ -1492,13 +1493,13 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, * * Return: Zero on success, otherwise a negative error number. */ -static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, - u32 msg) +static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, + u32 msg) { - if (!ntb->ops->msg_write) + if (!ntb->ops->peer_msg_write) return -EINVAL; - return ntb->ops->msg_write(ntb, midx, pidx, msg); + return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); } #endif diff --git a/include/linux/of.h b/include/linux/of.h index d3dea1d1e3a9..da1ee95241c1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _LINUX_OF_H #define _LINUX_OF_H /* @@ -9,11 +10,6 @@ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. 
* Updates for SPARC64 by David S. Miller * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/bitops.h> @@ -544,6 +540,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur); bool of_console_check(struct device_node *dn, char *name, int index); +extern int of_cpu_node_to_id(struct device_node *np); + #else /* CONFIG_OF */ static inline void of_core_init(void) @@ -916,6 +914,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag { } +static inline int of_cpu_node_to_id(struct device_node *np) +{ + return -ENODEV; +} + #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index b90d8ec57c1f..fd706cdf255c 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -1,13 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * OF helpers for DMA request / controller * * Based on of_gpio.h * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_OF_DMA_H diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 013c5418aeec..b9cd9ebdf9b9 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Definitions for working with the Flattened Device Tree data format * * Copyright 2009 Benjamin Herrenschmidt, IBM Corp * benh@kernel.crashing.org - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. 
*/ #ifndef _LINUX_OF_FDT_H @@ -47,6 +44,12 @@ extern void *initial_boot_params; extern char __dtb_start[]; extern char __dtb_end[]; +/* Other Prototypes */ +extern u64 of_flat_dt_translate_address(unsigned long node); +extern void of_fdt_limit_memory(int limit); +#endif /* CONFIG_OF_FLATTREE */ + +#ifdef CONFIG_OF_EARLY_FLATTREE /* For scanning the flat device-tree at boot time */ extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, int depth, void *data), @@ -77,7 +80,6 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size); extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool no_map); -extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ @@ -97,16 +99,14 @@ extern void unflatten_device_tree(void); extern void unflatten_and_copy_device_tree(void); extern void early_init_devtree(void *); extern void early_get_first_memblock_info(void *, phys_addr_t *); -extern u64 of_flat_dt_translate_address(unsigned long node); -extern void of_fdt_limit_memory(int limit); -#else /* CONFIG_OF_FLATTREE */ +#else /* CONFIG_OF_EARLY_FLATTREE */ static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } static inline void early_init_fdt_scan_reserved_mem(void) {} static inline void early_init_fdt_reserve_self(void) {} static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } static inline void unflatten_device_tree(void) {} static inline void unflatten_and_copy_device_tree(void) {} -#endif /* CONFIG_OF_FLATTREE */ +#endif /* CONFIG_OF_EARLY_FLATTREE */ #endif /* __ASSEMBLY__ */ #endif /* _LINUX_OF_FDT_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 1fe205582111..163b79ecd01a 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * OF helpers for the GPIO API * * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <avorontsov@ru.mvista.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #ifndef __LINUX_OF_GPIO_H @@ -31,7 +27,7 @@ enum of_gpio_flags { OF_GPIO_ACTIVE_LOW = 0x1, OF_GPIO_SINGLE_ENDED = 0x2, OF_GPIO_OPEN_DRAIN = 0x4, - OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8, + OF_GPIO_TRANSITORY = 0x8, }; #ifdef CONFIG_OF_GPIO diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index 3e058f05ab04..01038a6aade0 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * OF graph binding parsing helpers * @@ -6,10 +7,6 @@ * * Copyright (C) 2012 Renesas Electronics Corp. * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
*/ #ifndef __LINUX_OF_GRAPH_H #define __LINUX_OF_GRAPH_H diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index bf588a05d0d0..88865e0ebf4d 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -9,8 +9,7 @@ struct pci_dev; struct of_phandle_args; struct device_node; -#ifdef CONFIG_OF_PCI -int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); +#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI) struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn); int of_pci_get_devfn(struct device_node *np); @@ -23,11 +22,6 @@ int of_pci_map_rid(struct device_node *np, u32 rid, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); #else -static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) -{ - return 0; -} - static inline struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) { diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index 7e09244bb679..d0b183ab65c6 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -1,13 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Definitions for building a device tree by calling into the * Open Firmware PROM. * * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #ifndef _LINUX_OF_PDT_H diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index fb908e598348..84a966623e78 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -1,14 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _LINUX_OF_PLATFORM_H #define _LINUX_OF_PLATFORM_H /* * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * */ #include <linux/device.h> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 3ec44e27aa9d..50c2b8786831 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -46,11 +46,6 @@ * guarantees that this bit is cleared for a page when it first is entered into * the page cache. * - * PG_highmem pages are not permanently mapped into the kernel virtual address - * space, they need to be kmapped separately for doing IO on the pages. The - * struct page (these bits with information) are always mapped into kernel - * address space... - * * PG_hwpoison indicates that a page got corrupted in hardware and contains * data with incorrect ECC bits that triggered a machine check. Accessing is * not safe since it may cause another machine check. Don't touch! 
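As an aside on the PG_highmem note removed in the page-flags.h hunk above: such pages have no permanent kernel virtual mapping, so the kernel must map them temporarily before touching their contents. A minimal sketch of that kmap()/kunmap() pattern follows; the helper name example_copy_from_page() and its arguments are made up for illustration and are not part of this commit.

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy @len bytes out of a page that may live in highmem. */
static void example_copy_from_page(struct page *page, void *buf, size_t len)
{
	void *vaddr = kmap(page);	/* temporary mapping; reuses the lowmem mapping if one exists */

	memcpy(buf, vaddr, len);
	kunmap(page);			/* drop the temporary mapping */
}
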
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 5fb6580f7f23..6dc456ac6136 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -9,14 +9,14 @@ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H -/* 14 pointers + two long's align the pagevec structure to a power of two */ -#define PAGEVEC_SIZE 14 +/* 15 pointers + header align the pagevec structure to a power of two */ +#define PAGEVEC_SIZE 15 struct page; struct address_space; struct pagevec { - unsigned long nr; + unsigned char nr; bool percpu_pvec_drained; struct page *pages[PAGEVEC_SIZE]; }; diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index 3cc06b059017..df28af5cef21 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h @@ -24,43 +24,12 @@ #define PCIE_LINK_STATE_CLKPM 4 #ifdef CONFIG_PCIEASPM -void pcie_aspm_init_link_state(struct pci_dev *pdev); -void pcie_aspm_exit_link_state(struct pci_dev *pdev); -void pcie_aspm_pm_state_change(struct pci_dev *pdev); -void pcie_aspm_powersave_config_link(struct pci_dev *pdev); void pci_disable_link_state(struct pci_dev *pdev, int state); void pci_disable_link_state_locked(struct pci_dev *pdev, int state); void pcie_no_aspm(void); #else -static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) -{ -} -static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) -{ -} -static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) -{ -} -static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) -{ -} -static inline void pci_disable_link_state(struct pci_dev *pdev, int state) -{ -} -static inline void pcie_no_aspm(void) -{ -} +static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } +static inline void pcie_no_aspm(void) { } #endif -#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ -void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev); -void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev); -#else -static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) -{ -} -static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) -{ -} -#endif #endif /* LINUX_ASPM_H */ diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index d1f9fdade1e0..0dd1a3f7b309 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h @@ -17,91 +17,90 @@ static inline void * pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); + return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void * pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, - size, dma_handle, GFP_ATOMIC); + return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); } static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); + dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); } static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { - return dma_map_single(hwdev == NULL ? 
NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); + return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); } static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) { - dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); + dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); } static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, unsigned long offset, size_t size, int direction) { - return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); + return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); } static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, size_t size, int direction) { - dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); + dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); } static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { - return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); + return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) { - dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); + dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { - dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); + dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction) { - dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); + dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { - dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); + dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { - dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); + dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); } static inline int diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 809c2f1873ac..baadad1aabbc 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright 2016 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation (the "GPL"). 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 (GPLv2) for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 (GPLv2) along with this source code. */ #ifndef DRIVERS_PCI_ECAM_H #define DRIVERS_PCI_ECAM_H diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h index 263b89ea5705..f42b0fd4b4bc 100644 --- a/include/linux/pci-ep-cfs.h +++ b/include/linux/pci-ep-cfs.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /** * PCI Endpoint ConfigFS header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. */ #ifndef __LINUX_PCI_EP_CFS_H diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index f7a04e1af112..a1a5e5df0f66 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /** * PCI Endpoint *Controller* (EPC) header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. */ #ifndef __LINUX_PCI_EPC_H @@ -39,17 +36,20 @@ enum pci_epc_irq_type { * @owner: the module owner containing the ops */ struct pci_epc_ops { - int (*write_header)(struct pci_epc *pci_epc, + int (*write_header)(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); - int (*set_bar)(struct pci_epc *epc, enum pci_barno bar, + int (*set_bar)(struct pci_epc *epc, u8 func_no, + enum pci_barno bar, dma_addr_t bar_phys, size_t size, int flags); - void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar); - int (*map_addr)(struct pci_epc *epc, phys_addr_t addr, - u64 pci_addr, size_t size); - void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr); - int (*set_msi)(struct pci_epc *epc, u8 interrupts); - int (*get_msi)(struct pci_epc *epc); - int (*raise_irq)(struct pci_epc *pci_epc, + void (*clear_bar)(struct pci_epc *epc, u8 func_no, + enum pci_barno bar); + int (*map_addr)(struct pci_epc *epc, u8 func_no, + phys_addr_t addr, u64 pci_addr, size_t size); + void (*unmap_addr)(struct pci_epc *epc, u8 func_no, + phys_addr_t addr); + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); + int (*get_msi)(struct pci_epc *epc, u8 func_no); + int (*raise_irq)(struct pci_epc *epc, u8 func_no, enum pci_epc_irq_type type, u8 interrupt_num); int (*start)(struct pci_epc *epc); void (*stop)(struct pci_epc *epc); @@ -124,17 +124,21 @@ void pci_epc_destroy(struct pci_epc *epc); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_linkup(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); -int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr); -int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, +int pci_epc_write_header(struct pci_epc *epc, u8 func_no, + struct pci_epf_header *hdr); +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, + enum pci_barno bar, dma_addr_t bar_phys, size_t size, int flags); -void 
pci_epc_clear_bar(struct pci_epc *epc, int bar); -int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar); +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr, u64 pci_addr, size_t size); -void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr); -int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts); -int pci_epc_get_msi(struct pci_epc *epc); -int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, - u8 interrupt_num); +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, + phys_addr_t phys_addr); +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, + enum pci_epc_irq_type type, u8 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); struct pci_epc *pci_epc_get(const char *epc_name); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 60d551a9a1ba..e897bf076701 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /** * PCI Endpoint *Function* (EPF) header file * * Copyright (C) 2017 Texas Instruments * Author: Kishon Vijay Abraham I <kishon@ti.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 of - * the License as published by the Free Software Foundation. */ #ifndef __LINUX_PCI_EPF_H diff --git a/include/linux/pci.h b/include/linux/pci.h index c170c9250c8b..024a1beda008 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -48,17 +48,17 @@ * In the interest of not exposing interfaces to user-space unnecessarily, * the following kernel-only defines are being added here. 
*/ -#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) /* pci_slot represents a physical slot */ struct pci_slot { - struct pci_bus *bus; /* The bus this slot is on */ - struct list_head list; /* node in list of slots on this bus */ - struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ - unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ - struct kobject kobj; + struct pci_bus *bus; /* Bus this slot is on */ + struct list_head list; /* Node in list of slots */ + struct hotplug_slot *hotplug; /* Hotplug info (move here) */ + unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ + struct kobject kobj; }; static inline const char *pci_slot_name(const struct pci_slot *slot) @@ -72,9 +72,7 @@ enum pci_mmap_state { pci_mmap_mem }; -/* - * For PCI devices, the region numbers are assigned this way: - */ +/* For PCI devices, the region numbers are assigned this way: */ enum { /* #0-5: standard PCI resources */ PCI_STD_RESOURCES, @@ -83,23 +81,23 @@ enum { /* #6: expansion ROM resource */ PCI_ROM_RESOURCE, - /* device specific resources */ + /* Device-specific resources */ #ifdef CONFIG_PCI_IOV PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, #endif - /* resources assigned to buses behind the bridge */ + /* Resources assigned to buses behind the bridge */ #define PCI_BRIDGE_RESOURCE_NUM 4 PCI_BRIDGE_RESOURCES, PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + PCI_BRIDGE_RESOURCE_NUM - 1, - /* total resources associated with a PCI device */ + /* Total resources associated with a PCI device */ PCI_NUM_RESOURCES, - /* preserve this for compatibility */ + /* Preserve this for compatibility */ DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, }; @@ -152,9 +150,10 @@ static inline const char *pci_power_name(pci_power_t state) #define PCI_PM_D3COLD_WAIT 100 #define PCI_PM_BUS_WAIT 50 -/** The pci_channel state describes connectivity between the CPU and - * the pci device. If some PCI bus between here and the pci device - * has crashed or locked up, this info is reflected here. +/** + * The pci_channel state describes connectivity between the CPU and + * the PCI device. If some PCI bus between here and the PCI device + * has crashed or locked up, this info is reflected here. */ typedef unsigned int __bitwise pci_channel_state_t; @@ -184,9 +183,7 @@ enum pcie_reset_state { typedef unsigned short __bitwise pci_dev_flags_t; enum pci_dev_flags { - /* INTX_DISABLE in PCI_COMMAND register disables MSI - * generation too. 
- */ + /* INTX_DISABLE in PCI_COMMAND register disables MSI too */ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), /* Device configuration is irrevocably lost if disabled into D3 */ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), @@ -202,7 +199,7 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), /* Get VPD from function 0 VPD */ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), - /* a non-root bridge where translation occurs, stop alias search here */ + /* A non-root bridge where translation occurs, stop alias search here */ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), /* Do not use FLR even if device advertises PCI_AF_CAP */ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), @@ -222,17 +219,17 @@ enum pci_bus_flags { PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, }; -/* These values come from the PCI Express Spec */ +/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */ enum pcie_link_width { PCIE_LNK_WIDTH_RESRV = 0x00, PCIE_LNK_X1 = 0x01, PCIE_LNK_X2 = 0x02, PCIE_LNK_X4 = 0x04, PCIE_LNK_X8 = 0x08, - PCIE_LNK_X12 = 0x0C, + PCIE_LNK_X12 = 0x0c, PCIE_LNK_X16 = 0x10, PCIE_LNK_X32 = 0x20, - PCIE_LNK_WIDTH_UNKNOWN = 0xFF, + PCIE_LNK_WIDTH_UNKNOWN = 0xff, }; /* Based on the PCI Hotplug Spec, but some values are made up by us */ @@ -263,15 +260,15 @@ enum pci_bus_speed { }; struct pci_cap_saved_data { - u16 cap_nr; - bool cap_extended; - unsigned int size; - u32 data[0]; + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[0]; }; struct pci_cap_saved_state { - struct hlist_node next; - struct pci_cap_saved_data cap; + struct hlist_node next; + struct pci_cap_saved_data cap; }; struct irq_affinity; @@ -280,19 +277,17 @@ struct pci_vpd; struct pci_sriov; struct pci_ats; -/* - * The pci_dev structure is used to describe PCI devices. 
- */ +/* The pci_dev structure describes PCI devices */ struct pci_dev { - struct list_head bus_list; /* node in per-bus list */ - struct pci_bus *bus; /* bus this device is on */ - struct pci_bus *subordinate; /* bus this device bridges to */ + struct list_head bus_list; /* Node in per-bus list */ + struct pci_bus *bus; /* Bus this device is on */ + struct pci_bus *subordinate; /* Bus this device bridges to */ - void *sysdata; /* hook for sys-specific extension */ - struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ + void *sysdata; /* Hook for sys-specific extension */ + struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */ struct pci_slot *slot; /* Physical slot this device is in */ - unsigned int devfn; /* encoded device & function index */ + unsigned int devfn; /* Encoded device & function index */ unsigned short vendor; unsigned short device; unsigned short subsystem_vendor; @@ -307,12 +302,12 @@ struct pci_dev { u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ - u8 rom_base_reg; /* which config register controls the ROM */ - u8 pin; /* which interrupt pin this device uses */ - u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ - unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ + u8 rom_base_reg; /* Config register controlling ROM */ + u8 pin; /* Interrupt pin this device uses */ + u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */ + unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */ - struct pci_driver *driver; /* which driver has allocated this device */ + struct pci_driver *driver; /* Driver bound to this device */ u64 dma_mask; /* Mask of the bits of bus address this device implements. Normally this is 0xffffffff. You only need to change @@ -321,9 +316,9 @@ struct pci_dev { struct device_dma_parameters dma_parms; - pci_power_t current_state; /* Current operating state. In ACPI-speak, - this is D0-D3, D0 being fully functional, - and D3 being off. */ + pci_power_t current_state; /* Current operating state. In ACPI, + this is D0-D3, D0 being fully + functional, and D3 being off. 
*/ u8 pm_cap; /* PM capability offset */ unsigned int pme_support:5; /* Bitmask of states from which PME# can be generated */ @@ -334,10 +329,10 @@ struct pci_dev { unsigned int no_d3cold:1; /* D3cold is forbidden */ unsigned int bridge_d3:1; /* Allow D3 for bridge */ unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ - unsigned int mmio_always_on:1; /* disallow turning off io/mem - decoding during bar sizing */ + unsigned int mmio_always_on:1; /* Disallow turning off io/mem + decoding during BAR sizing */ unsigned int wakeup_prepared:1; - unsigned int runtime_d3cold:1; /* whether go through runtime + unsigned int runtime_d3cold:1; /* Whether go through runtime D3cold, not set for devices powered on/off by the corresponding bridge */ @@ -350,12 +345,14 @@ struct pci_dev { #ifdef CONFIG_PCIEASPM struct pcie_link_state *link_state; /* ASPM link state */ + unsigned int ltr_path:1; /* Latency Tolerance Reporting + supported from root to here */ #endif - pci_channel_state_t error_state; /* current connectivity state */ - struct device dev; /* Generic device interface */ + pci_channel_state_t error_state; /* Current connectivity state */ + struct device dev; /* Generic device interface */ - int cfg_size; /* Size of configuration space */ + int cfg_size; /* Size of config space */ /* * Instead of touching interrupt line and base address registers @@ -364,47 +361,47 @@ struct pci_dev { unsigned int irq; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ - bool match_driver; /* Skip attaching driver */ - /* These fields are used by common fixups */ - unsigned int transparent:1; /* Subtractive decode PCI bridge */ - unsigned int multifunction:1;/* Part of multi-function device */ - /* keep track of device state */ + bool match_driver; /* Skip attaching driver */ + + unsigned int transparent:1; /* Subtractive decode bridge */ + unsigned int multifunction:1; /* Multi-function device */ + unsigned int is_added:1; - unsigned int is_busmaster:1; /* device is busmaster */ - unsigned int no_msi:1; /* device may not use msi */ - unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ - unsigned int block_cfg_access:1; /* config space access is blocked */ - unsigned int broken_parity_status:1; /* Device generates false positive parity */ - unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ + unsigned int is_busmaster:1; /* Is busmaster */ + unsigned int no_msi:1; /* May not use MSI */ + unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ + unsigned int block_cfg_access:1; /* Config space access blocked */ + unsigned int broken_parity_status:1; /* Generates false positive parity */ + unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ unsigned int msi_enabled:1; unsigned int msix_enabled:1; - unsigned int ari_enabled:1; /* ARI forwarding */ - unsigned int ats_enabled:1; /* Address Translation Service */ + unsigned int ari_enabled:1; /* ARI forwarding */ + unsigned int ats_enabled:1; /* Address Translation Svc */ unsigned int pasid_enabled:1; /* Process Address Space ID */ unsigned int pri_enabled:1; /* Page Request Interface */ unsigned int is_managed:1; - unsigned int needs_freset:1; /* Dev requires fundamental reset */ + unsigned int needs_freset:1; /* Requires fundamental reset */ unsigned int state_saved:1; unsigned int is_physfn:1; unsigned int is_virtfn:1; unsigned int reset_fn:1; - unsigned int is_hotplug_bridge:1; - unsigned int is_thunderbolt:1; /* Thunderbolt controller */ - 
unsigned int __aer_firmware_first_valid:1; + unsigned int is_hotplug_bridge:1; + unsigned int is_thunderbolt:1; /* Thunderbolt controller */ + unsigned int __aer_firmware_first_valid:1; unsigned int __aer_firmware_first:1; - unsigned int broken_intx_masking:1; /* INTx masking can't be used */ - unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ + unsigned int broken_intx_masking:1; /* INTx masking can't be used */ + unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; unsigned int has_secondary_link:1; - unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ - unsigned int is_probed:1; /* device probing in progress */ + unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ + unsigned int is_probed:1; /* Device probing in progress */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ - u32 saved_config_space[16]; /* config space saved at suspend time */ + u32 saved_config_space[16]; /* Config space saved at suspend time */ struct hlist_head saved_cap_space; - struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ - int rom_attr_enabled; /* has display of the rom attribute been enabled? */ + struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ + int rom_attr_enabled; /* Display of ROM attribute enabled? */ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ @@ -419,12 +416,12 @@ struct pci_dev { struct pci_vpd *vpd; #ifdef CONFIG_PCI_ATS union { - struct pci_sriov *sriov; /* SR-IOV capability related */ - struct pci_dev *physfn; /* the PF this VF is associated with */ + struct pci_sriov *sriov; /* PF: SR-IOV info */ + struct pci_dev *physfn; /* VF: related PF */ }; u16 ats_cap; /* ATS Capability offset */ u8 ats_stu; /* ATS Smallest Translation Unit */ - atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ + atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */ #endif #ifdef CONFIG_PCI_PRI u32 pri_reqs_alloc; /* Number of PRI requests allocated */ @@ -432,11 +429,11 @@ struct pci_dev { #ifdef CONFIG_PCI_PASID u16 pasid_features; #endif - phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ - size_t romlen; /* Length of ROM if it's not from the BAR */ - char *driver_override; /* Driver name to force a match */ + phys_addr_t rom; /* Physical address if not from BAR */ + size_t romlen; /* Length if not from BAR */ + char *driver_override; /* Driver name to force a match */ - unsigned long priv_flags; /* Private flags for the pci driver */ + unsigned long priv_flags; /* Private flags for the PCI driver */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -459,26 +456,26 @@ static inline int pci_channel_offline(struct pci_dev *pdev) } struct pci_host_bridge { - struct device dev; - struct pci_bus *bus; /* root bus */ - struct pci_ops *ops; - void *sysdata; - int busnr; + struct device dev; + struct pci_bus *bus; /* Root bus */ + struct pci_ops *ops; + void *sysdata; + int busnr; struct list_head windows; /* resource_entry */ - u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */ + u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ int (*map_irq)(const struct pci_dev *, u8, u8); void (*release_fn)(struct pci_host_bridge *); - void *release_data; + void *release_data; struct msi_controller *msi; - unsigned int 
ignore_reset_delay:1; /* for entire hierarchy */ - unsigned int no_ext_tags:1; /* no Extended Tags */ + unsigned int ignore_reset_delay:1; /* For entire hierarchy */ + unsigned int no_ext_tags:1; /* No Extended Tags */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, const struct resource *res, resource_size_t start, resource_size_t size, resource_size_t align); - unsigned long private[0] ____cacheline_aligned; + unsigned long private[0] ____cacheline_aligned; }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) @@ -500,8 +497,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge); struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); void pci_set_host_bridge_release(struct pci_host_bridge *bridge, - void (*release_fn)(struct pci_host_bridge *), - void *release_data); + void (*release_fn)(struct pci_host_bridge *), + void *release_data); int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); @@ -521,32 +518,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); #define PCI_SUBTRACTIVE_DECODE 0x1 struct pci_bus_resource { - struct list_head list; - struct resource *res; - unsigned int flags; + struct list_head list; + struct resource *res; + unsigned int flags; }; #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ struct pci_bus { - struct list_head node; /* node in list of buses */ - struct pci_bus *parent; /* parent bus this bridge is on */ - struct list_head children; /* list of child buses */ - struct list_head devices; /* list of devices on this bus */ - struct pci_dev *self; /* bridge device as seen by parent */ - struct list_head slots; /* list of slots on this bus; + struct list_head node; /* Node in list of buses */ + struct pci_bus *parent; /* Parent bus this bridge is on */ + struct list_head children; /* List of child buses */ + struct list_head devices; /* List of devices on this bus */ + struct pci_dev *self; /* Bridge device as seen by parent */ + struct list_head slots; /* List of slots on this bus; protected by pci_slot_mutex */ struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; - struct list_head resources; /* address space routed to this bus */ - struct resource busn_res; /* bus numbers routed to this bus */ + struct list_head resources; /* Address space routed to this bus */ + struct resource busn_res; /* Bus numbers routed to this bus */ - struct pci_ops *ops; /* configuration access functions */ + struct pci_ops *ops; /* Configuration access functions */ struct msi_controller *msi; /* MSI controller */ - void *sysdata; /* hook for sys-specific extension */ - struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ + void *sysdata; /* Hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ - unsigned char number; /* bus number */ - unsigned char primary; /* number of primary bridge */ + unsigned char number; /* Bus number */ + unsigned char primary; /* Number of primary bridge */ unsigned char max_bus_speed; /* enum pci_bus_speed */ unsigned char cur_bus_speed; /* enum pci_bus_speed */ #ifdef CONFIG_PCI_DOMAINS_GENERIC @@ -555,12 +552,12 @@ struct pci_bus { char name[48]; - unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ - pci_bus_flags_t bus_flags; /* inherited by child buses */ + unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */ + pci_bus_flags_t bus_flags; /* Inherited by child buses */ struct device 
*bridge; struct device dev; - struct bin_attribute *legacy_io; /* legacy I/O for this bus */ - struct bin_attribute *legacy_mem; /* legacy mem */ + struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ + struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; }; @@ -617,9 +614,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } #endif -/* - * Error values that may be returned by PCI functions. - */ +/* Error values that may be returned by PCI functions */ #define PCIBIOS_SUCCESSFUL 0x00 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 #define PCIBIOS_BAD_VENDOR_ID 0x83 @@ -628,9 +623,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; #define PCIBIOS_SET_FAILED 0x88 #define PCIBIOS_BUFFER_TOO_SMALL 0x89 -/* - * Translate above to generic errno for passing back through non-PCI code. - */ +/* Translate above to generic errno for passing back through non-PCI code */ static inline int pcibios_err_to_errno(int err) { if (err <= PCIBIOS_SUCCESSFUL) @@ -680,13 +673,13 @@ typedef u32 pci_bus_addr_t; #endif struct pci_bus_region { - pci_bus_addr_t start; - pci_bus_addr_t end; + pci_bus_addr_t start; + pci_bus_addr_t end; }; struct pci_dynids { - spinlock_t lock; /* protects list, index */ - struct list_head list; /* for IDs added at runtime */ + spinlock_t lock; /* Protects list, index */ + struct list_head list; /* For IDs added at runtime */ }; @@ -700,13 +693,13 @@ struct pci_dynids { typedef unsigned int __bitwise pci_ers_result_t; enum pci_ers_result { - /* no result/none/not supported in device driver */ + /* No result/none/not supported in device driver */ PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, /* Device driver can recover without slot reset */ PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, - /* Device driver wants slot to be reset. 
*/ + /* Device driver wants slot to be reset */ PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, /* Device has completely failed, is unrecoverable */ @@ -742,27 +735,27 @@ struct pci_error_handlers { struct module; struct pci_driver { - struct list_head node; - const char *name; - const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ - int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ - void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ - int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ - int (*suspend_late) (struct pci_dev *dev, pm_message_t state); - int (*resume_early) (struct pci_dev *dev); - int (*resume) (struct pci_dev *dev); /* Device woken up */ + struct list_head node; + const char *name; + const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */ + int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ + void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ + int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ + int (*suspend_late)(struct pci_dev *dev, pm_message_t state); + int (*resume_early)(struct pci_dev *dev); + int (*resume) (struct pci_dev *dev); /* Device woken up */ void (*shutdown) (struct pci_dev *dev); - int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ + int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */ const struct pci_error_handlers *err_handler; const struct attribute_group **groups; struct device_driver driver; - struct pci_dynids dynids; + struct pci_dynids dynids; }; #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) /** - * PCI_DEVICE - macro used to describe a specific pci device + * PCI_DEVICE - macro used to describe a specific PCI device * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID * @@ -775,7 +768,7 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID /** - * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem + * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID * @subvend: the 16 bit PCI Subvendor ID @@ -789,7 +782,7 @@ struct pci_driver { .subvendor = (subvend), .subdevice = (subdev) /** - * PCI_DEVICE_CLASS - macro used to describe a specific pci device class + * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class * @dev_class: the class, subclass, prog-if triple for this device * @dev_class_mask: the class mask for this device * @@ -803,7 +796,7 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID /** - * PCI_VDEVICE - macro used to describe a specific pci device in short form + * PCI_VDEVICE - macro used to describe a specific PCI device in short form * @vend: the vendor name * @dev: the 16 bit PCI Device ID * @@ -812,22 +805,21 @@ struct pci_driver { * to PCI_ANY_ID. The macro allows the next field to follow as the device * private data. 
*/ - #define PCI_VDEVICE(vend, dev) \ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 enum { - PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ - PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ - PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ - PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ - PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ + PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */ + PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */ + PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */ PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ - PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ + PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ }; -/* these external functions are only available when PCI support is enabled */ +/* These external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI extern unsigned int pci_flags; @@ -840,11 +832,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; } void pcie_bus_configure_settings(struct pci_bus *bus); enum pcie_bus_config_types { - PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */ - PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */ - PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */ - PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */ - PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */ + PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */ + PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */ + PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */ + PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */ + PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */ }; extern enum pcie_bus_config_types pcie_bus_config; @@ -853,7 +845,7 @@ extern struct bus_type pci_bus_type; /* Do NOT directly access these two variables, unless you are arch-specific PCI * code, or PCI core code. 
*/ -extern struct list_head pci_root_buses; /* list of all known PCI buses */ +extern struct list_head pci_root_buses; /* List of all known PCI buses */ /* Some device drivers need know if PCI is initiated */ int no_pci_devices(void); @@ -887,12 +879,13 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); struct pci_bus *pci_create_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources); +int pci_host_probe(struct pci_host_bridge *bridge); int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); void pci_bus_release_busn_res(struct pci_bus *b); struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, - struct list_head *resources); + struct pci_ops *ops, void *sysdata, + struct list_head *resources); int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); @@ -949,10 +942,10 @@ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, - struct pci_dev *from); + struct pci_dev *from); struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, - unsigned int ss_vendor, unsigned int ss_device, - struct pci_dev *from); + unsigned int ss_vendor, unsigned int ss_device, + struct pci_dev *from); struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); @@ -1028,7 +1021,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); } -/* user-space driven config access */ +/* User-space driven config access */ int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); @@ -1072,6 +1065,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pci_set_cacheline_size(struct pci_dev *dev); #define HAVE_PCI_SET_MWI int __must_check pci_set_mwi(struct pci_dev *dev); +int __must_check pcim_set_mwi(struct pci_dev *dev); int pci_try_set_mwi(struct pci_dev *dev); void pci_clear_mwi(struct pci_dev *dev); void pci_intx(struct pci_dev *dev, int enable); @@ -1170,7 +1164,7 @@ unsigned int pci_rescan_bus(struct pci_bus *bus); void pci_lock_rescan_remove(void); void pci_unlock_rescan_remove(void); -/* Vital product data routines */ +/* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); int pci_set_vpd_size(struct pci_dev *dev, size_t len); @@ -1255,9 +1249,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) int __must_check __pci_register_driver(struct pci_driver *, struct module *, const char *mod_name); -/* - * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded - */ +/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ #define pci_register_driver(driver) \ __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) @@ -1272,8 +1264,7 @@ void 
pci_unregister_driver(struct pci_driver *dev); * use this macro once, and calling it replaces module_init() and module_exit() */ #define module_pci_driver(__pci_driver) \ - module_driver(__pci_driver, pci_register_driver, \ - pci_unregister_driver) + module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) /** * builtin_pci_driver() - Helper macro for registering a PCI driver @@ -1312,10 +1303,10 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); int pci_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags); -#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ -#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ -#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ -#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ +#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ +#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ +#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) @@ -1334,8 +1325,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) struct msix_entry { - u32 vector; /* kernel uses to write allocated vector */ - u16 entry; /* driver uses to specify entry, OS writes */ + u32 vector; /* Kernel uses to write allocated vector */ + u16 entry; /* Driver uses to specify entry, OS writes */ }; #ifdef CONFIG_PCI_MSI @@ -1375,10 +1366,10 @@ static inline int pci_msi_enabled(void) { return 0; } static inline int pci_enable_msi(struct pci_dev *dev) { return -ENOSYS; } static inline int pci_enable_msix_range(struct pci_dev *dev, - struct msix_entry *entries, int minvec, int maxvec) + struct msix_entry *entries, int minvec, int maxvec) { return -ENOSYS; } static inline int pci_enable_msix_exact(struct pci_dev *dev, - struct msix_entry *entries, int nvec) + struct msix_entry *entries, int nvec) { return -ENOSYS; } static inline int @@ -1543,9 +1534,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); #endif -/* some architectures require additional setup to direct VGA traffic */ +/* Some architectures require additional setup to direct VGA traffic */ typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, - unsigned int command_bits, u32 flags); + unsigned int command_bits, u32 flags); void pci_register_set_vga_state(arch_set_vga_state_t func); static inline int @@ -1584,10 +1575,9 @@ static inline void pci_clear_flags(int flags) { } static inline int pci_has_flag(int flag) { return 0; } /* - * If the system does not have PCI, clearly these return errors. Define - * these as simple inline functions to avoid hair in drivers. + * If the system does not have PCI, clearly these return errors. Define + * these as simple inline functions to avoid hair in drivers. 
*/ - #define _PCI_NOP(o, s, t) \ static inline int pci_##o##_config_##s(struct pci_dev *dev, \ int where, t val) \ @@ -1686,6 +1676,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } #define dev_is_pf(d) (false) static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) { return false; } +static inline int pci_irqd_intx_xlate(struct irq_domain *d, + struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ return -EINVAL; } #endif /* CONFIG_PCI */ /* Include architecture-dependent settings and functions */ @@ -1726,8 +1723,10 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); #define pci_root_bus_fwnode(bus) NULL #endif -/* these helpers provide future and backwards compatibility - * for accessing popular PCI BAR info */ +/* + * These helpers provide future and backwards compatibility + * for accessing popular PCI BAR info + */ #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) @@ -1739,7 +1738,8 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); (pci_resource_end((dev), (bar)) - \ pci_resource_start((dev), (bar)) + 1)) -/* Similar to the helpers above, these manipulate per-pci_dev +/* + * Similar to the helpers above, these manipulate per-pci_dev * driver-specific data. They are really just a wrapper around * the generic device structure functions of these calls. */ @@ -1753,16 +1753,14 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) dev_set_drvdata(&pdev->dev, data); } -/* If you want to know what to call your pci_dev, ask this function. - * Again, it's a wrapper around the generic device. - */ static inline const char *pci_name(const struct pci_dev *pdev) { return dev_name(&pdev->dev); } -/* Some archs don't want to expose struct resource to userland as-is +/* + * Some archs don't want to expose struct resource to userland as-is * in sysfs and /proc */ #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER @@ -1781,16 +1779,16 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, /* - * The world is not perfect and supplies us with broken PCI devices. - * For at least a part of these bugs we need a work-around, so both - * generic (drivers/pci/quirks.c) and per-architecture code can define - * fixup hooks to be called for particular buggy devices. + * The world is not perfect and supplies us with broken PCI devices. + * For at least a part of these bugs we need a work-around, so both + * generic (drivers/pci/quirks.c) and per-architecture code can define + * fixup hooks to be called for particular buggy devices. 
*/ struct pci_fixup { - u16 vendor; /* You can use PCI_ANY_ID here of course */ - u16 device; /* You can use PCI_ANY_ID here of course */ - u32 class; /* You can use PCI_ANY_ID here too */ + u16 vendor; /* Or PCI_ANY_ID */ + u16 device; /* Or PCI_ANY_ID */ + u32 class; /* Or PCI_ANY_ID */ unsigned int class_shift; /* should be 0, 8, 16 */ void (*hook)(struct pci_dev *dev); }; @@ -1832,23 +1830,19 @@ enum pci_fixup_pass { #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##hook, vendor, device, class, \ - class_shift, hook) + resume##hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##hook, vendor, device, \ - class, class_shift, hook) + resume_early##hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##hook, vendor, device, class, \ - class_shift, hook) + suspend##hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ class_shift, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ - suspend_late##hook, vendor, device, \ - class, class_shift, hook) + suspend_late##hook, vendor, device, class, class_shift, hook) #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ @@ -1864,20 +1858,16 @@ enum pci_fixup_pass { hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ - resume##hook, vendor, device, \ - PCI_ANY_ID, 0, hook) + resume##hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ - resume_early##hook, vendor, device, \ - PCI_ANY_ID, 0, hook) + resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ - suspend##hook, vendor, device, \ - PCI_ANY_ID, 0, hook) + suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ - suspend_late##hook, vendor, device, \ - PCI_ANY_ID, 0, hook) + suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); @@ -1964,6 +1954,7 @@ int pci_vfs_assigned(struct pci_dev *dev); int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); +void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); #else static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) { @@ -1991,6 +1982,7 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) { return 0; } static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) { return 0; } +static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } #endif #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) @@ -2061,6 +2053,7 @@ void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool 
pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags); +int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) @@ -2112,7 +2105,7 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) */ static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) { - return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); + return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); } /** @@ -2182,6 +2175,9 @@ void pci_release_of_node(struct pci_dev *dev); void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); +int pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *resources, + struct resource **bus_range); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); @@ -2197,7 +2193,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) return bus ? bus->dev.of_node : NULL; } -#else /* CONFIG_OF */ +#else /* CONFIG_OF */ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } @@ -2206,6 +2202,12 @@ static inline struct device_node * pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } +static inline int pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *resources, + struct resource **bus_range) +{ + return -EINVAL; +} #endif /* CONFIG_OF */ #ifdef CONFIG_ACPI @@ -2231,7 +2233,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data); -/* helper functions for operation of device flag */ +/* Helper functions for operation of device flag */ static inline void pci_set_dev_assigned(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; @@ -2278,7 +2280,55 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) return false; } -/* provide the legacy pci_dma_* API */ +/** + * pci_uevent_ers - emit a uevent during recovery path of pci device + * @pdev: pci device to check + * @err_type: type of error event + * + */ +static inline void pci_uevent_ers(struct pci_dev *pdev, + enum pci_ers_result err_type) +{ + int idx = 0; + char *envp[3]; + + switch (err_type) { + case PCI_ERS_RESULT_NONE: + case PCI_ERS_RESULT_CAN_RECOVER: + envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; + envp[idx++] = "DEVICE_ONLINE=0"; + break; + case PCI_ERS_RESULT_RECOVERED: + envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY"; + envp[idx++] = "DEVICE_ONLINE=1"; + break; + case PCI_ERS_RESULT_DISCONNECT: + envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY"; + envp[idx++] = "DEVICE_ONLINE=0"; + break; + default: + break; + } + + if (idx > 0) { + envp[idx++] = NULL; + kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp); + } +} + +/* Provide the legacy pci_dma_* API */ #include <linux/pci-dma-compat.h> +#define pci_printk(level, pdev, fmt, arg...) \ + dev_printk(level, &(pdev)->dev, fmt, ##arg) + +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) +#define pci_err(pdev, fmt, arg...) 
dev_err(&(pdev)->dev, fmt, ##arg) +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) + #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2e855afa0212..26213024e81b 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * PCI HotPlug Core Functions * @@ -7,21 +8,6 @@ * * All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * * Send feedback to <kristen.c.accardi@intel.com> * */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ab20dc5db423..eb13e84e1fef 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2381,6 +2381,8 @@ #define PCI_VENDOR_ID_LENOVO 0x17aa +#define PCI_VENDOR_ID_CDNS 0x17cd + #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 #define PCI_DEVICE_ID_ARECA_1120 0x1120 diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 73a7bf30fe9a..4f052496cdfd 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) return 0; } -static inline int percpu_counter_initialized(struct percpu_counter *fbc) +static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { return (fbc->counters != NULL); } @@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc) return percpu_counter_read(fbc); } -static inline int percpu_counter_initialized(struct percpu_counter *fbc) +static inline bool percpu_counter_initialized(struct percpu_counter *fbc) { - return 1; + return true; } #endif /* CONFIG_SMP */ diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 43b1d7648e82..a03c2642a87c 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -15,8 +15,10 @@ #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) +#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) #define PFN_FLAGS_TRACE \ + { PFN_SPECIAL, "SPECIAL" }, \ { PFN_SG_CHAIN, "SG_CHAIN" }, \ { PFN_SG_LAST, "SG_LAST" }, \ { PFN_DEV, "DEV" }, \ @@ -120,4 +122,15 @@ pud_t pud_mkdevmap(pud_t pud); #endif #endif /* __HAVE_ARCH_PTE_DEVMAP */ +#ifdef __HAVE_ARCH_PTE_SPECIAL +static inline bool pfn_t_special(pfn_t pfn) +{ + return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL; +} +#else +static inline bool pfn_t_special(pfn_t pfn) +{ + return false; +} +#endif /* __HAVE_ARCH_PTE_SPECIAL */ #endif /* _LINUX_PFN_T_H_ */ diff --git 
a/include/linux/phy.h b/include/linux/phy.h index dc82a07cb4fd..5a0c3e53e7c2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -59,6 +59,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_IS_INTERNAL 0x00000002 +#define PHY_RST_AFTER_CLK_EN 0x00000004 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -468,7 +469,6 @@ struct phy_device { /* Interrupt and Polling infrastructure */ struct work_struct phy_queue; struct delayed_work state_queue; - atomic_t irq_disable; struct mutex lock; @@ -497,19 +497,19 @@ struct phy_device { * flags: A bitfield defining certain other features this PHY * supports (like interrupts) * - * The drivers must implement config_aneg and read_status. All - * other functions are optional. Note that none of these - * functions should be called from interrupt time. The goal is - * for the bus read/write functions to be able to block when the - * bus transaction is happening, and be freed up by an interrupt - * (The MPC85xx has this ability, though it is not currently - * supported in the driver). + * All functions are optional. If config_aneg or read_status + * are not implemented, the phy core uses the genphy versions. + * Note that none of these functions should be called from + * interrupt time. The goal is for the bus read/write functions + * to be able to block when the bus transaction is happening, + * and be freed up by an interrupt (The MPC85xx has this ability, + * though it is not currently supported in the driver). */ struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; - unsigned int phy_id_mask; + u32 phy_id_mask; u32 features; u32 flags; const void *driver_data; @@ -634,6 +634,9 @@ struct phy_driver { int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, u16 val); + int (*read_page)(struct phy_device *dev); + int (*write_page)(struct phy_device *dev, int page); + /* Get the size and type of the eeprom contained within a plug-in * module */ int (*module_info)(struct phy_device *dev, @@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask, size_t maxbit); +void phy_resolve_aneg_linkmode(struct phy_device *phydev); + /** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. @@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) } /** + * __phy_read - convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ + return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); +} + +/** * phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct * @regnum: register number to write @@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) } /** + * __phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, + val); +} + +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); + +/** + * __phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __phy_modify(phydev, regnum, 0, val); +} + +/** + * __phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, + u16 val) +{ + return __phy_modify(phydev, regnum, val, 0); +} + +/** + * phy_set_bits - Convenience function for setting bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to set + */ +static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, 0, val); +} + +/** + * phy_clear_bits - Convenience function for clearing bits in a PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: bits to clear + */ +static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) +{ + return phy_modify(phydev, regnum, val, 0); +} + +/** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct * @@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) }; /** + * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z + * negotiation + * @mode: one of &enum phy_interface_t + * + * Returns true if the phy interface mode uses the 16-bit negotiation + * word as defined in 802.3z. 
(See 802.3-2015 37.2.1 Config_Reg encoding) + */ +static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) +{ + return mode == PHY_INTERFACE_MODE_1000BASEX || + mode == PHY_INTERFACE_MODE_2500BASEX; +} + +/** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) * @phydev: the phy_device struct @@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); +int phy_save_page(struct phy_device *phydev); +int phy_select_page(struct phy_device *phydev, int page); +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); @@ -840,13 +945,11 @@ int phy_aneg_done(struct phy_device *phydev); int phy_stop_interrupts(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); +int phy_reset_after_clk_enable(struct phy_device *phydev); -static inline int phy_read_status(struct phy_device *phydev) +static inline void phy_device_reset(struct phy_device *phydev, int value) { - if (!phydev->drv) - return -EIO; - - return phydev->drv->read_status(phydev); + mdio_device_reset(&phydev->mdio, value); } #define phydev_err(_phydev, format, args...) \ @@ -889,6 +992,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev); int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); +int genphy_c45_read_mdix(struct phy_device *phydev); + +static inline int phy_read_status(struct phy_device *phydev) +{ + if (!phydev->drv) + return -EIO; + + if (phydev->drv->read_status) + return phydev->drv->read_status(phydev); + else + return genphy_read_status(phydev); +} void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); @@ -898,7 +1013,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n, void phy_state_machine(struct work_struct *work); void phy_change(struct phy_device *phydev); void phy_change_work(struct work_struct *work); -void phy_mac_interrupt(struct phy_device *phydev, int new_link); +void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_trigger_machine(struct phy_device *phydev, bool sync); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index cf6392de6eb0..ee54453a40a0 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); -extern int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status, @@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } -static inline int 
fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed) -{ - return -ENODEV; -} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index af67edd4ae38..bd137c273d38 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -7,6 +7,7 @@ struct device_node; struct ethtool_cmd; +struct fwnode_handle; struct net_device; enum { @@ -20,19 +21,31 @@ enum { MLO_AN_PHY = 0, /* Conventional PHY */ MLO_AN_FIXED, /* Fixed-link mode */ - MLO_AN_SGMII, /* Cisco SGMII protocol */ - MLO_AN_8023Z, /* 1000base-X protocol */ + MLO_AN_INBAND, /* In-band protocol */ }; static inline bool phylink_autoneg_inband(unsigned int mode) { - return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z; + return mode == MLO_AN_INBAND; } +/** + * struct phylink_link_state - link state structure + * @advertising: ethtool bitmask containing advertised link modes + * @lp_advertising: ethtool bitmask containing link partner advertised link + * modes + * @interface: link &typedef phy_interface_t mode + * @speed: link speed, one of the SPEED_* constants. + * @duplex: link duplex mode, one of DUPLEX_* constants. + * @pause: link pause state, described by MLO_PAUSE_* constants. + * @link: true if the link is up. + * @an_enabled: true if autonegotiation is enabled/desired. + * @an_complete: true if autonegotiation has completed. + */ struct phylink_link_state { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); - phy_interface_t interface; /* PHY_INTERFACE_xxx */ + phy_interface_t interface; int speed; int duplex; int pause; @@ -41,72 +54,145 @@ struct phylink_link_state { unsigned int an_complete:1; }; +/** + * struct phylink_mac_ops - MAC operations structure. + * @validate: Validate and update the link configuration. + * @mac_link_state: Read the current link state from the hardware. + * @mac_config: configure the MAC for the selected mode and state. + * @mac_an_restart: restart 802.3z BaseX autonegotiation. + * @mac_link_down: take the link down. + * @mac_link_up: allow the link to come up. + * + * The individual methods are described more fully below. + */ struct phylink_mac_ops { - /** - * validate: validate and update the link configuration - * @ndev: net_device structure associated with MAC - * @config: configuration to validate - * - * Update the %config->supported and %config->advertised masks - * clearing bits that can not be supported. - * - * Note: the PHY may be able to transform from one connection - * technology to another, so, eg, don't clear 1000BaseX just - * because the MAC is unable to support it. This is more about - * clearing unsupported speeds and duplex settings. - * - * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX - * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode - * based on %config->advertised and/or %config->speed. 
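
Before the phylink changes continue below, a short illustration of the accessors added to include/linux/phy.h in the hunks above: phy_set_bits(), phy_modify_paged(), and the phy_select_page()/phy_restore_page() pairing with the lock-assuming __phy_read()/__phy_write(). This is only a sketch; the MY_PHY_* register, bit and page numbers are invented for the example and the exact select/restore error semantics are as I understand the new API, not taken from this diff.

#include <linux/phy.h>

#define MY_PHY_LED_REG		0x19	/* hypothetical register */
#define MY_PHY_LED_EN		BIT(3)	/* hypothetical bit */
#define MY_PHY_EXT_PAGE		2	/* hypothetical vendor page */
#define MY_PHY_EXT_REG		0x10	/* hypothetical register on that page */

static int my_phy_config_init(struct phy_device *phydev)
{
	int oldpage, ret;

	/* Single read-modify-write; the helper takes the MDIO bus lock itself. */
	ret = phy_set_bits(phydev, MY_PHY_LED_REG, MY_PHY_LED_EN);
	if (ret < 0)
		return ret;

	/* One-shot access to a register that lives on another page. */
	ret = phy_modify_paged(phydev, MY_PHY_EXT_PAGE, MY_PHY_EXT_REG,
			       0x00ff, 0x0012);
	if (ret < 0)
		return ret;

	/*
	 * Several accesses on the same page: select it once (this takes the
	 * bus lock), use the unlocked __phy_*() accessors, then let
	 * phy_restore_page() restore the original page, drop the lock and
	 * return either the saved error or ret.
	 */
	oldpage = phy_select_page(phydev, MY_PHY_EXT_PAGE);
	if (oldpage >= 0) {
		ret = __phy_read(phydev, MY_PHY_EXT_REG);
		if (ret >= 0)
			ret = __phy_write(phydev, MY_PHY_EXT_REG,
					  ret | BIT(0));
	}
	return phy_restore_page(phydev, oldpage, ret);
}
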
- */ void (*validate)(struct net_device *ndev, unsigned long *supported, struct phylink_link_state *state); - - /* Read the current link state from the hardware */ - int (*mac_link_state)(struct net_device *, struct phylink_link_state *); - - /* Configure the MAC */ - /** - * mac_config: configure the MAC for the selected mode and state - * @ndev: net_device structure for the MAC - * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII - * @state: state structure - * - * The action performed depends on the currently selected mode: - * - * %MLO_AN_FIXED, %MLO_AN_PHY: - * set the specified speed, duplex, pause mode, and phy interface - * mode in the provided @state. - * %MLO_AN_8023Z: - * place the link in 1000base-X mode, advertising the parameters - * given in advertising in @state. - * %MLO_AN_SGMII: - * place the link in Cisco SGMII mode - there is no advertisment - * to make as the PHY communicates the speed and duplex to the - * MAC over the in-band control word. Configuration of the pause - * mode is as per MLO_AN_PHY since this is not included. - */ + int (*mac_link_state)(struct net_device *ndev, + struct phylink_link_state *state); void (*mac_config)(struct net_device *ndev, unsigned int mode, const struct phylink_link_state *state); - - /** - * mac_an_restart: restart 802.3z BaseX autonegotiation - * @ndev: net_device structure for the MAC - */ void (*mac_an_restart)(struct net_device *ndev); - - void (*mac_link_down)(struct net_device *, unsigned int mode); - void (*mac_link_up)(struct net_device *, unsigned int mode, - struct phy_device *); + void (*mac_link_down)(struct net_device *ndev, unsigned int mode); + void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + struct phy_device *phy); }; -struct phylink *phylink_create(struct net_device *, struct device_node *, +#if 0 /* For kernel-doc purposes only. */ +/** + * validate - Validate and update the link configuration + * @ndev: a pointer to a &struct net_device for the MAC. + * @supported: ethtool bitmask for supported link modes. + * @state: a pointer to a &struct phylink_link_state. + * + * Clear bits in the @supported and @state->advertising masks that + * are not supportable by the MAC. + * + * Note that the PHY may be able to transform from one connection + * technology to another, so, eg, don't clear 1000BaseX just + * because the MAC is unable to BaseX mode. This is more about + * clearing unsupported speeds and duplex settings. + * + * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX + * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode + * based on @state->advertising and/or @state->speed and update + * @state->interface accordingly. + */ +void validate(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state); + +/** + * mac_link_state() - Read the current link state from the hardware + * @ndev: a pointer to a &struct net_device for the MAC. + * @state: a pointer to a &struct phylink_link_state. + * + * Read the current link state from the MAC, reporting the current + * speed in @state->speed, duplex mode in @state->duplex, pause mode + * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, + * negotiation completion state in @state->an_complete, and link + * up state in @state->link. + */ +int mac_link_state(struct net_device *ndev, + struct phylink_link_state *state); + +/** + * mac_config() - configure the MAC for the selected mode and state + * @ndev: a pointer to a &struct net_device for the MAC. 
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @state: a pointer to a &struct phylink_link_state. + * + * The action performed depends on the currently selected mode: + * + * %MLO_AN_FIXED, %MLO_AN_PHY: + * Configure the specified @state->speed, @state->duplex and + * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode. + * + * %MLO_AN_INBAND: + * place the link in an inband negotiation mode (such as 802.3z + * 1000base-X or Cisco SGMII mode depending on the @state->interface + * mode). In both cases, link state management (whether the link + * is up or not) is performed by the MAC, and reported via the + * mac_link_state() callback. Changes in link state must be made + * by calling phylink_mac_change(). + * + * If in 802.3z mode, the link speed is fixed, dependent on the + * @state->interface. Duplex is negotiated, and pause is advertised + * according to @state->an_enabled, @state->pause and + * @state->advertising flags. Beware of MACs which only support full + * duplex at gigabit and higher speeds. + * + * If in Cisco SGMII mode, the link speed and duplex mode are passed + * in the serial bitstream 16-bit configuration word, and the MAC + * should be configured to read these bits and acknowledge the + * configuration word. Nothing is advertised by the MAC. The MAC is + * responsible for reading the configuration word and configuring + * itself accordingly. + */ +void mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state); + +/** + * mac_an_restart() - restart 802.3z BaseX autonegotiation + * @ndev: a pointer to a &struct net_device for the MAC. + */ +void mac_an_restart(struct net_device *ndev); + +/** + * mac_link_down() - take the link down + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), force the link down and disable any + * Energy Efficient Ethernet MAC configuration. + */ +void mac_link_down(struct net_device *ndev, unsigned int mode); + +/** + * mac_link_up() - allow the link to come up + * @ndev: a pointer to a &struct net_device for the MAC. + * @mode: link autonegotiation mode + * @phy: any attached phy + * + * If @mode is not an in-band negotiation mode (as defined by + * phylink_autoneg_inband()), allow the link to come up. If @phy + * is non-%NULL, configure Energy Efficient Ethernet by calling + * phy_init_eee() and perform appropriate MAC configuration for EEE. 
+ */ +void mac_link_up(struct net_device *ndev, unsigned int mode, + struct phy_device *phy); +#endif + +struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); -int phylink_of_phy_connect(struct phylink *, struct device_node *); +int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); void phylink_disconnect_phy(struct phylink *); +int phylink_fixed_state_cb(struct phylink *, + void (*cb)(struct net_device *dev, + struct phylink_link_state *)); void phylink_mac_change(struct phylink *, bool up); @@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *, int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); int phylink_ethtool_get_module_eeprom(struct phylink *, struct ethtool_eeprom *, u8 *); -int phylink_init_eee(struct phylink *, bool); int phylink_get_eee_err(struct phylink *); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index 05082e407c4a..d01a8638bb45 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h @@ -43,6 +43,8 @@ extern int pinctrl_init_done(struct device *dev); #else +struct device; + /* Stubs if we're not using pinctrl */ static inline int pinctrl_bind_pins(struct device *dev) diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index ec6dadcc1fde..6c0680641108 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -94,6 +94,7 @@ * or latch delay (on outputs) this parameter (in a custom format) * specifies the clock skew or latch delay. It typically controls how * many double inverters are put in front of the line. + * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if * you need to pass in custom configurations to the pin controller, use * PIN_CONFIG_END+1 as the base offset. 
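
The phylink_mac_ops kernel-doc added above describes six callbacks and a creation call that now takes a struct fwnode_handle. A minimal skeleton of a MAC driver wiring these up might look as follows; all my_mac_* names are hypothetical, only the phylink signatures come from the header changes in this diff.

#include <linux/phylink.h>

static void my_mac_validate(struct net_device *ndev, unsigned long *supported,
			    struct phylink_link_state *state)
{
	/* Clear link modes the MAC cannot handle; leave the rest untouched. */
}

static int my_mac_link_state(struct net_device *ndev,
			     struct phylink_link_state *state)
{
	/* Fill in state->speed/duplex/pause/link from MAC/PCS registers. */
	state->link = 0;
	return 0;
}

static void my_mac_config(struct net_device *ndev, unsigned int mode,
			  const struct phylink_link_state *state)
{
	if (phylink_autoneg_inband(mode)) {
		/* Program the MAC/PCS for in-band (SGMII/802.3z) signalling. */
	} else {
		/* Apply the fixed speed/duplex/pause given in @state. */
	}
}

static void my_mac_an_restart(struct net_device *ndev) { }
static void my_mac_link_down(struct net_device *ndev, unsigned int mode) { }
static void my_mac_link_up(struct net_device *ndev, unsigned int mode,
			   struct phy_device *phy) { }

static const struct phylink_mac_ops my_mac_phylink_ops = {
	.validate	= my_mac_validate,
	.mac_link_state	= my_mac_link_state,
	.mac_config	= my_mac_config,
	.mac_an_restart	= my_mac_an_restart,
	.mac_link_down	= my_mac_link_down,
	.mac_link_up	= my_mac_link_up,
};

/* In probe(), creation now takes a fwnode rather than a device_node, e.g.:
 *	pl = phylink_create(ndev, dev_fwnode(dev), phy_mode,
 *			    &my_mac_phylink_ops);
 */
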
@@ -122,6 +123,7 @@ enum pin_config_param { PIN_CONFIG_SLEEP_HARDWARE_STATE, PIN_CONFIG_SLEW_RATE, PIN_CONFIG_SKEW_DELAY, + PIN_CONFIG_PERSIST_STATE, PIN_CONFIG_END = 0x7F, PIN_CONFIG_MAX = 0xFF, }; diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 5e45385c5bdc..8f5dbb84547a 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -18,6 +18,7 @@ #include <linux/list.h> #include <linux/seq_file.h> #include <linux/pinctrl/pinctrl-state.h> +#include <linux/pinctrl/devinfo.h> struct device; struct pinctrl_dev; diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index 271a4e25af67..63507ff464ee 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h @@ -50,6 +50,8 @@ struct at24_platform_data { #define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ #define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ #define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ +#define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */ + /* the next slave address */ void (*setup)(struct nvmem_device *nvmem, void *context); void *context; diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h index 89fd34727a24..98967df07468 100644 --- a/include/linux/platform_data/i2c-davinci.h +++ b/include/linux/platform_data/i2c-davinci.h @@ -16,9 +16,8 @@ struct davinci_i2c_platform_data { unsigned int bus_freq; /* standard bus frequency (kHz) */ unsigned int bus_delay; /* post-transaction delay (usec) */ - unsigned int sda_pin; /* GPIO pin ID to use for SDA */ - unsigned int scl_pin; /* GPIO pin ID to use for SCL */ - bool has_pfunc; /*chip has a ICPFUNC register */ + bool gpio_recovery; /* Use GPIO recovery method */ + bool has_pfunc; /* Chip has a ICPFUNC register */ }; /* for board setup code */ diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/platform_data/i2c-pxa.h index 53aab243cbd8..5236f216dfae 100644 --- a/include/linux/i2c/pxa-i2c.h +++ b/include/linux/platform_data/i2c-pxa.h @@ -71,15 +71,4 @@ struct i2c_pxa_platform_data { unsigned char master_code; unsigned long rate; }; - -extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info); - -#ifdef CONFIG_PXA27x -extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info); -#endif - -#ifdef CONFIG_PXA3xx -extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info); -#endif - #endif diff --git a/include/linux/platform_data/mms114.h b/include/linux/platform_data/mms114.h deleted file mode 100644 index 5722ebfb2738..000000000000 --- a/include/linux/platform_data/mms114.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundationr - */ - -#ifndef __LINUX_MMS114_H -#define __LINUX_MMS114_H - -struct mms114_platform_data { - unsigned int x_size; - unsigned int y_size; - unsigned int contact_threshold; - unsigned int moving_threshold; - bool x_invert; - bool y_invert; - - void (*cfg_pin)(bool); -}; - -#endif /* __LINUX_MMS114_H */ diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h index 818c5c6e203f..c71a2dd66143 100644 --- a/include/linux/platform_data/si5351.h +++ b/include/linux/platform_data/si5351.h 
@@ -86,6 +86,7 @@ enum si5351_disable_state { * @multisynth_src: multisynth source clock * @clkout_src: clkout source clock * @pll_master: if true, clkout can also change pll rate + * @pll_reset: if true, clkout can reset its pll * @drive: output drive strength * @rate: initial clkout rate, or default if 0 */ @@ -95,6 +96,7 @@ struct si5351_clkout_config { enum si5351_drive_strength drive; enum si5351_disable_state disable_state; bool pll_master; + bool pll_reset; unsigned long rate; }; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h new file mode 100644 index 000000000000..1be356330b96 --- /dev/null +++ b/include/linux/platform_data/ti-sysc.h @@ -0,0 +1,86 @@ +#ifndef __TI_SYSC_DATA_H__ +#define __TI_SYSC_DATA_H__ + +enum ti_sysc_module_type { + TI_SYSC_OMAP2, + TI_SYSC_OMAP2_TIMER, + TI_SYSC_OMAP3_SHAM, + TI_SYSC_OMAP3_AES, + TI_SYSC_OMAP4, + TI_SYSC_OMAP4_TIMER, + TI_SYSC_OMAP4_SIMPLE, + TI_SYSC_OMAP34XX_SR, + TI_SYSC_OMAP36XX_SR, + TI_SYSC_OMAP4_SR, + TI_SYSC_OMAP4_MCASP, + TI_SYSC_OMAP4_USB_HOST_FS, +}; + +/** + * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets + * @midle_shift: Offset of the midle bit + * @clkact_shift: Offset of the clockactivity bit + * @sidle_shift: Offset of the sidle bit + * @enwkup_shift: Offset of the enawakeup bit + * @srst_shift: Offset of the softreset bit + * @autoidle_shift: Offset of the autoidle bit + * @dmadisable_shift: Offset of the dmadisable bit + * @emufree_shift; Offset of the emufree bit + * + * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a + * feature is not available. + */ +struct sysc_regbits { + s8 midle_shift; + s8 clkact_shift; + s8 sidle_shift; + s8 enwkup_shift; + s8 srst_shift; + s8 autoidle_shift; + s8 dmadisable_shift; + s8 emufree_shift; +}; + +#define SYSC_QUIRK_RESET_STATUS BIT(7) +#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) +#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) +#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4) +#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3) +#define SYSC_QUIRK_16BIT BIT(2) +#define SYSC_QUIRK_UNCACHED BIT(1) +#define SYSC_QUIRK_USE_CLOCKACT BIT(0) + +#define SYSC_NR_IDLEMODES 4 + +/** + * struct sysc_capabilities - capabilities for an interconnect target module + * + * @sysc_mask: bitmask of supported SYSCONFIG register bits + * @regbits: bitmask of SYSCONFIG register bits + * @mod_quirks: bitmask of module specific quirks + */ +struct sysc_capabilities { + const enum ti_sysc_module_type type; + const u32 sysc_mask; + const struct sysc_regbits *regbits; + const u32 mod_quirks; +}; + +/** + * struct sysc_config - configuration for an interconnect target module + * @sysc_val: configured value for sysc register + * @midlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported master idle modes + * @srst_udelay: optional delay needed after OCP soft reset + * @quirks: bitmask of enabled quirks + */ +struct sysc_config { + u32 sysc_val; + u32 syss_mask; + u8 midlemodes; + u8 sidlemodes; + u8 srst_udelay; + u32 quirks; +}; + +#endif /* __TI_SYSC_DATA_H__ */ diff --git a/include/linux/poll.h b/include/linux/poll.h index d384f12abdd5..04781a753326 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_ */ typedef struct poll_table_struct { poll_queue_proc _qproc; - unsigned long _key; + __poll_t _key; } poll_table; static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) @@ 
-62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p) * to be started implicitly on poll(). You typically only want to do that * if the application is actually polling for POLLIN and/or POLLOUT. */ -static inline unsigned long poll_requested_events(const poll_table *p) +static inline __poll_t poll_requested_events(const poll_table *p) { - return p ? p->_key : ~0UL; + return p ? p->_key : ~(__poll_t)0; } static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) { pt->_qproc = qproc; - pt->_key = ~0UL; /* all events enabled */ + pt->_key = ~(__poll_t)0; /* all events enabled */ } struct poll_table_entry { struct file *filp; - unsigned long key; + __poll_t key; wait_queue_entry_t wait; wait_queue_head_t *wait_address; }; diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 38d8225510f1..3a3bc71017d5 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -68,7 +68,7 @@ struct posix_clock_operations { int (*open) (struct posix_clock *pc, fmode_t f_mode); - uint (*poll) (struct posix_clock *pc, + __poll_t (*poll) (struct posix_clock *pc, struct file *file, poll_table *wait); int (*release) (struct posix_clock *pc); diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index b2b7255ec7f5..540595a321a7 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -12,6 +12,7 @@ #include <linux/bug.h> #include <linux/slab.h> #include <linux/rcupdate.h> +#include <linux/refcount.h> #include <uapi/linux/posix_acl.h> struct posix_acl_entry { @@ -24,7 +25,7 @@ struct posix_acl_entry { }; struct posix_acl { - atomic_t a_refcount; + refcount_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; struct posix_acl_entry a_entries[0]; @@ -41,7 +42,7 @@ static inline struct posix_acl * posix_acl_dup(struct posix_acl *acl) { if (acl) - atomic_inc(&acl->a_refcount); + refcount_inc(&acl->a_refcount); return acl; } @@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl) static inline void posix_acl_release(struct posix_acl *acl) { - if (acl && atomic_dec_and_test(&acl->a_refcount)) + if (acl && refcount_dec_and_test(&acl->a_refcount)) kfree_rcu(acl, a_rcu); } diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e6187f524f2c..01fbf1b16258 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -16,6 +16,7 @@ enum bq27xxx_chip { BQ27520G2, /* bq27520G2 */ BQ27520G3, /* bq27520G3 */ BQ27520G4, /* bq27520G4 */ + BQ27521, /* bq27521 */ BQ27530, /* bq27530, bq27531 */ BQ27531, BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 2ff18c9840a7..d31cb6215905 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd); #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) extern void *ns_get_path(struct path *path, struct task_struct *task, const struct proc_ns_operations *ns_ops); +typedef struct ns_common *ns_get_path_helper_t(void *); +extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, + void *private_data); extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); diff --git a/include/linux/property.h b/include/linux/property.h index f6189a3ac63c..769d372c1edf 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent( 
struct fwnode_handle *fwnode); struct fwnode_handle *fwnode_get_next_child_node( const struct fwnode_handle *fwnode, struct fwnode_handle *child); +struct fwnode_handle *fwnode_get_next_available_child_node( + const struct fwnode_handle *fwnode, struct fwnode_handle *child); #define fwnode_for_each_child_node(fwnode, child) \ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ child = fwnode_get_next_child_node(fwnode, child)) +#define fwnode_for_each_available_child_node(fwnode, child) \ + for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ + child = fwnode_get_next_available_child_node(fwnode, child)) + struct fwnode_handle *device_get_next_child_node( struct device *dev, struct fwnode_handle *child); @@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); +int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); + unsigned int device_get_child_node_count(struct device *dev); static inline bool device_property_read_bool(struct device *dev, @@ -206,7 +214,7 @@ struct property_entry { */ #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ -{ \ +(struct property_entry) { \ .name = _name_, \ .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ .is_array = true, \ @@ -224,7 +232,7 @@ struct property_entry { PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ -{ \ +(struct property_entry) { \ .name = _name_, \ .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ .is_array = true, \ @@ -233,7 +241,7 @@ struct property_entry { } #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ -{ \ +(struct property_entry) { \ .name = _name_, \ .length = sizeof(_type_), \ .is_string = false, \ @@ -250,7 +258,7 @@ struct property_entry { PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) #define PROPERTY_ENTRY_STRING(_name_, _val_) \ -{ \ +(struct property_entry) { \ .name = _name_, \ .length = sizeof(_val_), \ .is_string = true, \ @@ -258,7 +266,7 @@ struct property_entry { } #define PROPERTY_ENTRY_BOOL(_name_) \ -{ \ +(struct property_entry) { \ .name = _name_, \ } @@ -275,10 +283,15 @@ bool device_dma_supported(struct device *dev); enum dev_dma_attr device_get_dma_attr(struct device *dev); +void *device_get_match_data(struct device *dev); + int device_get_phy_mode(struct device *dev); void *device_get_mac_address(struct device *dev, char *addr, int alen); +int fwnode_get_phy_mode(struct fwnode_handle *fwnode); +void *fwnode_get_mac_address(struct fwnode_handle *fwnode, + char *addr, int alen); struct fwnode_handle *fwnode_graph_get_next_endpoint( const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * diff --git a/include/linux/psci.h b/include/linux/psci.h index bdea1cb5e1db..f724fd8c78e8 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu); int psci_cpu_suspend_enter(unsigned long index); struct psci_operations { + u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); int (*cpu_off)(u32 state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); @@ -46,10 +47,11 @@ static inline int psci_dt_init(void) { return 0; } #if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) int __init psci_acpi_init(void); bool __init acpi_psci_present(void); -bool __init acpi_psci_use_hvc(void); +bool acpi_psci_use_hvc(void); #else 
static inline int psci_acpi_init(void) { return 0; } static inline bool acpi_psci_present(void) { return false; } +static inline bool acpi_psci_use_hvc(void) {return false; } #endif #endif /* __LINUX_PSCI_H */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index d72b2e7dd500..1883d6137e9b 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -45,9 +45,10 @@ struct ptr_ring { }; /* Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). If ring is ever resized, callers must hold - * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold - * producer_lock, the next call to __ptr_ring_produce may fail. + * for example cpu_relax(). + * + * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: + * see e.g. ptr_ring_full. */ static inline bool __ptr_ring_full(struct ptr_ring *r) { @@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ smp_wmb(); - r->queue[r->producer++] = ptr; + WRITE_ONCE(r->queue[r->producer++], ptr); if (unlikely(r->producer >= r->size)) r->producer = 0; return 0; @@ -169,32 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) return ret; } -/* Note: callers invoking this in a loop must use a compiler barrier, - * for example cpu_relax(). Callers must take consumer_lock - * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. - * If ring is never resized, and if the pointer is merely - * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. - * However, if called outside the lock, and if some other CPU - * consumes ring entries at the same time, the value returned - * is not guaranteed to be correct. - * In this case - to avoid incorrectly detecting the ring - * as empty - the CPU consuming the ring entries is responsible - * for either consuming all ring entries until the ring is empty, - * or synchronizing with some other CPU and causing it to - * execute __ptr_ring_peek and/or consume the ring enteries - * after the synchronization point. - */ static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) - return r->queue[r->consumer_head]; + return READ_ONCE(r->queue[r->consumer_head]); return NULL; } -/* See __ptr_ring_peek above for locking rules. */ +/* + * Test ring empty status without taking any locks. + * + * NB: This is only safe to call if ring is never resized. + * + * However, if some other CPU consumes ring entries at the same time, the value + * returned is not guaranteed to be correct. + * + * In this case - to avoid incorrectly detecting the ring + * as empty - the CPU consuming the ring entries is responsible + * for either consuming all ring entries until the ring is empty, + * or synchronizing with some other CPU and causing it to + * re-test __ptr_ring_empty and/or consume the ring enteries + * after the synchronization point. + * + * Note: callers invoking this in a loop must use a compiler barrier, + * for example cpu_relax(). + */ static inline bool __ptr_ring_empty(struct ptr_ring *r) { - return !__ptr_ring_peek(r); + if (likely(r->size)) + return !r->queue[READ_ONCE(r->consumer_head)]; + return true; } static inline bool ptr_ring_empty(struct ptr_ring *r) @@ -248,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) /* Fundamentally, what we want to do is update consumer * index and zero out the entry so producer can reuse it. 
* Doing it naively at each consume would be as simple as: - * r->queue[r->consumer++] = NULL; - * if (unlikely(r->consumer >= r->size)) - * r->consumer = 0; + * consumer = r->consumer; + * r->queue[consumer++] = NULL; + * if (unlikely(consumer >= r->size)) + * consumer = 0; + * r->consumer = consumer; * but that is suboptimal when the ring is full as producer is writing * out new entries in the same cache line. Defer these updates until a * batch of entries has been consumed. */ - int head = r->consumer_head++; + /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty + * to work correctly. + */ + int consumer_head = r->consumer_head; + int head = consumer_head++; /* Once we have processed enough entries invalidate them in * the ring all at once so producer can reuse their space in the ring. * We also do this when we reach end of the ring - not mandatory * but helps keep the implementation simple. */ - if (unlikely(r->consumer_head - r->consumer_tail >= r->batch || - r->consumer_head >= r->size)) { + if (unlikely(consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size)) { /* Zero out entries in the reverse order: this way we touch the * cache line that producer might currently be reading the last; * producer won't make progress and touch other cache lines @@ -271,12 +282,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) */ while (likely(head >= r->consumer_tail)) r->queue[head--] = NULL; - r->consumer_tail = r->consumer_head; + r->consumer_tail = consumer_head; } - if (unlikely(r->consumer_head >= r->size)) { - r->consumer_head = 0; + if (unlikely(consumer_head >= r->size)) { + consumer_head = 0; r->consumer_tail = 0; } + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, consumer_head); } static inline void *__ptr_ring_consume(struct ptr_ring *r) @@ -527,7 +540,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, goto done; } r->queue[head] = batch[--n]; - r->consumer_tail = r->consumer_head = head; + r->consumer_tail = head; + /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ + WRITE_ONCE(r->consumer_head, head); } done: diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 1fd27d68926b..b401b962afff 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -13,6 +13,9 @@ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H +#include <linux/types.h> +#include <linux/cpumask.h> + #define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 39e2a2ac2471..2b3b350e07b7 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -32,14 +32,15 @@ #ifndef _COMMON_HSI_H #define _COMMON_HSI_H + #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/slab.h> /* dma_addr_t manip */ -#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) -#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ @@ -47,39 +48,45 @@ (x).lo = DMA_LO_LE((val)); \ } while (0) -#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_64(hi, lo) 
HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) \ + HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) +#define HILO_64_REGPAIR(regpair) ({ \ + typeof(regpair) __regpair = (regpair); \ + HILO_64(__regpair.hi, __regpair.lo); }) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ +/********************************/ +/* PROTOCOL COMMON FW CONSTANTS */ +/********************************/ -#define X_FINAL_CLEANUP_AGG_INT 1 +#define X_FINAL_CLEANUP_AGG_INT 1 -#define EVENT_RING_PAGE_SIZE_BYTES 4096 +#define EVENT_RING_PAGE_SIZE_BYTES 4096 -#define NUM_OF_GLOBAL_QUEUES 128 -#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 -#define ISCSI_CDU_TASK_SEG_TYPE 0 -#define FCOE_CDU_TASK_SEG_TYPE 0 -#define RDMA_CDU_TASK_SEG_TYPE 1 +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 -#define FW_ASSERT_GENERAL_ATTN_IDX 32 +#define FW_ASSERT_GENERAL_ATTN_IDX 32 -#define MAX_PINNED_CCFC 32 +#define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ -#define TSTORM_QZONE_SIZE 8 -#define MSTORM_QZONE_SIZE 16 -#define USTORM_QZONE_SIZE 8 -#define XSTORM_QZONE_SIZE 8 -#define YSTORM_QZONE_SIZE 0 -#define PSTORM_QZONE_SIZE 0 - -#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 @@ -102,8 +109,8 @@ #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 20 -#define FW_REVISION_VERSION 0 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 1 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -115,10 +122,10 @@ #define MAX_NUM_PORTS_BB (2) #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) -#define MAX_NUM_PFS_K2 (16) -#define MAX_NUM_PFS_BB (8) -#define MAX_NUM_PFS (MAX_NUM_PFS_K2) -#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ #define MAX_NUM_VFS_K2 (192) #define MAX_NUM_VFS_BB (120) @@ -141,29 +148,14 @@ /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ #define NUM_PHYS_TCS_4PORT_K2 (4) #define NUM_OF_PHYS_TCS (8) - +#define PURE_LB_TC NUM_OF_PHYS_TCS #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) -#define LB_TC (NUM_OF_PHYS_TCS) - -/* Num of possible traffic priority values */ -#define NUM_OF_PRIO (8) - -#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) -#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) -#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) -#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) - /* CIDs */ -#define NUM_OF_CONNECTION_TYPES (8) -#define NUM_OF_LCIDS (320) -#define NUM_OF_LTIDS (320) - -/* Clock values */ -#define MASTER_CLK_FREQ_E4 (375e6) -#define STORM_CLK_FREQ_E4 (1000e6) -#define CLK25M_CLK_FREQ_E4 (25e6) +#define NUM_OF_CONNECTION_TYPES_E4 (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) 
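
Returning briefly to the ptr_ring.h hunk a little earlier in this diff: the rewritten comments spell out that __ptr_ring_empty() is only a lockless hint (valid when the ring is never resized and used from the consuming side), while normal producers and consumers rely on the ring's own producer/consumer locks, now paired with READ_ONCE/WRITE_ONCE on consumer_head. A sketch of typical usage under those rules, with hypothetical my_* names:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct my_chan {
	struct ptr_ring ring;
};

static void my_msg_free(void *msg)
{
	kfree(msg);
}

static int my_chan_init(struct my_chan *c)
{
	/* Fixed ring size, so the lockless empty test stays meaningful. */
	return ptr_ring_init(&c->ring, 256, GFP_KERNEL);
}

static int my_chan_post(struct my_chan *c, void *msg)
{
	/* Serialised against other producers by the ring's producer_lock. */
	return ptr_ring_produce(&c->ring, msg);	/* -ENOSPC when full */
}

static void my_chan_drain(struct my_chan *c)
{
	void *msg;

	/* Consumer side: ptr_ring_consume() takes the consumer_lock. */
	while ((msg = ptr_ring_consume(&c->ring)))
		my_msg_free(msg);
}

static void my_chan_destroy(struct my_chan *c)
{
	ptr_ring_cleanup(&c->ring, my_msg_free);
}
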
/* Global PXP windows (GTT) */ #define NUM_OF_GTT 19 @@ -172,17 +164,17 @@ #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ -#define TOOLS_VERSION 10 +#define TOOLS_VERSION 10 /*****************/ /* CDU CONSTANTS */ /*****************/ -#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) -#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) @@ -201,45 +193,45 @@ #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 #define DQ_DEMS_ROCE_CQ_CONS 7 -/* XCM agg val selection */ -#define DQ_XCM_AGG_VAL_SEL_WORD2 0 -#define DQ_XCM_AGG_VAL_SEL_WORD3 1 -#define DQ_XCM_AGG_VAL_SEL_WORD4 2 -#define DQ_XCM_AGG_VAL_SEL_WORD5 3 -#define DQ_XCM_AGG_VAL_SEL_REG3 4 -#define DQ_XCM_AGG_VAL_SEL_REG4 5 -#define DQ_XCM_AGG_VAL_SEL_REG5 6 -#define DQ_XCM_AGG_VAL_SEL_REG6 7 - -/* XCM agg val selection */ -#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 -#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 +/* XCM agg val selection (HW) */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection (FW) */ +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define 
DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 #define DQ_UCM_AGG_VAL_SEL_WORD1 1 #define DQ_UCM_AGG_VAL_SEL_WORD2 2 #define DQ_UCM_AGG_VAL_SEL_WORD3 3 -#define DQ_UCM_AGG_VAL_SEL_REG0 4 -#define DQ_UCM_AGG_VAL_SEL_REG1 5 -#define DQ_UCM_AGG_VAL_SEL_REG2 6 -#define DQ_UCM_AGG_VAL_SEL_REG3 7 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 /* UCM agg val selection (FW) */ #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 @@ -263,7 +255,7 @@ #define DQ_TCM_ROCE_RQ_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD0 -/* XCM agg counter flag selection */ +/* XCM agg counter flag selection (HW) */ #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 @@ -273,20 +265,20 @@ #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 -/* XCM agg counter flag selection */ -#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +/* XCM agg counter flag selection (FW) */ +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -317,9 +309,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ -#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) -#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) -#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define 
DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) @@ -327,18 +319,18 @@ #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ -#define DQ_PWM_OFFSET_DPM_BASE 0x0 -#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 #define DQ_PWM_OFFSET_UCM32_BASE 0x4C -#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_UCM16_4 0x50 #define DQ_PWM_OFFSET_TCM16_BASE 0x58 #define DQ_PWM_OFFSET_TCM32_BASE 0x5C -#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 -#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 -#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) @@ -347,10 +339,11 @@ #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) -#define DQ_REGION_SHIFT (12) + +#define DQ_REGION_SHIFT (12) /* DPM */ -#define DQ_DPM_WQE_BUFF_SIZE (320) +#define DQ_DPM_WQE_BUFF_SIZE (320) /* Conn type ranges */ #define DQ_CONN_TYPE_RANGE_SHIFT (4) @@ -359,29 +352,30 @@ /* QM CONSTANTS */ /*****************/ -/* number of TX queues in the QM */ +/* Number of TX queues in the QM */ #define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_BB 448 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 -/* number of Other queues in the QM */ +/* Number of Other queues in the QM */ #define MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 -/* number of queues in a PF queue group */ +/* Number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 -/* the size of a single queue element in bytes */ -#define QM_PQ_ELEMENT_SIZE 4 +/* The size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 -/* base number of Tx PQs in the CM PQ representation. - * should be used when storing PQ IDs in CM PQ registers and context +/* Base number of Tx PQs in the CM PQ representation. + * Should be used when storing PQ IDs in CM PQ registers and context. 
*/ -#define CM_TX_PQ_BASE 0x200 +#define CM_TX_PQ_BASE 0x200 -/* number of global Vport/QCN rate limiters */ +/* Number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 + /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) @@ -400,7 +394,7 @@ #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ -#define PIS_PER_SB 12 +#define PIS_PER_SB_E4 12 #define CAU_HC_STOPPED_STATE 3 #define CAU_HC_DISABLE_STATE 4 @@ -432,8 +426,7 @@ #define IGU_CMD_INT_ACK_BASE 0x0400 #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 @@ -447,8 +440,7 @@ #define IGU_CMD_PROD_UPD_BASE 0x0600 #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff /*****************/ @@ -514,129 +506,126 @@ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) /* PF BAR */ -#define PXP_BAR0_START_GRC 0x0000 -#define PXP_BAR0_GRC_LENGTH 0x1C00000 -#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ - PXP_BAR0_GRC_LENGTH - 1) - -#define PXP_BAR0_START_IGU 0x1C00000 -#define PXP_BAR0_IGU_LENGTH 0x10000 -#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ - PXP_BAR0_IGU_LENGTH - 1) - -#define PXP_BAR0_START_TSDM 0x1C80000 -#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 -#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_MSDM 0x1D00000 -#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_USDM 0x1D80000 -#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_XSDM 0x1E00000 -#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_YSDM 0x1E80000 -#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_PSDM 0x1F00000 -#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) /* VF BAR */ -#define PXP_VF_BAR0 0 - -#define PXP_VF_BAR0_START_GRC 0x3E00 -#define PXP_VF_BAR0_GRC_LENGTH 0x200 -#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ - PXP_VF_BAR0_GRC_LENGTH - 1) - -#define PXP_VF_BAR0_START_IGU 0 -#define 
PXP_VF_BAR0_IGU_LENGTH 0x3000 -#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ - PXP_VF_BAR0_IGU_LENGTH - 1) - -#define PXP_VF_BAR0_START_DQ 0x3000 -#define PXP_VF_BAR0_DQ_LENGTH 0x200 -#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 -#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_OPAQUE_OFFSET) -#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ - + 4) -#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_LENGTH - 1) - -#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 -#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 -#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 -#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 -#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 -#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 -#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 -#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 -#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 - -#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 - -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 
0x10000 + +#define PXP_VF_BAR0_START_IGU2 0x10000 +#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 +#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ + PXP_VF_BAR0_IGU2_LENGTH - 1) + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) -#define PXP_QUEUES_ZONE_MAX_NUM 320 + +/* Host Interface */ +#define PXP_QUEUES_ZONE_MAX_NUM 320 + /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 +#define PRM_DMA_PAD_BYTES_NUM 2 + /*****************/ /* SDMs CONSTANTS */ /*****************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 @@ -644,26 +633,26 @@ /* Completion types */ /********************/ -#define SDM_COMP_TYPE_NONE 0 -#define SDM_COMP_TYPE_WAKE_THREAD 1 -#define SDM_COMP_TYPE_AGG_INT 2 -#define SDM_COMP_TYPE_CM 3 -#define SDM_COMP_TYPE_LOADER 4 -#define SDM_COMP_TYPE_PXP 5 -#define SDM_COMP_TYPE_INDICATE_ERROR 6 -#define SDM_COMP_TYPE_RELEASE_THREAD 7 -#define SDM_COMP_TYPE_RAM 8 -#define SDM_COMP_TYPE_INC_ORDER_CNT 9 +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /*****************/ -/* PBF Constants */ +/* PBF CONSTANTS */ /*****************/ /* Number of PBF command queue lines. Each line is 32B. */ -#define PBF_MAX_CMD_LINES 3328 +#define PBF_MAX_CMD_LINES 3328 /* Number of BTB blocks. Each block is 256B. 
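/*
 * Illustrative sketch, not part of the diff. It checks the relationship
 * used throughout the reworked VF BAR0 map above: each *_END_* value is
 * *_START_* + *_LENGTH_* - 1, so the regions tile the BAR back to back.
 * Only values visible in this hunk are used.
 */
#include <assert.h>

#define PXP_VF_BAR0_START_IGU		0
#define PXP_VF_BAR0_IGU_LENGTH		0x3000
#define PXP_VF_BAR0_END_IGU		(PXP_VF_BAR0_START_IGU + \
					 PXP_VF_BAR0_IGU_LENGTH - 1)
#define PXP_VF_BAR0_START_DQ		0x3000
#define PXP_VF_BAR0_DQ_LENGTH		0x200
#define PXP_VF_BAR0_END_DQ		(PXP_VF_BAR0_START_DQ + \
					 PXP_VF_BAR0_DQ_LENGTH - 1)
#define PXP_VF_BAR0_START_TSDM_ZONE_B	0x3200
#define PXP_VF_BAR0_START_IGU2		0x10000
#define PXP_VF_BAR0_IGU2_LENGTH		0xD000
#define PXP_VF_BAR0_END_IGU2		(PXP_VF_BAR0_START_IGU2 + \
					 PXP_VF_BAR0_IGU2_LENGTH - 1)

int main(void)
{
	assert(PXP_VF_BAR0_END_IGU == 0x2FFF);
	assert(PXP_VF_BAR0_START_DQ == PXP_VF_BAR0_END_IGU + 1);
	assert(PXP_VF_BAR0_START_TSDM_ZONE_B == PXP_VF_BAR0_END_DQ + 1);
	assert(PXP_VF_BAR0_END_IGU2 == 0x1CFFF);
	return 0;
}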
*/ -#define BTB_MAX_BLOCKS 1440 +#define BTB_MAX_BLOCKS 1440 /*****************/ /* PRS CONSTANTS */ @@ -671,14 +660,7 @@ #define PRS_GFT_CAM_LINES_NO_MATCH 31 -/* Async data KCQ CQE */ -struct async_data { - __le32 cid; - __le16 itid; - u8 error_code; - u8 fw_debug_param; -}; - +/* Interrupt coalescing TimeSet */ struct coalescing_timeset { u8 value; #define COALESCING_TIMESET_TIMESET_MASK 0x7F @@ -692,23 +674,32 @@ struct common_queue_zone { __le16 reserved; }; +/* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; __le16 cqe_prod; }; -struct regpair { - __le32 lo; - __le32 hi; +struct tcp_ulp_connect_done_params { + __le16 mss; + u8 snd_wnd_scale; + u8 flags; +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 }; -struct vf_pf_channel_eqe_data { - struct regpair msg_addr; +struct iscsi_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; }; struct iscsi_eqe_data { - __le32 cid; + __le16 icid; __le16 conn_id; + __le16 reserved; u8 error_code; u8 error_pdu_opcode_reserved; #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F @@ -719,52 +710,6 @@ struct iscsi_eqe_data { #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; -struct rdma_eqe_destroy_qp { - __le32 cid; - u8 reserved[4]; -}; - -union rdma_eqe_data { - struct regpair async_handle; - struct rdma_eqe_destroy_qp rdma_destroy_qp_data; -}; - -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - -struct initial_cleanup_eqe_data { - u8 vf_id; - u8 reserved[7]; -}; - -/* Event Data Union */ -union event_ring_data { - u8 bytes[8]; - struct vf_pf_channel_eqe_data vf_pf_channel; - struct iscsi_eqe_data iscsi_info; - union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; - struct initial_cleanup_eqe_data vf_init_cleanup; -}; - -/* Event Ring Entry */ -struct event_ring_entry { - u8 protocol_id; - u8 opcode; - __le16 reserved0; - __le16 echo; - u8 fw_return_code; - u8 flags; -#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 -#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 -#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F -#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 - union event_ring_data data; -}; - /* Multi function mode */ enum mf_mode { ERROR_MODE /* Unsupported mode */, @@ -781,13 +726,31 @@ enum protocol_type { PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_IWARP, - PROTOCOLID_RESERVED5, + PROTOCOLID_RESERVED0, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, - PROTOCOLID_RESERVED6, + PROTOCOLID_RESERVED1, MAX_PROTOCOL_TYPE }; +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* RoCE Destroy Event Data */ +struct rdma_eqe_destroy_qp { + __le32 cid; + u8 reserved[4]; +}; + +/* RDMA Event Data Union */ +union rdma_eqe_data { + struct regpair async_handle; + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; +}; + +/* Ustorm Queue Zone */ struct ustorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[3]; @@ -798,62 +761,71 @@ struct ustorm_queue_zone { struct common_queue_zone common; }; -/* status block structure */ +/* Status block structure */ struct cau_pi_entry { - u32 prod; -#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF -#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 -#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F -#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 -#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 -#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 -#define CAU_PI_ENTRY_RESERVED_MASK 0xFF -#define 
CAU_PI_ENTRY_RESERVED_SHIFT 24 + __le32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 }; -/* status block structure */ +/* Status block structure */ struct cau_sb_entry { - u32 data; -#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF -#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 -#define CAU_SB_ENTRY_STATE0_MASK 0xF -#define CAU_SB_ENTRY_STATE0_SHIFT 24 -#define CAU_SB_ENTRY_STATE1_MASK 0xF -#define CAU_SB_ENTRY_STATE1_SHIFT 28 - u32 params; -#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 -#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 -#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 -#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 -#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF -#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 -#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 -#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 -#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF -#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 -#define CAU_SB_ENTRY_TPH_MASK 0x1 -#define CAU_SB_ENTRY_TPH_SHIFT 31 + __le32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + __le32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 }; -/* core doorbell data */ +/* Igu cleanup bit values to distinguish between clean or producer consumer + * update. 
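/*
 * Standalone sketch, not from the commit. The headers above describe
 * hardware words as MASK/SHIFT pairs; a driver typically composes them
 * with a SET_FIELD-style helper and only then converts to little-endian
 * (the CAU structs were switched to __le32 in this hunk). SET_FIELD here
 * is a local stand-in, not necessarily the driver's own macro.
 */
#include <assert.h>
#include <stdint.h>

#define CAU_SB_ENTRY_VF_NUMBER_MASK	0xFF
#define CAU_SB_ENTRY_VF_NUMBER_SHIFT	18
#define CAU_SB_ENTRY_VF_VALID_MASK	0x1
#define CAU_SB_ENTRY_VF_VALID_SHIFT	26
#define CAU_SB_ENTRY_PF_NUMBER_MASK	0xF
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT	27

#define SET_FIELD(word, name, val) \
	((word) |= (uint32_t)(((val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t params = 0;	/* host order; cpu_to_le32() before writing */

	SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, 2);
	SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, 1);
	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, 7);

	assert(params == ((2u << 27) | (1u << 26) | (7u << 18)));
	return 0;
}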
+ */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* Core doorbell data */ struct core_db_data { u8 params; -#define CORE_DB_DATA_DEST_MASK 0x3 -#define CORE_DB_DATA_DEST_SHIFT 0 -#define CORE_DB_DATA_AGG_CMD_MASK 0x3 -#define CORE_DB_DATA_AGG_CMD_SHIFT 2 -#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 -#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 -#define CORE_DB_DATA_RESERVED_MASK 0x1 -#define CORE_DB_DATA_RESERVED_SHIFT 5 -#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 spq_prod; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; }; /* Enum of doorbell aggregative command selection */ @@ -909,67 +881,69 @@ struct db_l2_dpm_sge { struct regpair addr; __le16 nbytes; __le16 bitfields; -#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF -#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 -#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 -#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 -#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 -#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 -#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF -#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 __le32 reserved2; }; /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; -#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 -#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 -#define DB_LEGACY_ADDR_DEMS_MASK 0x7 -#define DB_LEGACY_ADDR_DEMS_SHIFT 2 -#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF -#define DB_LEGACY_ADDR_ICID_SHIFT 5 +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 }; /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; #define DB_PWM_ADDR_RESERVED0_MASK 0x7 -#define DB_PWM_ADDR_RESERVED0_SHIFT 0 -#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F #define DB_PWM_ADDR_OFFSET_SHIFT 3 -#define DB_PWM_ADDR_WID_MASK 0x3 -#define DB_PWM_ADDR_WID_SHIFT 10 -#define DB_PWM_ADDR_DPI_MASK 0xFFFF -#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 #define DB_PWM_ADDR_RESERVED1_MASK 0xF -#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; -/* Parameters to RoCE firmware, passed in EDPM doorbell */ +/* Parameters to RDMA firmware, passed in EDPM doorbell */ struct db_rdma_dpm_params { __le32 params; -#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 
0xFF -#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; -/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ +/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a + * DPM burst. + */ struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; @@ -987,22 +961,22 @@ enum igu_int_cmd { /* IGU producer or consumer update command */ struct igu_prod_cons_update { - u32 sb_id_and_flags; -#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF -#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 -#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 - u32 reserved1; + __le32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; }; /* Igu segments access for default status block only */ @@ -1012,38 +986,63 @@ enum igu_seg_access { MAX_IGU_SEG_ACCESS }; +/* Enumeration for L3 type field of parsing_and_err_flags. 
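/*
 * Illustrative sketch only, not driver code. It packs the sb_id_and_flags
 * word of struct igu_prod_cons_update from the fields defined above (new
 * producer index, update flag, command type). The enable_int encoding is
 * passed through as a raw value because the igu_int_cmd enumerators are
 * not visible in this hunk.
 */
#include <assert.h>
#include <stdint.h>

#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK	0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT	0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT	24
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK	0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT	25
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK	0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT	31

static uint32_t igu_update_word(uint32_t prod, uint32_t enable_int,
				uint32_t command_type)
{
	return ((prod & IGU_PROD_CONS_UPDATE_SB_INDEX_MASK)
			<< IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
	       (1u << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
	       ((enable_int & IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK)
			<< IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
	       ((command_type & IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK)
			<< IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT);
}

int main(void)
{
	/* Producer 0x1234, interrupt-enable code 1, command type SET (1). */
	assert(igu_update_word(0x1234, 1, 1) ==
	       (0x1234u | (1u << 24) | (1u << 25) | (1u << 31)));
	return 0;
}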
+ * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 + * (This field can be filled according to the last-ethertype) + */ +enum l3_type { + e_l3_type_unknown, + e_l3_type_ipv4, + e_l3_type_ipv6, + MAX_L3_TYPE +}; + +/* Enumeration for l4Protocol field of parsing_and_err_flags. + * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. + * If the packet is IPv4 fragment, and its not the first fragment, the + * protocol-type should be set to none. + */ +enum l4_protocol { + e_l4_protocol_none, + e_l4_protocol_tcp, + e_l4_protocol_udp, + MAX_L4_PROTOCOL +}; + +/* Parsing and error flags field */ struct parsing_and_err_flags { __le16 flags; -#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define 
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +/* Parsing error flags bitmap */ struct parsing_err_flags { __le16 flags; #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 @@ -1080,266 +1079,260 @@ struct parsing_err_flags { #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 }; +/* Pb context */ struct pb_context { __le32 crc[4]; }; +/* Concrete Function ID */ struct pxp_concrete_fid { __le16 fid; -#define PXP_CONCRETE_FID_PFID_MASK 0xF -#define PXP_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_CONCRETE_FID_PORT_MASK 0x3 -#define PXP_CONCRETE_FID_PORT_SHIFT 4 -#define PXP_CONCRETE_FID_PATH_MASK 0x1 -#define PXP_CONCRETE_FID_PATH_SHIFT 6 -#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 }; +/* Concrete Function ID */ struct pxp_pretend_concrete_fid { __le16 fid; -#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF -#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 }; +/* Function ID */ union pxp_pretend_fid { struct pxp_pretend_concrete_fid concrete_fid; - __le16 opaque_fid; + __le16 opaque_fid; }; -/* Pxp Pretend Command Register. 
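/*
 * Sketch only, not part of the diff. It shows how an Rx path might decode
 * the flags word of struct parsing_and_err_flags using the l3_type and
 * l4_protocol enumerations added in this hunk. GET_FIELD is a local
 * stand-in for the driver's field-extraction helper.
 */
#include <assert.h>
#include <stdint.h>

#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK	0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT	0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK	0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT	2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK	0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT	4

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

enum l3_type { e_l3_type_unknown, e_l3_type_ipv4, e_l3_type_ipv6 };
enum l4_protocol { e_l4_protocol_none, e_l4_protocol_tcp, e_l4_protocol_udp };

int main(void)
{
	/* Example flags word: IPv4, TCP, not a fragment. */
	uint16_t flags =
		(e_l3_type_ipv4 << PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
		(e_l4_protocol_tcp << PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT);

	assert(GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE) == e_l3_type_ipv4);
	assert(GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL) ==
	       e_l4_protocol_tcp);
	assert(GET_FIELD(flags, PARSING_AND_ERR_FLAGS_IPV4FRAG) == 0);
	return 0;
}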
*/ +/* Pxp Pretend Command Register */ struct pxp_pretend_cmd { - union pxp_pretend_fid fid; - __le16 control; -#define PXP_PRETEND_CMD_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PATH_SHIFT 0 -#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 -#define PXP_PRETEND_CMD_PORT_MASK 0x3 -#define PXP_PRETEND_CMD_PORT_SHIFT 2 -#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 -#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 -#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 -#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 -#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 -#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 }; -/* PTT Record in PXP Admin Window. */ +/* PTT Record in PXP Admin Window */ struct pxp_ptt_entry { - __le32 offset; -#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF -#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 -#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF -#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 - struct pxp_pretend_cmd pretend; + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; }; -/* VF Zone A Permission Register. 
*/ +/* VF Zone A Permission Register */ struct pxp_vf_zone_a_permission { __le32 control; -#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF -#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 -#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 -#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 }; -/* RSS hash type */ +/* Rdif context */ struct rdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; u8 flags0; -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 u8 partial_dif_data[7]; __le16 partial_crc_value; __le16 partial_checksum_value; __le32 offset_in_io; __le16 flags1; -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 -#define 
RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 __le16 state; -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 -#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 -#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 -#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 -#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 __le32 reserved2; }; -/* RSS hash type */ -enum rss_hash_type { - RSS_HASH_TYPE_DEFAULT = 0, - RSS_HASH_TYPE_IPV4 = 1, - RSS_HASH_TYPE_TCP_IPV4 = 2, - RSS_HASH_TYPE_IPV6 = 3, - RSS_HASH_TYPE_TCP_IPV6 = 4, - RSS_HASH_TYPE_UDP_IPV4 = 5, - RSS_HASH_TYPE_UDP_IPV6 = 6, - MAX_RSS_HASH_TYPE -}; - -/* status block structure */ -struct status_block { - __le16 pi_array[PIS_PER_SB]; +/* Status block structure */ +struct status_block_e4 { + __le16 pi_array[PIS_PER_SB_E4]; __le32 sb_num; -#define STATUS_BLOCK_SB_NUM_MASK 0x1FF -#define 
STATUS_BLOCK_SB_NUM_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F -#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 -#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF -#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 +#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 __le32 prod_index; -#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF -#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF -#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 }; +/* Tdif context */ struct tdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; - __le16 partial_crc_valueB; - __le16 partial_checksum_valueB; + __le16 partial_crc_value_b; + __le16 partial_checksum_value_b; __le16 stateB; -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 -#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F -#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 u8 reserved1; u8 flags0; -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define 
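/*
 * Sketch only, not the kernel structure itself. The real struct
 * status_block_e4 above uses __le16/__le32; plain host-order integers are
 * used here so the snippet stays standalone. It shows how a driver would
 * pull the 24-bit producer index out of prod_index.
 */
#include <assert.h>
#include <stdint.h>

#define PIS_PER_SB_E4			12
#define STATUS_BLOCK_E4_PROD_INDEX_MASK	0xFFFFFF
#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0

struct example_status_block_e4 {
	uint16_t pi_array[PIS_PER_SB_E4];
	uint32_t sb_num;
	uint32_t prod_index;
};

static uint32_t sb_prod(const struct example_status_block_e4 *sb)
{
	return (sb->prod_index >> STATUS_BLOCK_E4_PROD_INDEX_SHIFT) &
	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
}

int main(void)
{
	struct example_status_block_e4 sb = { .prod_index = 0xAB001234 };

	/* The top byte is zero padding; only the low 24 bits are the index. */
	assert(sb_prod(&sb) == 0x001234);
	return 0;
}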
TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 __le32 flags1; -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 -#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 -#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 -#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 - __le32 offset_in_iob; +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_io_b; __le16 partial_crc_value_a; - __le16 partial_checksum_valuea_; - __le32 offset_in_ioa; + __le16 partial_checksum_value_a; + __le32 offset_in_io_a; u8 partial_dif_data_a[8]; u8 partial_dif_data_b[8]; }; +/* Timers context */ struct timers_context { __le32 logical_client_0; #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF @@ -1385,6 +1378,7 @@ struct timers_context { #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; +/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ enum tunnel_next_protocol { e_unknown = 0, e_l2 = 1, diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cb06e6e368e1..9db02856623b 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -36,150 +36,168 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ -#define ETH_HSI_VER_MAJOR 3 -#define ETH_HSI_VER_MINOR 10 + +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 10 #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 -#define ETH_CACHE_LINE_SIZE 64 -#define ETH_RX_CQE_GAP 32 -#define ETH_MAX_RAMROD_PER_CON 8 -#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 -#define ETH_RX_NUM_NEXT_PAGE_BDS 2 - -#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 -#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 - -#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 -#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 -#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 -#define ETH_TX_MAX_LSO_HDR_NBD 4 -#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 -#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 -#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 -#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 -#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) -#define ETH_TX_MAX_LSO_HDR_BYTES 510 -#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) -#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 -#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 -#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 -#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF - -#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 
+ 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 -#define ETH_RX_BD_THRESHOLD 12 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 -/* num of MAC/VLAN filters */ -#define ETH_NUM_MAC_FILTERS 512 -#define ETH_NUM_VLAN_FILTERS 512 +/* Num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 -/* approx. multicast constants */ -#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 -#define ETH_MULTICAST_MAC_BINS 256 -#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) +/* Approx. multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) -/* ethernet vport update constants */ -#define ETH_FILTER_RULES_COUNT 10 -#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 -#define ETH_RSS_KEY_SIZE_REGS 10 -#define ETH_RSS_ENGINE_NUM_K2 207 -#define ETH_RSS_ENGINE_NUM_BB 127 +/* Ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 /* TPA constants */ -#define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT -#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 -#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Control frame check constants */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASH_CAN_VPORT 0x1FF + +/* Destination port mode */ +enum dest_port_mode { + DEST_PORT_PHY, + DEST_PORT_LOOPBACK, + DEST_PORT_PHY_LOOPBACK, + DEST_PORT_DROP, + MAX_DEST_PORT_MODE +}; + +/* Ethernet address type */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + struct eth_tx_1st_bd_flags { u8 bitfields; -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define 
ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 }; -/* The parsing information data fo rthe first tx bd of a given packet. */ +/* The parsing information data fo rthe first tx bd of a given packet */ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 -#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 -#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF -#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; -/* The parsing information data for the second tx bd of a given packet. */ +/* The parsing information data for the second tx bd of a given packet */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; __le16 bitfields1; -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 -#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 -#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 -#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define 
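/*
 * Sketch only, not part of the diff. It mirrors how the first Tx BD flags
 * byte and the pkt_len bitfield of struct eth_tx_data_1st_bd could be
 * composed from the MASK/SHIFT pairs above (checksum offload, untunnelled
 * packet). Field layout is copied from the hunk; the composition itself
 * is a simplified stand-in for what a driver would do.
 */
#include <assert.h>
#include <stdint.h>

#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT	0
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT	2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT	3
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2

int main(void)
{
	uint8_t bd_flags = 0;
	uint16_t bitfields = 0;
	uint16_t pkt_len = 1514;

	bd_flags |= 1u << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	bd_flags |= 1u << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
	bd_flags |= 1u << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

	bitfields |= (uint16_t)((pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
				<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);

	assert(bd_flags == 0x0D);
	assert((bitfields >> ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT) == 1514);
	return 0;
}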
ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 __le16 bitfields2; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; -/* Firmware data for L2-EDPM packet. */ +/* Firmware data for L2-EDPM packet */ struct eth_edpm_fw_data { struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_2nd_bd data_2nd_bd; __le32 reserved; }; -struct eth_fast_path_cqe_fw_debug { - __le16 reserved2; -}; - -/* tunneling parsing flags */ +/* Tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 @@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags { /* PMD flow control bits */ struct eth_pmd_flow_flags { u8 flags; -#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 -#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F -#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 }; -/* Regular ETH Rx FP CQE. */ +/* Regular ETH Rx FP CQE */ struct eth_fast_path_rx_reg_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 __le16 pkt_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe { u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; - u8 reserved[9]; - struct eth_fast_path_cqe_fw_debug fw_debug; - u8 reserved1[3]; + u8 reserved; + __le16 flow_id; + u8 reserved1[11]; struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-continue ETH Rx FP CQE. */ +/* TPA-continue ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_cont_cqe { u8 type; u8 tpa_agg_index; @@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-end ETH Rx FP CQE. */ +/* TPA-end ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_end_cqe { u8 type; u8 tpa_agg_index; @@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-start ETH Rx FP CQE. 
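/*
 * Sketch only, not part of the diff. The fast-path Rx CQE above carries
 * the RSS hash type and traffic class in one bitfields byte; this snippet
 * decodes them with the masks from the hunk. The rss_hash_type values
 * match the enum that this commit relocates into eth_common.h.
 */
#include <assert.h>
#include <stdint.h>

#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK		0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT		3

enum rss_hash_type {
	RSS_HASH_TYPE_DEFAULT = 0,
	RSS_HASH_TYPE_IPV4 = 1,
	RSS_HASH_TYPE_TCP_IPV4 = 2,
};

int main(void)
{
	/* Example CQE bitfields byte: TCP/IPv4 hash, traffic class 3. */
	uint8_t bitfields = RSS_HASH_TYPE_TCP_IPV4 |
			    (3u << ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT);

	assert(((bitfields >> ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT) &
		ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK) ==
	       RSS_HASH_TYPE_TCP_IPV4);
	assert(((bitfields >> ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT) &
		ETH_FAST_PATH_RX_REG_CQE_TC_MASK) == 3);
	return 0;
}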
*/ +/* TPA-start ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_start_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 __le16 seg_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe { u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; - struct eth_fast_path_cqe_fw_debug fw_debug; + __le16 flow_id; u8 reserved; struct eth_pmd_flow_flags pmd_flags; }; @@ -295,24 +313,24 @@ struct eth_rx_bd { struct regpair addr; }; -/* regular ETH Rx SP CQE */ +/* Regular ETH Rx SP CQE */ struct eth_slow_path_rx_cqe { - u8 type; - u8 ramrod_cmd_id; - u8 error_flag; - u8 reserved[25]; - __le16 echo; - u8 reserved1; + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; struct eth_pmd_flow_flags pmd_flags; }; -/* union for all ETH Rx CQE types */ +/* Union for all ETH Rx CQE types */ union eth_rx_cqe { - struct eth_fast_path_rx_reg_cqe fast_path_regular; - struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; - struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; - struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; - struct eth_slow_path_rx_cqe slow_path; + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path; }; /* ETH Rx CQE type */ @@ -339,7 +357,7 @@ enum eth_rx_tunn_type { MAX_ETH_RX_TUNN_TYPE }; -/* Aggregation end reason. */ +/* Aggregation end reason. */ enum eth_tpa_end_reason { ETH_AGG_END_UNUSED, ETH_AGG_END_SP_UPDATE, @@ -354,59 +372,59 @@ enum eth_tpa_end_reason { /* The first tx bd of a given packet */ struct eth_tx_1st_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_1st_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; }; /* The second tx bd of a given packet */ struct eth_tx_2nd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_2nd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; }; -/* The parsing information data for the third tx bd of a given packet. 
*/ +/* The parsing information data for the third tx bd of a given packet */ struct eth_tx_data_3rd_bd { __le16 lso_mss; __le16 bitfields; -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 -#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF -#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 -#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F -#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 u8 tunn_l4_hdr_start_offset_w; u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ struct eth_tx_3rd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_3rd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; }; -/* Complementary information for the regular tx bd of a given packet. */ +/* Complementary information for the regular tx bd of a given packet */ struct eth_tx_data_bd { - __le16 reserved0; - __le16 bitfields; -#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF -#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 -#define ETH_TX_DATA_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F -#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 __le16 reserved3; }; /* The common non-special TX BD ring element */ struct eth_tx_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { @@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone { /* ETH doorbell data */ struct eth_db_data { u8 params; -#define ETH_DB_DATA_DEST_MASK 0x3 -#define ETH_DB_DATA_DEST_SHIFT 0 -#define ETH_DB_DATA_AGG_CMD_MASK 0x3 -#define ETH_DB_DATA_AGG_CMD_SHIFT 2 -#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 -#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 -#define ETH_DB_DATA_RESERVED_MASK 0x1 -#define ETH_DB_DATA_RESERVED_SHIFT 5 -#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 bd_prod; }; +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + #endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 12fc9e788eea..22077c586853 100644 --- a/include/linux/qed/fcoe_common.h +++ 
b/include/linux/qed/fcoe_common.h @@ -8,217 +8,78 @@ #ifndef __FCOE_COMMON__ #define __FCOE_COMMON__ + /*********************/ /* FCOE FW CONSTANTS */ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -struct fcoe_abts_pkt { - __le32 abts_rsp_fc_payload_lo; - __le16 abts_rsp_rx_id; - u8 abts_rsp_rctl; - u8 reserved2; -}; - -/* FCoE additional WQE (Sq/XferQ) information */ -union fcoe_additional_info_union { - __le32 previous_tid; - __le32 parent_tid; - __le32 burst_length; - __le32 seq_rec_updated_offset; -}; - -struct fcoe_exp_ro { - __le32 data_offset; - __le32 reserved; -}; - -union fcoe_cleanup_addr_exp_ro_union { - struct regpair abts_rsp_fc_payload_hi; - struct fcoe_exp_ro exp_ro; -}; - -/* FCoE Ramrod Command IDs */ -enum fcoe_completion_status { - FCOE_COMPLETION_STATUS_SUCCESS, - FCOE_COMPLETION_STATUS_FCOE_VER_ERR, - FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, - MAX_FCOE_COMPLETION_STATUS -}; - -struct fc_addr_nw { - u8 addr_lo; - u8 addr_mid; - u8 addr_hi; -}; - -/* FCoE connection offload */ -struct fcoe_conn_offload_ramrod_data { - struct regpair sq_pbl_addr; - struct regpair sq_curr_page_addr; - struct regpair sq_next_page_addr; - struct regpair xferq_pbl_addr; - struct regpair xferq_curr_page_addr; - struct regpair xferq_next_page_addr; - struct regpair respq_pbl_addr; - struct regpair respq_curr_page_addr; - struct regpair respq_next_page_addr; - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; - __le16 tx_max_fc_pay_len; - __le16 e_d_tov_timer_val; - __le16 rx_max_fc_pay_len; - __le16 vlan_tag; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 - __le16 physical_q0; - __le16 rec_rr_tov_timer_val; - struct fc_addr_nw s_id; - u8 max_conc_seqs_c3; - struct fc_addr_nw d_id; - u8 flags; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 - __le16 conn_id; - u8 def_q_idx; - u8 reserved[5]; -}; - -/* FCoE terminate connection request */ -struct fcoe_conn_terminate_ramrod_data { - struct regpair terminate_params_addr; -}; - -struct fcoe_slow_sgl_ctx { - struct regpair base_sgl_addr; - __le16 curr_sge_off; - __le16 remainder_num_sges; - __le16 curr_sgl_index; - __le16 reserved; -}; - -union fcoe_dix_desc_ctx { - struct fcoe_slow_sgl_ctx dix_sgl; - struct scsi_sge cached_dix_sge; +/* The fcoe storm task context protection-information of Ystorm */ +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; }; -struct fcoe_fast_sgl_ctx { - struct regpair sgl_start_addr; - __le32 sgl_byte_offset; - __le16 task_reuse_cnt; - __le16 init_offset_in_first_sge; +/* The fcoe storm task context protection-information of Ystorm */ +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; }; +/* FCP CMD payload */ struct fcoe_fcp_cmd_payload { __le32 opaque[8]; }; +/* FCP RSP payload */ struct fcoe_fcp_rsp_payload { __le32 opaque[6]; }; -struct fcoe_fcp_xfer_payload { - __le32 opaque[3]; -}; - -/* FCoE firmware function init */ -struct fcoe_init_func_ramrod_data { - struct scsi_init_func_params func_params; - struct scsi_init_func_queues q_params; - __le16 mtu; - __le16 sq_num_pages_in_pbl; - __le32 reserved; -}; - -/* FCoE: Mode of the connection: Target or Initiator or both */ -enum fcoe_mode_type { - FCOE_INITIATOR_MODE = 0x0, - FCOE_TARGET_MODE = 0x1, - FCOE_BOTH_OR_NOT_CHOSEN = 0x3, - MAX_FCOE_MODE_TYPE -}; - -struct fcoe_rx_stat { - struct regpair fcoe_rx_byte_cnt; - struct regpair fcoe_rx_data_pkt_cnt; - struct regpair fcoe_rx_xfer_pkt_cnt; - struct regpair fcoe_rx_other_pkt_cnt; - __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; - __le32 fcoe_silent_drop_pkt_rq_full_cnt; - __le32 fcoe_silent_drop_pkt_crc_error_cnt; - __le32 fcoe_silent_drop_pkt_task_invalid_cnt; - __le32 fcoe_silent_drop_total_pkt_cnt; - __le32 rsrv; -}; - -struct fcoe_stat_ramrod_data { - struct regpair stat_params_addr; -}; - -struct protection_info_ctx { - __le16 flags; -#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 -#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 -#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F -#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 - u8 dix_block_size; - u8 dst_size; -}; - -union protection_info_union_ctx { - struct protection_info_ctx info; - __le32 value; -}; - +/* FCP RSP payload */ struct fcp_rsp_payload_padded { struct fcoe_fcp_rsp_payload rsp_payload; __le32 reserved[2]; }; +/* FCP RSP payload */ +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCP RSP payload */ struct fcp_xfer_payload_padded { struct fcoe_fcp_xfer_payload xfer_payload; __le32 reserved[5]; }; +/* Task params */ struct fcoe_tx_data_params { __le32 data_offset; __le32 offset_in_io; u8 flags; -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 -#define 
FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F -#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 u8 dif_residual; __le16 seq_cnt; __le16 single_sge_saved_offset; @@ -227,6 +88,7 @@ struct fcoe_tx_data_params { __le16 reserved3; }; +/* Middle path parameters: FC header fields provided by the driver */ struct fcoe_tx_mid_path_params { __le32 parameter; u8 r_ctl; @@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params { __le16 ox_id; }; +/* Task params */ struct fcoe_tx_params { struct fcoe_tx_data_params data; struct fcoe_tx_mid_path_params mid_path; }; +/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ union fcoe_tx_info_union_ctx { struct fcoe_fcp_cmd_payload fcp_cmd_payload; struct fcp_rsp_payload_padded fcp_rsp_payload; @@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx { struct fcoe_tx_params tx_params; }; +/* Data sgl */ +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +/* Union of DIX SGL \ cached DIX sges */ +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct scsi_sge cached_dix_sge; +}; + +/* The fcoe storm task context of Ystorm */ struct ystorm_fcoe_task_st_ctx { u8 task_type; u8 sgl_mode; -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 u8 cached_dix_sge; u8 expect_first_xfer; __le32 num_pbf_zero_write; @@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx { u8 reserved2[8]; }; -struct ystorm_fcoe_task_ag_ctx { +struct e4_ystorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define 
YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx { __le32 reg2; }; -struct tstorm_fcoe_task_ag_ctx { +struct e4_tstorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 cleanup_state; __le16 last_sent_tid; __le32 rec_rr_tov_exp_timeout; @@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx { __le32 data_offset_next; }; +/* Cached data sges */ +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +/* Union of Cleanup address \ expected relative offsets */ +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* Fields coppied from ABTSrsp pckt */ +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; + +/* FW read- write (modifyable) part The fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; __le16 flags; -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 -#define 
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 __le16 seq_cnt; u8 seq_id; u8 ooo_rx_seq_id; @@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { __le16 reserved1; }; +/* FW read only part The fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { u8 task_type; u8 dev_type; @@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { __le32 rsrv; }; +/** The fcoe task storm context of Tstorm */ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; }; -struct mstorm_fcoe_task_ag_ctx { +struct e4_mstorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 icid; u8 flags0; -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; @@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx { __le32 reg2; }; +/* The fcoe task storm context of Mstorm */ struct mstorm_fcoe_task_st_ctx { struct regpair rsp_buf_addr; __le32 rsrv[2]; @@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx { __le32 data_buffer_offset; __le16 parent_id; __le16 flags; -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 -#define 
MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 struct scsi_cached_sges data_desc; }; -struct ustorm_fcoe_task_ag_ctx { +struct e4_ustorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 
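
The task-context structures above expose every flag through a _MASK/_SHIFT define pair rather than a C bitfield. A minimal, self-contained sketch of how such a pair is typically read and written follows; the GET_FIELD/SET_FIELD helpers are simplified local stand-ins written for this example (the qed common headers carry their own equivalents), and the wrapper function name is purely illustrative.

/* Sketch only: simplified accessors for a _MASK/_SHIFT pair such as
 * E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN, defined locally so the
 * example stands alone.
 */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((flag) & name##_MASK) << name##_SHIFT;	\
	} while (0)

/* Illustrative helper: enable the DIF-error CF bit in flags2 of the
 * Ustorm aggregate context and read it back.
 */
static inline u8 example_dif_error_cf_en(struct e4_ustorm_fcoe_task_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags2, E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN, 1);
	return GET_FIELD(ctx->flags2, E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN);
}
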
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; @@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx { __le32 reg5; }; -struct fcoe_task_context { +/* FCoE task context */ +struct e4_fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; - struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; - struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct tstorm_fcoe_task_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; struct mstorm_fcoe_task_st_ctx mstorm_st_context; - struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; struct rdif_task_context rdif_context; }; +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 
burst_length; + __le32 seq_rec_updated_offset; +}; + +/* FCoE Ramrod Command IDs */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +/* FC address (SID/DID) network presentation */ +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; + +/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +/* FCoE device type */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + +/* Data sgl */ +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved[3]; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +/* Per PF FCoE receive path statistics - tStorm RAM structure */ +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; 
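
The receive-statistics structure being restored here keeps its byte and packet counters in regpair fields, i.e. two little-endian 32-bit halves. Below is a short sketch of folding such a counter into a host u64 for reporting; it assumes the usual qed layout of struct regpair (lo word first, then hi) and relies on the kernel's le32_to_cpu, while the helper name itself is illustrative and not part of any driver API.

/* Sketch: combine a firmware regpair counter into a 64-bit host value. */
static inline u64 example_regpair_to_u64(struct regpair rp)
{
	/* hi carries bits 63:32, lo carries bits 31:0 */
	return ((u64)le32_to_cpu(rp.hi) << 32) | le32_to_cpu(rp.lo);
}

/* Usage, e.g. total FCoE Rx bytes for a PF:
 *	u64 rx_bytes = example_regpair_to_u64(rx_stat.fcoe_rx_byte_cnt);
 */
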
+ __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +/* FCoE SQE request type */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + +/* FCoe statistics request */ +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +/* FCoE task type */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + +/* Per PF FCoE transmit path statistics - pStorm RAM structure */ struct fcoe_tx_stat { struct regpair fcoe_tx_byte_cnt; struct regpair fcoe_tx_data_pkt_cnt; @@ -618,51 +690,55 @@ struct fcoe_tx_stat { struct regpair fcoe_tx_other_pkt_cnt; }; +/* FCoE SQ/XferQ element */ struct fcoe_wqe { __le16 task_id; __le16 flags; -#define FCOE_WQE_REQ_TYPE_MASK 0xF -#define FCOE_WQE_REQ_TYPE_SHIFT 0 -#define FCOE_WQE_SGL_MODE_MASK 0x1 -#define FCOE_WQE_SGL_MODE_SHIFT 4 -#define FCOE_WQE_CONTINUATION_MASK 0x1 -#define FCOE_WQE_CONTINUATION_SHIFT 5 -#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 -#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 -#define FCOE_WQE_RESERVED_MASK 0x1 -#define FCOE_WQE_RESERVED_SHIFT 7 -#define FCOE_WQE_NUM_SGES_MASK 0xF -#define FCOE_WQE_NUM_SGES_SHIFT 8 -#define FCOE_WQE_RESERVED1_MASK 0xF -#define FCOE_WQE_RESERVED1_SHIFT 12 +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x1 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 5 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 +#define FCOE_WQE_RESERVED_MASK 0x1 +#define FCOE_WQE_RESERVED_SHIFT 7 +#define FCOE_WQE_NUM_SGES_MASK 0xF +#define FCOE_WQE_NUM_SGES_SHIFT 8 +#define FCOE_WQE_RESERVED1_MASK 0xF +#define FCOE_WQE_RESERVED1_SHIFT 12 union fcoe_additional_info_union additional_info_union; }; +/* FCoE XFRQ element */ struct xfrqe_prot_flags { u8 flags; -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 -#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 -#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 }; +/* FCoE doorbell data */ struct fcoe_db_data { u8 params; -#define FCOE_DB_DATA_DEST_MASK 0x3 -#define 
FCOE_DB_DATA_DEST_SHIFT 0 -#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 -#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 -#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 -#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 -#define FCOE_DB_DATA_RESERVED_MASK 0x1 -#define FCOE_DB_DATA_RESERVED_SHIFT 5 -#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; + #endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 85e086cba639..4cc9b37b8d95 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -32,47 +32,48 @@ #ifndef __ISCSI_COMMON__ #define __ISCSI_COMMON__ + /**********************/ /* ISCSI FW CONSTANTS */ /**********************/ /* iSCSI HSI constants */ -#define ISCSI_DEFAULT_MTU (1500) +#define ISCSI_DEFAULT_MTU (1500) /* KWQ (kernel work queue) layer codes */ -#define ISCSI_SLOW_PATH_LAYER_CODE (6) +#define ISCSI_SLOW_PATH_LAYER_CODE (6) /* iSCSI parameter defaults */ -#define ISCSI_DEFAULT_HEADER_DIGEST (0) -#define ISCSI_DEFAULT_DATA_DIGEST (0) -#define ISCSI_DEFAULT_INITIAL_R2T (1) -#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) -#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) -#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) -#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) -#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) /* iSCSI parameter limits */ -#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) -#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) -#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) -#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) -#define ISCSI_AHS_CNTL_SIZE 4 +#define ISCSI_AHS_CNTL_SIZE 4 -#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) +#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) -#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 -#define ISCSI_OPTION_2_ON_CHIP_TCP 2 +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 -#define ISCSI_INITIATOR_MODE 0 -#define ISCSI_TARGET_MODE 1 +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ #define ISCSI_OPCODE_NOP_OUT (0) @@ -84,41 +85,48 @@ #define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ -#define ISCSI_OPCODE_NOP_IN (0x20) -#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) -#define ISCSI_OPCODE_TMF_RESPONSE (0x22) -#define ISCSI_OPCODE_LOGIN_RESPONSE 
(0x23) -#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) -#define ISCSI_OPCODE_DATA_IN (0x25) -#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) -#define ISCSI_OPCODE_R2T (0x31) -#define ISCSI_OPCODE_ASYNC_MSG (0x32) -#define ISCSI_OPCODE_REJECT (0x3f) +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) /* iSCSI stages */ -#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) -#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) -#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) /* iSCSI CQE errors */ -#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) -#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) + +/* Union of data bd_opaque/ tq_tid */ +union bd_opaque_tq_union { + __le16 bd_opaque; + __le16 tq_tid; +}; +/* ISCSI SGL entry */ struct cqe_error_bitmap { u8 cqe_error_status_bits; -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 -#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 -#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 }; union cqe_error_status { @@ -126,86 +134,133 @@ union cqe_error_status { struct cqe_error_bitmap error_bits; }; +/* iSCSI Login Response PDU header */ struct data_hdr { __le32 data[12]; }; -struct iscsi_async_msg_hdr { - __le16 reserved0; - u8 flags_attr; -#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F -#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 -#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 - u8 opcode; - __le32 hdr_second_dword; -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 all_ones; - __le32 reserved1; - __le32 stat_sn; - __le32 exp_cmd_sn; - __le32 max_cmd_sn; - __le16 param1_rsrv; - u8 async_vcode; - u8 async_event; - __le16 param3_rsrv; - __le16 param2_rsrv; - 
__le32 reserved7; +struct lun_mapper_addr_reserved { + struct regpair lun_mapper_addr; + u8 reserved0[8]; +}; + +/* rdif conetxt for dif on immediate */ +struct dif_on_immediate_params { + __le32 initial_ref_tag; + __le16 application_tag; + __le16 application_tag_mask; + __le16 flags1; +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + u8 flags0; +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 + u8 reserved_zero[5]; +}; + +/* iSCSI dif on immediate mode attributes union */ +union dif_configuration_params { + struct lun_mapper_addr_reserved lun_mapper_address; + struct dif_on_immediate_params def_dif_conf; +}; + +/* Union of data/r2t sequence number */ +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; }; -struct iscsi_cmd_hdr { - __le16 reserved1; - u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 hdr_first_byte; -#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F -#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 -#define ISCSI_CMD_HDR_IMM_MASK 0x1 -#define ISCSI_CMD_HDR_IMM_SHIFT 6 -#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 -#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 - __le32 hdr_second_dword; -#define 
ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 itt; - __le32 expected_transfer_length; - __le32 cmd_sn; - __le32 exp_stat_sn; - __le32 cdb[4]; +/* iSCSI DIF flags */ +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 }; +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 exp_r2t_sn; + __le32 buffer_offset; + union iscsi_seq_num seq_num; + struct iscsi_dif_flags dif_flags; + u8 flags; +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 +}; + +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_rxmit_opt { + __le32 fast_rxmit_sge_offset; + __le32 scan_start_buffer_offset; + __le32 fast_rxmit_buffer_offset; + u8 scan_start_sgl_index; + u8 fast_rxmit_sgl_index; + __le16 reserved; +}; + +/* iSCSI Common PDU header */ struct iscsi_common_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 hdr_first_byte; -#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F -#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 -#define ISCSI_COMMON_HDR_IMM_MASK 0x1 -#define ISCSI_COMMON_HDR_IMM_SHIFT 6 -#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 -#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 __le32 hdr_second_dword; -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun_reserved; __le32 itt; __le32 ttt; @@ -215,86 +270,60 @@ struct iscsi_common_hdr { __le32 data[3]; }; -struct iscsi_conn_offload_params { - struct regpair sq_pbl_addr; - struct regpair r2tq_pbl_addr; - struct regpair xhq_pbl_addr; - struct regpair uhq_pbl_addr; - __le32 initial_ack; - __le16 physical_q0; - __le16 physical_q1; - u8 flags; -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 - u8 pbl_page_size_log; - u8 pbe_page_size_log; - u8 default_cq; - __le32 
stat_sn; -}; - -struct iscsi_slow_path_hdr { - u8 op_code; - u8 flags; -#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF -#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 -}; - -struct iscsi_conn_update_ramrod_params { - struct iscsi_slow_path_hdr hdr; - __le16 conn_id; - __le32 fw_cid; - u8 flags; -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 - u8 reserved0[3]; - __le32 max_seq_size; - __le32 max_send_pdu_length; - __le32 max_recv_pdu_length; - __le32 first_seq_length; +/* iSCSI Command PDU header */ +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; __le32 exp_stat_sn; + __le32 cdb[4]; }; +/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ struct iscsi_ext_cdb_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 u8 
opcode; __le32 hdr_second_dword; -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; @@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr { struct scsi_sge cdb_sge; }; +/* iSCSI login request PDU header */ struct iscsi_login_req_hdr { u8 version_min; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -334,6 +364,7 @@ struct iscsi_login_req_hdr { __le32 reserved2[4]; }; +/* iSCSI logout request PDU header */ struct iscsi_logout_req_hdr { __le16 reserved0; u8 reason_code; @@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr { __le32 reserved4[4]; }; +/* iSCSI Data-out PDU header */ struct iscsi_data_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -368,22 +400,23 @@ struct iscsi_data_out_hdr { __le32 reserved5; }; +/* iSCSI Data-in PDU header */ struct iscsi_data_in_hdr { u8 status_rsvd; u8 reserved1; u8 flags; -#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 -#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 -#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 -#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 -#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 -#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 -#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define 
ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -397,6 +430,7 @@ struct iscsi_data_in_hdr { __le32 residual_count; }; +/* iSCSI R2T PDU header */ struct iscsi_r2t_hdr { u8 reserved0[3]; u8 opcode; @@ -412,13 +446,14 @@ struct iscsi_r2t_hdr { __le32 desired_data_trns_len; }; +/* iSCSI NOP-out PDU header */ struct iscsi_nop_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr { __le32 reserved6; }; +/* iSCSI NOP-in PDU header */ struct iscsi_nop_in_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr { __le32 reserved7; }; +/* iSCSI Login Response PDU header */ struct iscsi_login_response_hdr { u8 version_active; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define 
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -490,16 +527,17 @@ struct iscsi_login_response_hdr { __le32 reserved4[2]; }; +/* iSCSI Logout Response PDU header */ struct iscsi_logout_response_hdr { u8 reserved1; u8 response; u8 flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 reserved2[2]; __le32 itt; __le32 reserved3; @@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr { __le32 reserved5[1]; }; +/* iSCSI Text Request PDU header */ struct iscsi_text_request_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 -#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -535,21 +574,22 @@ struct iscsi_text_request_hdr { __le32 reserved4[4]; }; +/* iSCSI Text Response PDU header */ struct iscsi_text_response_hdr { __le16 reserved1; u8 flags; -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -559,15 +599,16 @@ struct iscsi_text_response_hdr { 
__le32 reserved4[3]; }; +/* iSCSI TMF Request PDU header */ struct iscsi_tmf_request_hdr { __le16 reserved0; u8 function; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 rtt; @@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr { u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; __le32 reserved1; @@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr { __le32 reserved4[3]; }; +/* iSCSI Response PDU header */ struct iscsi_response_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 snack_tag; @@ -618,16 +660,17 @@ struct iscsi_response_hdr { __le32 residual_count; }; +/* iSCSI Reject PDU header */ struct iscsi_reject_hdr { u8 reserved4; u8 hdr_reason; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 all_ones; __le32 reserved2; @@ -638,6 +681,35 @@ struct iscsi_reject_hdr { __le32 reserved3[2]; }; +/* iSCSI Asynchronous Message PDU header */ +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +/* PDU header part of Ystorm task context */ union iscsi_task_hdr { struct iscsi_common_hdr 
common; struct data_hdr data; @@ -661,6 +733,348 @@ union iscsi_task_hdr { struct iscsi_async_msg_hdr async_msg; }; +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + struct ystorm_iscsi_task_rxmit_opt rxmit_opt; + union iscsi_task_hdr pdu_hdr; +}; + +struct e4_ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define 
E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define 
E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +/* The iscsi storm task context of Mstorm */ +struct mstorm_iscsi_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct iscsi_dif_flags dif_flags; + __le16 dif_task_icid; + struct regpair sense_db; + __le32 expected_itt; + __le32 reserved1; +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_SGES_MASK 0xF +#define ISCSI_REG1_NUM_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 4 +}; + +struct tqe_opaque { + __le16 opaque[2]; +}; + +/* The iscsi storm task context of Ustorm */ +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct iscsi_dif_flags dif_flags; + __le16 reserved3; + struct tqe_opaque tqe_opaque_list; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +/* iscsi task context */ +struct e4_iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +/* iSCSI connection offload params passed by driver to FW in ISCSI offload + * ramrod. 
+ */ +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +/* iSCSI connection statistics */ +struct iscsi_conn_stats_params { + struct regpair iscsi_tcp_tx_packets_cnt; + struct regpair iscsi_tcp_tx_bytes_cnt; + struct regpair iscsi_tcp_tx_rxmit_cnt; + struct regpair iscsi_tcp_rx_packets_cnt; + struct regpair iscsi_tcp_rx_bytes_cnt; + struct regpair iscsi_tcp_rx_dup_ack_cnt; + __le32 iscsi_tcp_rx_chksum_err_cnt; + __le32 reserved; +}; + +/* spe message header */ +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +/* iSCSI connection update params passed by driver to FW in ISCSI update + *ramrod. + */ +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; + union dif_configuration_params dif_on_imme_params; +}; + +/* iSCSI CQ element */ struct iscsi_cqe_common { __le16 conn_id; u8 cqe_type; @@ -669,6 +1083,7 @@ struct iscsi_cqe_common { union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_solicited { __le16 conn_id; u8 cqe_type; @@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited { u8 fw_dbg_field; u8 caused_conn_err; u8 reserved0[3]; - __le32 reserved1[1]; + __le32 data_truncated_bytes; union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_unsolicited { __le16 conn_id; u8 cqe_type; @@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited { __le16 reserved0; u8 
reserved1; u8 unsol_cqe_type; - struct regpair rqe_opaque; + __le16 rqe_opaque; + __le16 reserved2[3]; union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ union iscsi_cqe { struct iscsi_cqe_common cqe_common; struct iscsi_cqe_solicited cqe_solicited; struct iscsi_cqe_unsolicited cqe_unsolicited; }; +/* iSCSI CQE type */ enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, @@ -708,6 +1127,7 @@ enum iscsi_cqes_type { MAX_ISCSI_CQES_TYPE }; +/* iSCSI CQE type */ enum iscsi_cqe_unsolicited_type { ISCSI_CQE_UNSOLICITED_NONE, ISCSI_CQE_UNSOLICITED_SINGLE, @@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type { MAX_ISCSI_CQE_UNSOLICITED_TYPE }; - +/* iscsi debug modes */ struct iscsi_debug_modes { u8 flags; -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 -}; - -struct iscsi_dif_flags { - u8 flags; -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 -}; - +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 +}; + +/* iSCSI kernel completion queue IDs */ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_INIT_FUNC = 0, ISCSI_EVENT_TYPE_DESTROY_FUNC, @@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, + ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, - RESERVED9, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, @@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode { 
MAX_ISCSI_EQE_OPCODE }; +/* iSCSI EQE and CQE completion status */ enum iscsi_error_types { ISCSI_STATUS_NONE = 0, ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, @@ -823,7 +1235,7 @@ enum iscsi_error_types { MAX_ISCSI_ERROR_TYPES }; - +/* iSCSI Ramrod Command IDs */ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, @@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, + ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, MAX_ISCSI_RAMROD_CMD_ID }; -struct iscsi_reg1 { - __le32 reg1_map; -#define ISCSI_REG1_NUM_SGES_MASK 0xF -#define ISCSI_REG1_NUM_SGES_SHIFT 0 -#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF -#define ISCSI_REG1_RESERVED1_SHIFT 4 -}; - -union iscsi_seq_num { - __le16 data_sn; - __le16 r2t_sn; -}; - +/* iSCSI connection termination request */ struct iscsi_spe_conn_mac_update { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update { u8 reserved0[2]; }; +/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ struct iscsi_spe_conn_offload { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload { struct tcp_offload_params tcp; }; +/* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ struct iscsi_spe_conn_offload_option2 { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 { struct tcp_offload_params_opt2 tcp; }; +/* iSCSI collect connection statistics request */ +struct iscsi_spe_conn_statistics { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 reset_stats; + u8 reserved0[7]; + struct regpair stats_cnts_addr; +}; + +/* iSCSI connection termination request */ struct iscsi_spe_conn_termination { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination { struct regpair query_params_addr; }; +/* iSCSI firmware function destroy parameters */ struct iscsi_spe_func_dstry { struct iscsi_slow_path_hdr hdr; __le16 reserved0; __le32 reserved1; }; +/* iSCSI firmware function init parameters */ struct iscsi_spe_func_init { struct iscsi_slow_path_hdr hdr; __le16 half_way_close_timeout; @@ -898,283 +1318,19 @@ struct iscsi_spe_func_init { u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 ll2_rx_queue_id; - u8 ooo_enable; + u8 flags; +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F +#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 struct iscsi_debug_modes debug_mode; __le16 reserved1; __le32 reserved2; - __le32 reserved3; - __le32 reserved4; struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; }; -struct ystorm_iscsi_task_state { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 exp_r2t_sn; - __le32 buffer_offset; - union iscsi_seq_num seq_num; - struct iscsi_dif_flags dif_flags; - u8 flags; -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 -}; - -struct ystorm_iscsi_task_rxmit_opt { - __le32 fast_rxmit_sge_offset; 
- __le32 scan_start_buffer_offset; - __le32 fast_rxmit_buffer_offset; - u8 scan_start_sgl_index; - u8 fast_rxmit_sgl_index; - __le16 reserved; -}; - -struct ystorm_iscsi_task_st_ctx { - struct ystorm_iscsi_task_state state; - struct ystorm_iscsi_task_rxmit_opt rxmit_opt; - union iscsi_task_hdr pdu_hdr; -}; - -struct ystorm_iscsi_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 word0; - u8 flags0; -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 TTT; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct mstorm_iscsi_task_ag_ctx { - u8 cdu_validation; - u8 byte1; - __le16 task_cid; - u8 flags0; -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 - u8 flags1; -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define 
MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 reg0; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct ustorm_iscsi_task_ag_ctx { - u8 reserved; - u8 state; - __le16 icid; - u8 flags0; -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 - u8 flags1; -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 - u8 flags3; -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 rcv_cont_len; - __le32 exp_cont_len; - __le32 total_data_acked; - __le32 exp_data_acked; - u8 next_tid_valid; - u8 byte3; - __le16 word1; - __le16 next_tid; - __le16 word3; - __le32 hdr_residual_count; - __le32 exp_r2t_sn; -}; - -struct 
mstorm_iscsi_task_st_ctx { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 rem_task_size; - __le32 data_buffer_offset; - u8 task_type; - struct iscsi_dif_flags dif_flags; - u8 reserved0[2]; - struct regpair sense_db; - __le32 expected_itt; - __le32 reserved1; -}; - -struct ustorm_iscsi_task_st_ctx { - __le32 rem_rcv_len; - __le32 exp_data_transfer_len; - __le32 exp_data_sn; - struct regpair lun; - struct iscsi_reg1 reg1; - u8 flags2; -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 - struct iscsi_dif_flags dif_flags; - __le16 reserved3; - __le32 reserved4; - __le32 reserved5; - __le32 reserved6; - __le32 reserved7; - u8 task_type; - u8 error_flags; -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 - u8 flags; -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 - u8 cq_rss_number; -}; - -struct iscsi_task_context { - struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; - struct regpair ystorm_ag_padding[2]; - struct tdif_task_context tdif_context; - struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; - struct regpair mstorm_ag_padding[2]; - struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; - struct mstorm_iscsi_task_st_ctx mstorm_st_context; - struct ustorm_iscsi_task_st_ctx ustorm_st_context; - struct rdif_task_context rdif_context; -}; - +/* iSCSI task type */ enum iscsi_task_type { ISCSI_TASK_TYPE_INITIATOR_WRITE, ISCSI_TASK_TYPE_INITIATOR_READ, @@ -1186,53 +1342,57 @@ enum iscsi_task_type { ISCSI_TASK_TYPE_TARGET_READ, ISCSI_TASK_TYPE_TARGET_RESPONSE, ISCSI_TASK_TYPE_LOGIN_RESPONSE, + ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, MAX_ISCSI_TASK_TYPE }; +/* iSCSI DesiredDataTransferLength/ttt union */ union iscsi_ttt_txlen_union { __le32 desired_tx_len; __le32 ttt; }; +/* iSCSI uHQ element */ struct iscsi_uhqe { __le32 reg1; -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 -#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 -#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 -#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 -#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 -#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 -#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 -#define 
ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 -#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 __le32 reg2; -#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF -#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 -#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 }; - +/* iSCSI WQ element */ struct iscsi_wqe { __le16 task_id; u8 flags; -#define ISCSI_WQE_WQE_TYPE_MASK 0x7 -#define ISCSI_WQE_WQE_TYPE_SHIFT 0 -#define ISCSI_WQE_NUM_SGES_MASK 0xF -#define ISCSI_WQE_NUM_SGES_SHIFT 3 -#define ISCSI_WQE_RESPONSE_MASK 0x1 -#define ISCSI_WQE_RESPONSE_SHIFT 7 +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_SGES_MASK 0xF +#define ISCSI_WQE_NUM_SGES_SHIFT 3 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; __le32 contlen_cdbsize; -#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF -#define ISCSI_WQE_CONT_LEN_SHIFT 0 -#define ISCSI_WQE_CDB_SIZE_MASK 0xFF -#define ISCSI_WQE_CDB_SIZE_SHIFT 24 +#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_CDB_SIZE_SHIFT 24 }; +/* iSCSI wqe type */ enum iscsi_wqe_type { ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_TASK_CLEANUP, @@ -1244,6 +1404,7 @@ enum iscsi_wqe_type { MAX_ISCSI_WQE_TYPE }; +/* iSCSI xHQ element */ struct iscsi_xhqe { union iscsi_ttt_txlen_union ttt_or_txlen; __le32 exp_stat_sn; @@ -1251,120 +1412,134 @@ struct iscsi_xhqe { u8 total_ahs_length; u8 opcode; u8 flags; -#define ISCSI_XHQE_FINAL_MASK 0x1 -#define ISCSI_XHQE_FINAL_SHIFT 0 -#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 -#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 -#define ISCSI_XHQE_NUM_SGES_MASK 0xF -#define ISCSI_XHQE_NUM_SGES_SHIFT 2 -#define ISCSI_XHQE_RESERVED0_MASK 0x3 -#define ISCSI_XHQE_RESERVED0_SHIFT 6 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 0 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 +#define ISCSI_XHQE_NUM_SGES_MASK 0xF +#define ISCSI_XHQE_NUM_SGES_SHIFT 2 +#define ISCSI_XHQE_RESERVED0_MASK 0x3 +#define ISCSI_XHQE_RESERVED0_SHIFT 6 union iscsi_seq_num seq_num; __le16 reserved1; }; +/* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; + struct regpair iscsi_rx_dup_ack_cnt; }; +/* Per PF iSCSI transmit path statistics - pStorm RAM structure */ struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; }; +/* Per PF iSCSI receive path statistics - tStorm RAM structure */ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; struct regpair iscsi_rx_new_ooo_isle_events_cnt; + struct regpair iscsi_rx_tcp_payload_bytes_cnt; + struct regpair iscsi_rx_tcp_pkt_cnt; + 
struct regpair iscsi_rx_pure_ack_cnt; __le32 iscsi_cmdq_threshold_cnt; __le32 iscsi_rq_threshold_cnt; __le32 iscsi_immq_threshold_cnt; }; +/* Per PF iSCSI receive path statistics - uStorm RAM structure */ struct ustorm_iscsi_stats_drv { struct regpair iscsi_rx_data_pdu_cnt; struct regpair iscsi_rx_r2t_pdu_cnt; struct regpair iscsi_rx_total_pdu_cnt; }; +/* Per PF iSCSI transmit path statistics - xStorm RAM structure */ struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; + struct regpair iscsi_tx_pure_ack_cnt; + struct regpair iscsi_tx_delayed_ack_cnt; }; +/* Per PF iSCSI transmit path statistics - yStorm RAM structure */ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; struct regpair iscsi_tx_total_pdu_cnt; + struct regpair iscsi_tx_tcp_payload_bytes_cnt; + struct regpair iscsi_tx_tcp_pkt_cnt; }; -struct tstorm_iscsi_task_ag_ctx { +struct e4_tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define 
E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; + +/* iSCSI doorbell data */ struct iscsi_db_data { u8 params; -#define 
ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index b8b3e1cfae90..c6cfd39cd910 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -29,9 +29,12 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + #ifndef __IWARP_COMMON__ #define __IWARP_COMMON__ + #include <linux/qed/rdma_common.h> + /************************/ /* IWARP FW CONSTANTS */ /************************/ @@ -40,14 +43,14 @@ #define IWARP_PASSIVE_MODE 1 #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) -#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) -#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) +#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) +#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) -#define IWARP_MAX_QPS (64 * 1024) +#define IWARP_MAX_QPS (64 * 1024) #endif /* __IWARP_COMMON__ */ diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d60de4a39810..147d08ccf813 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -61,6 +61,35 @@ struct qed_txq_start_ret_params { void *p_handle; }; +enum qed_filter_config_mode { + QED_FILTER_CONFIG_MODE_DISABLE, + QED_FILTER_CONFIG_MODE_5_TUPLE, + QED_FILTER_CONFIG_MODE_L4_PORT, + QED_FILTER_CONFIG_MODE_IP_DEST, +}; + +struct qed_ntuple_filter_params { + /* Physically mapped address containing header of buffer to be used + * as filter. + */ + dma_addr_t addr; + + /* Length of header in bytes */ + u16 length; + + /* Relative queue-id to receive classified packet */ +#define QED_RFS_NTUPLE_QID_RSS ((u16)-1) + u16 qid; + + /* Identifier can either be according to vport-id or vfid */ + bool b_is_vf; + u8 vport_id; + u8 vf_id; + + /* true iff this filter is to be added. 
Else to be removed */ + bool b_is_add; +}; + struct qed_dev_eth_info { struct qed_dev_info common; @@ -316,13 +345,12 @@ struct qed_eth_ops { int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); - int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, - dma_addr_t mapping, u16 length, - u16 vport_id, u16 rx_queue_id, - bool add_filter); + int (*ntuple_filter_config)(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params); int (*configure_arfs_searcher)(struct qed_dev *cdev, - bool en_searcher); + enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); }; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index cc646ca97974..15e398c7230e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -244,16 +244,11 @@ struct qed_fcoe_pf_params { /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; - u64 bdq_pbl_base_addr[2]; - u32 max_cwnd; + u64 bdq_pbl_base_addr[3]; u16 cq_num_entries; u16 cmdq_num_entries; u32 two_msl_timer; - u16 dup_ack_threshold; u16 tx_sws_timer; - u16 min_rto; - u16 min_rto_rt; - u16 max_rto; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments @@ -264,8 +259,8 @@ struct qed_iscsi_pf_params { /* The following parameters are used during protocol-init */ u16 half_way_close_timeout; - u16 bdq_xoff_threshold[2]; - u16 bdq_xon_threshold[2]; + u16 bdq_xoff_threshold[3]; + u16 bdq_xon_threshold[3]; u16 cmdq_xoff_threshold; u16 cmdq_xon_threshold; u16 rq_buffer_size; @@ -281,10 +276,11 @@ struct qed_iscsi_pf_params { u8 gl_cmd_pi; u8 debug_mode; u8 ll2_ooo_queue_id; - u8 ooo_enable; u8 is_target; - u8 bdq_pbl_num_entries[2]; + u8 is_soc_en; + u8 soc_num_of_blocks_log; + u8 bdq_pbl_num_entries[3]; }; struct qed_rdma_pf_params { @@ -316,16 +312,16 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block *sb_virt; - dma_addr_t sb_phys; - u32 sb_ack; /* Last given ack */ - u16 igu_sb_id; - void __iomem *igu_addr; - u8 flags; -#define QED_SB_INFO_INIT 0x1 -#define QED_SB_INFO_SETUP 0x2 + struct status_block_e4 *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 - struct qed_dev *cdev; + struct qed_dev *cdev; }; enum qed_dev_type { @@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & - STATUS_BLOCK_PROD_INDEX_MASK; + STATUS_BLOCK_E4_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 111e606a74c8..d0df1bec5357 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -102,7 +102,6 @@ struct qed_iscsi_params_offload { u32 ss_thresh; u16 srtt; u16 rtt_var; - u32 ts_time; u32 ts_recent; u32 ts_recent_age; u32 total_rt; @@ -124,7 +123,6 @@ struct qed_iscsi_params_offload { u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; - u32 ts_ticks_per_second; u16 da_timeout_value; u8 ack_frequency; }; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index e755954d85fd..266c1fb45387 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data { u32 
opaque_data_1; /* GSI only */ - u32 gid_dst[4]; + u32 src_qp; u16 qp_id; union { diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index a9b3050f469c..c1a446ebe362 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -32,28 +32,29 @@ #ifndef __RDMA_COMMON__ #define __RDMA_COMMON__ + /************************/ /* RDMA FW CONSTANTS */ /************************/ -#define RDMA_RESERVED_LKEY (0) -#define RDMA_RING_PAGE_SIZE (0x1000) +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) -#define RDMA_MAX_SGE_PER_SQ_WQE (4) -#define RDMA_MAX_SGE_PER_RQ_WQE (4) +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) -#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) -#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) -#define RDMA_MAX_CQS (64 * 1024) -#define RDMA_MAX_TIDS (128 * 1024 - 1) -#define RDMA_MAX_PDS (64 * 1024) +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) -#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 -#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index fe6a33e45977..e15e0da71240 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -33,13 +33,18 @@ #ifndef __ROCE_COMMON__ #define __ROCE_COMMON__ -#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) -#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +/************************/ +/* ROCE FW CONSTANTS */ +/************************/ -#define ROCE_MAX_QPS (32 * 1024) -#define ROCE_DCQCN_NP_MAX_QPS (64) -#define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +#define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) + +/* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, ROCE_ASYNC_EVENT_COMM_EST = 1, diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 08df82a096b6..505c0b48a761 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -33,43 +33,77 @@ #ifndef __STORAGE_COMMON__ #define __STORAGE_COMMON__ -#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) -#define BDQ_NUM_RESOURCES (4) - -#define BDQ_ID_RQ (0) -#define BDQ_ID_IMM_DATA (1) -#define BDQ_NUM_IDS (2) - -#define SCSI_NUM_SGES_SLOW_SGL_THR 8 +/*********************/ +/* SCSI CONSTANTS */ +/*********************/ + +#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) + +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_ID_TQ (2) +#define BDQ_NUM_IDS (3) + +#define SCSI_NUM_SGES_SLOW_SGL_THR 8 + +#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) + +/* SCSI op codes */ +#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) +#define SCSI_OPCODE_READ_10 (0x28) +#define SCSI_OPCODE_WRITE_6 (0x0A) +#define SCSI_OPCODE_WRITE_10 (0x2A) +#define SCSI_OPCODE_WRITE_12 (0xAA) +#define SCSI_OPCODE_WRITE_16 (0x8A) +#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) 
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) +#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) + +/* iSCSI Drv opaque */ +struct iscsi_drv_opaque { + __le16 reserved_zero[3]; + __le16 opaque; +}; -#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) +/* Scsi 2B/8B opaque union */ +union scsi_opaque { + struct regpair fcoe_opaque; + struct iscsi_drv_opaque iscsi_opaque; +}; +/* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; - struct regpair opaque; + union scsi_opaque opaque; }; +/* Scsi Drv BDQ struct */ struct scsi_bdq_ram_drv_data { __le16 external_producer; __le16 reserved0[3]; }; +/* SCSI SGE entry */ struct scsi_sge { struct regpair sge_addr; __le32 sge_len; __le32 reserved; }; +/* Cached SGEs section */ struct scsi_cached_sges { struct scsi_sge sge[4]; }; +/* Scsi Drv CMDQ struct */ struct scsi_drv_cmdq { __le16 cmdq_cons; __le16 reserved0; __le32 reserved1; }; +/* Common SCSI init params passed by driver to FW in function init ramrod */ struct scsi_init_func_params { __le16 num_tasks; u8 log_page_size; @@ -77,6 +111,7 @@ struct scsi_init_func_params { u8 reserved2[12]; }; +/* SCSI RQ/CQ/CMDQ firmware function init parameters */ struct scsi_init_func_queues { struct regpair glbl_q_params_addr; __le16 rq_buffer_size; @@ -84,39 +119,45 @@ struct scsi_init_func_queues { __le16 cmdq_num_entries; u8 bdq_resource_id; u8 q_validity; -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 + __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; u8 cmdq_sb_pi; - __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; - __le16 reserved0; u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + u8 reserved1; struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; - __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xoff_threshold; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xon_threshold; - __le32 reserved1; }; +/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; +/* SCSI SGL types */ enum scsi_sgl_mode { SCSI_TX_SLOW_SGL, SCSI_FAST_SGL, MAX_SCSI_SGL_MODE }; +/* SCSI SGL parameters */ struct scsi_sgl_params { struct regpair sgl_addr; __le32 sgl_total_length; @@ -126,10 +167,16 @@ struct scsi_sgl_params { u8 reserved; }; +/* SCSI terminate connection params */ struct scsi_terminate_extra_params { __le16 unsolicited_cq_count; __le16 cmdq_count; u8 reserved[4]; }; +/* SCSI Task Queue Element */ +struct scsi_tqe { + __le16 itid; +}; + 
#endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dbf7a43c3e1f..4a4845193539 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -33,8 +33,13 @@ #ifndef __TCP_COMMON__ #define __TCP_COMMON__ -#define TCP_INVALID_TIMEOUT_VAL -1 +/********************/ +/* TCP FW CONSTANTS */ +/********************/ +#define TCP_INVALID_TIMEOUT_VAL -1 + +/* OOO opaque data received from LL2 */ struct ooo_opaque { __le32 cid; u8 drop_isle; @@ -43,25 +48,29 @@ struct ooo_opaque { u8 ooo_isle; }; +/* tcp connect mode enum */ enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, MAX_TCP_CONNECT_MODE }; +/* tcp function init parameters */ struct tcp_init_params { __le32 two_msl_timer; __le16 tx_sws_timer; - u8 maxfinrt; + u8 max_fin_rt; u8 reserved[9]; }; +/* tcp IPv4/IPv6 enum */ enum tcp_ip_version { TCP_IPV4, TCP_IPV6, MAX_TCP_IP_VERSION }; +/* tcp offload parameters */ struct tcp_offload_params { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -70,24 +79,29 @@ struct tcp_offload_params { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; -#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 -#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 + __le16 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 +#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F +#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 u8 ip_version; + u8 reserved0[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -99,17 +113,21 @@ struct tcp_offload_params { u8 rcv_wnd_scale; u8 connect_mode; __le16 srtt; - __le32 cwnd; __le32 ss_thresh; - __le16 reserved1; + __le32 rcv_wnd; + __le32 cwnd; u8 ka_max_probe_cnt; u8 dup_ack_theshold; + __le16 reserved1; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; - __le32 rcv_wnd; __le32 snd_wl1; __le32 ts_recent; __le32 ts_recent_age; @@ -122,16 +140,13 @@ struct tcp_offload_params { u8 rt_cnt; __le16 rtt_var; __le16 fw_internal; - __le32 ka_timeout; - 
__le32 ka_interval; - __le32 max_rt_time; - __le32 initial_rcv_wnd; u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 reserved3[2]; + __le32 reserved3; }; +/* tcp offload parameters */ struct tcp_offload_params_opt2 { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 + __le16 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 u8 ip_version; + u8 reserved1[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 { __le16 syn_ip_payload_length; __le32 syn_phy_addr_lo; __le32 syn_phy_addr_hi; - __le32 reserved1[22]; + __le32 cwnd; + u8 ka_max_probe_cnt; + u8 reserved2[3]; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 reserved3[16]; }; +/* tcp IPv4/IPv6 enum */ enum tcp_seg_placement_event { TCP_EVENT_ADD_PEN, TCP_EVENT_ADD_NEW_ISLE, @@ -177,40 +202,41 @@ enum tcp_seg_placement_event { MAX_TCP_SEG_PLACEMENT_EVENT }; +/* tcp init parameters */ struct tcp_update_params { __le16 flags; -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 -#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 -#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 -#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 -#define 
TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; @@ -226,6 +252,7 @@ struct tcp_update_params { u8 reserved1[7]; }; +/* toe upload parameters */ struct tcp_upload_params { __le32 rcv_next; __le32 snd_una; diff --git a/include/linux/refcount.h b/include/linux/refcount.h index e8286585e149..4193c41e383a 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -8,7 +8,7 @@ #include <linux/kernel.h> /** - * refcount_t - variant of atomic_t specialized for reference counts + * struct refcount_t - variant of atomic_t specialized for reference counts * @refs: atomic_t counter field * * The counter saturates at UINT_MAX and will not move once diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 20268b7d5001..6a3aeba40e9e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -24,6 +24,7 @@ struct module; struct device; struct i2c_client; struct irq_domain; +struct slim_device; struct spi_device; struct spmi_device; struct regmap; @@ -511,6 +512,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c, const struct regmap_config *config, struct lock_class_key *lock_key, const char *lock_name); +struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); struct regmap *__regmap_init_spi(struct spi_device *dev, const struct regmap_config *config, struct lock_class_key *lock_key, @@ -636,6 +641,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, i2c, config) /** + * regmap_init_slimbus() - Initialise register map + * + * @slimbus: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on 
error or a valid pointer to + * a struct regmap. + */ +#define regmap_init_slimbus(slimbus, config) \ + __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \ + slimbus, config) + +/** * regmap_init_spi() - Initialise register map * * @dev: Device that will be interacted with diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 44e630eb3d94..728d421fffe9 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -324,6 +324,7 @@ struct rproc_mem_entry { }; struct rproc; +struct firmware; /** * struct rproc_ops - platform-specific device handlers @@ -331,12 +332,24 @@ struct rproc; * @stop: power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations + * @load_rsc_table: load resource table from firmware image + * @find_loaded_rsc_table: find the loaded resource table + * @load: load firmware to memory, where the remote processor + * expects to find it + * @sanity_check: sanity check the fw image + * @get_boot_addr: get boot address to entry point specified in firmware */ struct rproc_ops { int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, int len); + int (*load_rsc_table)(struct rproc *rproc, const struct firmware *fw); + struct resource_table *(*find_loaded_rsc_table)( + struct rproc *rproc, const struct firmware *fw); + int (*load)(struct rproc *rproc, const struct firmware *fw); + int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); + u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); }; /** @@ -390,7 +403,6 @@ enum rproc_crash_type { * @priv: private data which belongs to the platform-specific rproc module * @ops: platform-specific start/stop rproc handlers * @dev: virtual device for refcounting and common remoteproc behavior - * @fw_ops: firmware-specific handlers * @power: refcount of users who need this rproc powered up * @state: state of the device * @lock: lock which protects concurrent manipulations of the rproc @@ -406,11 +418,11 @@ enum rproc_crash_type { * @index: index of this rproc device * @crash_handler: workqueue for handling a crash * @crash_cnt: crash counter - * @crash_comp: completion used to sync crash handler and the rproc reload * @recovery_disabled: flag that state if recovery was disabled * @max_notifyid: largest allocated notify id.
* @table_ptr: pointer to the resource table in effect * @cached_table: copy of the resource table + * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU */ struct rproc { @@ -419,9 +431,8 @@ struct rproc { const char *name; char *firmware; void *priv; - const struct rproc_ops *ops; + struct rproc_ops *ops; struct device dev; - const struct rproc_fw_ops *fw_ops; atomic_t power; unsigned int state; struct mutex lock; @@ -437,11 +448,11 @@ struct rproc { int index; struct work_struct crash_handler; unsigned int crash_cnt; - struct completion crash_comp; bool recovery_disabled; int max_notifyid; struct resource_table *table_ptr; struct resource_table *cached_table; + size_t table_sz; bool has_iommu; bool auto_boot; }; diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 21fc84d82d41..02166e815afb 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -167,6 +167,29 @@ reservation_object_lock(struct reservation_object *obj, } /** + * reservation_object_lock_interruptible - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object interruptible for exclusive access and + * modification. Note, that the lock is only against other writers, readers + * will run concurrently with a writer under RCU. The seqlock is used to + * notify readers if they overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int +reservation_object_lock_interruptible(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock_interruptible(&obj->lock, ctx); +} + + +/** * reservation_object_trylock - trylock the reservation object * @obj: the reservation object * diff --git a/include/linux/reset.h b/include/linux/reset.h index 4c7871ddf3c6..09732c36f351 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,8 +2,10 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -#include <linux/device.h> +#include <linux/types.h> +struct device; +struct device_node; struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -20,22 +22,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional); void reset_control_put(struct reset_control *rstc); +int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional); -int __must_check device_reset(struct device *dev); - struct reset_control *devm_reset_control_array_get(struct device *dev, bool shared, bool optional); struct reset_control *of_reset_control_array_get(struct device_node *np, bool shared, bool optional); -static inline int device_reset_optional(struct device *dev) -{ - return device_reset(dev); -} - #else static inline int reset_control_reset(struct reset_control *rstc) @@ -62,15 +58,9 @@ static inline void reset_control_put(struct reset_control *rstc) { } -static inline int __must_check device_reset(struct device *dev) -{ - WARN_ON(1); - return -ENOTSUPP; -} - -static inline int device_reset_optional(struct device *dev) +static inline int __device_reset(struct device *dev, bool optional) { - return -ENOTSUPP; + return optional ? 
0 : -ENOTSUPP; } static inline struct reset_control *__of_reset_control_get( @@ -109,6 +99,16 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) #endif /* CONFIG_RESET_CONTROLLER */ +static inline int __must_check device_reset(struct device *dev) +{ + return __device_reset(dev, false); +} + +static inline int device_reset_optional(struct device *dev) +{ + return __device_reset(dev, true); +} + /** * reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. @@ -127,9 +127,6 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { -#ifndef CONFIG_RESET_CONTROLLER - WARN_ON(1); -#endif return __reset_control_get(dev, id, 0, false, false); } @@ -275,9 +272,6 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { -#ifndef CONFIG_RESET_CONTROLLER - WARN_ON(1); -#endif return __devm_reset_control_get(dev, id, 0, false, false); } @@ -350,18 +344,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index) * These inline function calls will be removed once all consumers * have been moved over to the new explicit API. */ -static inline struct reset_control *reset_control_get( - struct device *dev, const char *id) -{ - return reset_control_get_exclusive(dev, id); -} - -static inline struct reset_control *reset_control_get_optional( - struct device *dev, const char *id) -{ - return reset_control_get_optional_exclusive(dev, id); -} - static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 361c08e35dbc..c9df2527e0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -207,6 +207,7 @@ struct rhashtable_iter { struct rhashtable_walker walker; unsigned int slot; unsigned int skip; + bool end_of_table; }; static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) @@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); } -static inline unsigned int rht_key_hashfn( - struct rhashtable *ht, const struct bucket_table *tbl, - const void *key, const struct rhashtable_params params) +static inline unsigned int rht_key_get_hash(struct rhashtable *ht, + const void *key, const struct rhashtable_params params, + unsigned int hash_rnd) { unsigned int hash; /* params must be equal to ht->p if it isn't constant. 
*/ if (!__builtin_constant_p(params.key_len)) - hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); + hash = ht->p.hashfn(key, ht->key_len, hash_rnd); else if (params.key_len) { unsigned int key_len = params.key_len; if (params.hashfn) - hash = params.hashfn(key, key_len, tbl->hash_rnd); + hash = params.hashfn(key, key_len, hash_rnd); else if (key_len & (sizeof(u32) - 1)) - hash = jhash(key, key_len, tbl->hash_rnd); + hash = jhash(key, key_len, hash_rnd); else - hash = jhash2(key, key_len / sizeof(u32), - tbl->hash_rnd); + hash = jhash2(key, key_len / sizeof(u32), hash_rnd); } else { unsigned int key_len = ht->p.key_len; if (params.hashfn) - hash = params.hashfn(key, key_len, tbl->hash_rnd); + hash = params.hashfn(key, key_len, hash_rnd); else - hash = jhash(key, key_len, tbl->hash_rnd); + hash = jhash(key, key_len, hash_rnd); } + return hash; +} + +static inline unsigned int rht_key_hashfn( + struct rhashtable *ht, const struct bucket_table *tbl, + const void *key, const struct rhashtable_params params) +{ + unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); + return rht_bucket_index(tbl, hash); } @@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); -int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); +int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); + +static inline void rhashtable_walk_start(struct rhashtable_iter *iter) +{ + (void)rhashtable_walk_start_check(iter); +} + void *rhashtable_walk_next(struct rhashtable_iter *iter); +void *rhashtable_walk_peek(struct rhashtable_iter *iter); void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); void rhashtable_free_and_destroy(struct rhashtable *ht, diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 289e4d54e3e0..7d9eb39fa76a 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k }) int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); -int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, +__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 10d6ae8bbb7d..ca07366c4c33 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len); -unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, +__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); #else @@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, return -ENXIO; } -static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, +static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait) { /* This shouldn't be possible */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 41319a2e409b..fc6c90b57be0 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -87,7 +87,6 @@ struct rtc_class_ops { int (*set_offset)(struct device *, long offset); }; -#define RTC_DEVICE_NAME_SIZE 20 typedef struct rtc_task { 
void (*func)(void *private_data); void *private_data; diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 1eadec3fc228..1fdcde96eb65 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, - gfp_t flags, int *new_nsid); + gfp_t flags, int *new_nsid, int new_ifindex); struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, unsigned change, u32 event, - gfp_t flags, int *new_nsid); + gfp_t flags, int *new_nsid, + int new_ifindex); void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags); @@ -96,13 +97,9 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() do { \ - if (unlikely(!rtnl_is_locked())) { \ - printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ - __FILE__, __LINE__); \ - dump_stack(); \ - } \ -} while(0) +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 03a169087a18..1149533aa2fa 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -12,7 +12,7 @@ /* * Routines for handling mm_structs */ -extern struct mm_struct * mm_alloc(void); +extern struct mm_struct *mm_alloc(void); /** * mmgrab() - Pin a &struct mm_struct. @@ -36,32 +36,7 @@ static inline void mmgrab(struct mm_struct *mm) atomic_inc(&mm->mm_count); } -/* mmdrop drops the mm and the page tables */ -extern void __mmdrop(struct mm_struct *); -static inline void mmdrop(struct mm_struct *mm) -{ - /* - * The implicit full barrier implied by atomic_dec_and_test() is - * required by the membarrier system call before returning to - * user-space, after storing to rq->curr. - */ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); -} - -static inline void mmdrop_async_fn(struct work_struct *work) -{ - struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); - __mmdrop(mm); -} - -static inline void mmdrop_async(struct mm_struct *mm) -{ - if (unlikely(atomic_dec_and_test(&mm->mm_count))) { - INIT_WORK(&mm->async_put_work, mmdrop_async_fn); - schedule_work(&mm->async_put_work); - } -} +extern void mmdrop(struct mm_struct *mm); /** * mmget() - Pin the address space associated with a &struct mm_struct. 
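A small, hedged illustration of the sched/mm.h change above: mmdrop() is now a regular out-of-line function, but the usual pairing with mmgrab() is unchanged. The sketch below assumes kernel code that already holds a valid struct mm_struct pointer; example_ctx and the two helpers are hypothetical names used only for illustration, not part of the commit.

#include <linux/sched/mm.h>

struct example_ctx {
	struct mm_struct *mm;	/* hypothetical holder of a pinned mm */
};

static void example_ctx_init(struct example_ctx *ctx, struct mm_struct *mm)
{
	mmgrab(mm);		/* pin mm->mm_count so the mm_struct itself stays valid */
	ctx->mm = mm;
}

static void example_ctx_release(struct example_ctx *ctx)
{
	mmdrop(ctx->mm);	/* drop the pin; the mm is freed once the count reaches zero */
	ctx->mm = NULL;
}

This mirrors how long-lived users keep the mm_struct alive without pinning its address space; pinning the address space as well would use mmget()/mmput() instead.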
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0aa4548fb492..23b4f9cb82db 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void) schedule(); } +#ifdef __ARCH_SI_TRAPNO +# define ___ARCH_SI_TRAPNO(_a1) , _a1 +#else +# define ___ARCH_SI_TRAPNO(_a1) +#endif +#ifdef __ia64__ +# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 +#else +# define ___ARCH_SI_IA64(_a1, _a2, _a3) +#endif + +int force_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) + , struct task_struct *t); +int send_sig_fault(int sig, int code, void __user *addr + ___ARCH_SI_TRAPNO(int trapno) + ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) + , struct task_struct *t); + +int force_sig_mceerr(int code, void __user *, short, struct task_struct *); +int send_sig_mceerr(int code, void __user *, short, struct task_struct *); + +int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); +int force_sig_pkuerr(void __user *addr, u32 pkey); + +int force_sig_ptrace_errno_trap(int errno, void __user *addr); + extern int send_sig_info(int, struct siginfo *, struct task_struct *); extern int force_sigsegv(int, struct task_struct *); extern int force_sig_info(int, struct siginfo *, struct task_struct *); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 05b8650f06f5..5be31eb7b266 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly; # define arch_task_struct_size (sizeof(struct task_struct)) #endif +#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST +/* + * If an architecture has not declared a thread_struct whitelist we + * must assume something there may need to be copied to userspace. + */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = 0; + /* Handle dynamically sized thread_struct. */ + *size = arch_task_struct_size - offsetof(struct task_struct, thread); +} +#endif + #ifdef CONFIG_VMAP_STACK static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) { diff --git a/include/linux/scif.h b/include/linux/scif.h index 49a35d6edc94..7046111b8d0a 100644 --- a/include/linux/scif.h +++ b/include/linux/scif.h @@ -123,8 +123,8 @@ struct scif_range { */ struct scif_pollepd { scif_epd_t epd; - short events; - short revents; + __poll_t events; + __poll_t revents; }; /** diff --git a/include/linux/sctp.h b/include/linux/sctp.h index da803dfc7a39..b36c76635f18 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -102,11 +102,15 @@ enum sctp_cid { /* AUTH Extension Section 4.1 */ SCTP_CID_AUTH = 0x0F, + /* sctp ndata 5.1. I-DATA */ + SCTP_CID_I_DATA = 0x40, + /* PR-SCTP Sec 3.2 */ SCTP_CID_FWD_TSN = 0xC0, /* Use hex, as defined in ADDIP sec. 
3.1 */ SCTP_CID_ASCONF = 0xC1, + SCTP_CID_I_FWD_TSN = 0xC2, SCTP_CID_ASCONF_ACK = 0x80, SCTP_CID_RECONF = 0x82, }; /* enum */ @@ -240,6 +244,23 @@ struct sctp_data_chunk { struct sctp_datahdr data_hdr; }; +struct sctp_idatahdr { + __be32 tsn; + __be16 stream; + __be16 reserved; + __be32 mid; + union { + __u32 ppid; + __be32 fsn; + }; + __u8 payload[0]; +}; + +struct sctp_idata_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_idatahdr data_hdr; +}; + /* DATA Chuck Specific Flags */ enum { SCTP_DATA_MIDDLE_FRAG = 0x00, @@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk { struct sctp_fwdtsn_hdr fwdtsn_hdr; }; +struct sctp_ifwdtsn_skip { + __be16 stream; + __u8 reserved; + __u8 flags; + __be32 mid; +}; + +struct sctp_ifwdtsn_hdr { + __be32 new_cum_tsn; + struct sctp_ifwdtsn_skip skip[0]; +}; + +struct sctp_ifwdtsn_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_ifwdtsn_hdr fwdtsn_hdr; +}; /* ADDIP * Section 3.1.1 Address Configuration Change Chunk (ASCONF) diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 10f25f7e4304..c723a5c4e3ff 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk) #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) extern long seccomp_get_filter(struct task_struct *task, unsigned long filter_off, void __user *data); +extern long seccomp_get_metadata(struct task_struct *task, + unsigned long filter_off, void __user *data); #else static inline long seccomp_get_filter(struct task_struct *task, unsigned long n, void __user *data) { return -EINVAL; } +static inline long seccomp_get_metadata(struct task_struct *task, + unsigned long filter_off, + void __user *data) +{ + return -EINVAL; +} #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ #endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index d4bb46a26dc3..f153b2c7f0cd 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -27,8 +27,10 @@ struct serdev_device; /** * struct serdev_device_ops - Callback operations for a serdev device - * @receive_buf: Function called with data received from device. - * @write_wakeup: Function called when ready to transmit more data. + * @receive_buf: Function called with data received from device; + * returns number of bytes accepted; may sleep. + * @write_wakeup: Function called when ready to transmit more data; must + * not sleep. 
*/ struct serdev_device_ops { int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); @@ -76,6 +78,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device return container_of(d, struct serdev_device_driver, driver); } +enum serdev_parity { + SERDEV_PARITY_NONE, + SERDEV_PARITY_EVEN, + SERDEV_PARITY_ODD, +}; + /* * serdev controller structures */ @@ -86,6 +94,7 @@ struct serdev_controller_ops { int (*open)(struct serdev_controller *); void (*close)(struct serdev_controller *); void (*set_flow_control)(struct serdev_controller *, bool); + int (*set_parity)(struct serdev_controller *, enum serdev_parity); unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); void (*wait_until_sent)(struct serdev_controller *, long); int (*get_tiocm)(struct serdev_controller *); @@ -299,6 +308,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); } +int serdev_device_set_parity(struct serdev_device *serdev, + enum serdev_parity parity); + /* * serdev hooks into TTY core */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 37b044e78333..4c310c34ddad 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -387,7 +387,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr, struct console *c); int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, char **options); -void uart_parse_options(char *options, int *baud, int *parity, int *bits, +void uart_parse_options(const char *options, int *baud, int *parity, int *bits, int *flow); int uart_set_options(struct uart_port *port, struct console *co, int baud, int parity, int bits, int flow); @@ -501,9 +501,5 @@ static inline int uart_handle_break(struct uart_port *port) (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) -/* - * Common device tree parsing helpers - */ -void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf); - +void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); #endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 4a906f560817..e724d5a3dd80 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -3,7 +3,7 @@ #include <linux/phy.h> -struct __packed sfp_eeprom_base { +struct sfp_eeprom_base { u8 phys_id; u8 phys_ext_id; u8 connector; @@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base { char vendor_rev[4]; union { __be16 optical_wavelength; - u8 cable_spec; - }; + __be16 cable_compliance; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_2:6; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 reserved60_2:6; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed passive; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_4:4; + u8 fc_pi_4_lim:1; + u8 sff8431_lim:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_lim:1; + u8 fc_pi_4_lim:1; + u8 reserved60_4:4; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed active; + } __packed; u8 reserved62; u8 cc_base; -}; +} __packed; -struct __packed sfp_eeprom_ext { +struct sfp_eeprom_ext { __be16 options; u8 br_max; u8 br_min; @@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext { u8 enhopts; u8 sff8472_compliance; u8 
cc_ext; -}; - -struct __packed sfp_eeprom_id { +} __packed; + +/** + * struct sfp_eeprom_id - raw SFP module identification information + * @base: base SFP module identification structure + * @ext: extended SFP module identification structure + * + * See the SFF-8472 specification and related documents for the definition + * of these structure members. This can be obtained from + * ftp://ftp.seagate.com/sff + */ +struct sfp_eeprom_id { struct sfp_eeprom_base base; struct sfp_eeprom_ext ext; -}; +} __packed; /* SFP EEPROM registers */ enum { @@ -222,6 +265,7 @@ enum { SFP_SFF8472_COMPLIANCE = 0x5e, SFP_CC_EXT = 0x5f, + SFP_PHYS_ID_SFF = 0x02, SFP_PHYS_ID_SFP = 0x03, SFP_PHYS_EXT_ID_SFP = 0x04, SFP_CONNECTOR_UNSPEC = 0x00, @@ -347,19 +391,32 @@ enum { SFP_PAGE = 0x7f, }; -struct device_node; +struct fwnode_handle; struct ethtool_eeprom; struct ethtool_modinfo; struct net_device; struct sfp_bus; +/** + * struct sfp_upstream_ops - upstream operations structure + * @module_insert: called after a module has been detected to determine + * whether the module is supported for the upstream device. + * @module_remove: called after the module has been removed. + * @link_down: called when the link is non-operational for whatever + * reason. + * @link_up: called when the link is operational. + * @connect_phy: called when an I2C accessible PHY has been detected + * on the module. + * @disconnect_phy: called when a module with an I2C accessible PHY has + * been removed. + */ struct sfp_upstream_ops { - int (*module_insert)(void *, const struct sfp_eeprom_id *id); - void (*module_remove)(void *); - void (*link_down)(void *); - void (*link_up)(void *); - int (*connect_phy)(void *, struct phy_device *); - void (*disconnect_phy)(void *); + int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); + void (*module_remove)(void *priv); + void (*link_down)(void *priv); + void (*link_up)(void *priv); + int (*connect_phy)(void *priv, struct phy_device *); + void (*disconnect_phy)(void *priv); }; #if IS_ENABLED(CONFIG_SFP) @@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, u8 *data); void sfp_upstream_start(struct sfp_bus *bus); void sfp_upstream_stop(struct sfp_bus *bus); -struct sfp_bus *sfp_register_upstream(struct device_node *np, +struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, struct net_device *ndev, void *upstream, const struct sfp_upstream_ops *ops); void sfp_unregister_upstream(struct sfp_bus *bus); @@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus) { } -static inline struct sfp_bus *sfp_register_upstream(struct device_node *np, +static inline struct sfp_bus *sfp_register_upstream( + struct fwnode_handle *fwnode, struct net_device *ndev, void *upstream, const struct sfp_upstream_ops *ops) { diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 94081e9a5010..6dfda97a6c1a 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -5,12 +5,9 @@ #include <linux/phy.h> #include <linux/if_ether.h> -enum {EDMAC_LITTLE_ENDIAN}; - struct sh_eth_plat_data { int phy; int phy_irq; - int edmac_endian; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 06b295bec00d..73b5e655a76e 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -112,13 +112,11 @@ extern void shmem_uncharge(struct inode *inode, long pages); #ifdef CONFIG_TMPFS -extern int shmem_add_seals(struct file *file, unsigned int seals); 
-extern int shmem_get_seals(struct file *file); -extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg); +extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg); #else -static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) +static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a) { return -EINVAL; } diff --git a/include/linux/signal.h b/include/linux/signal.h index 042968dd98f0..a9bc7e1b077e 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -11,13 +11,14 @@ struct task_struct; /* for sysctl */ extern int print_fatal_signals; -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) +static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) { - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); + memcpy(to, from, sizeof(*to)); +} + +static inline void clear_siginfo(struct siginfo *info) +{ + memset(info, 0, sizeof(*info)); } int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); @@ -29,9 +30,7 @@ enum siginfo_layout { SIL_FAULT, SIL_CHLD, SIL_RT, -#ifdef __ARCH_SIGSYS SIL_SYS, -#endif }; enum siginfo_layout siginfo_layout(int sig, int si_code); diff --git a/include/linux/siox.h b/include/linux/siox.h new file mode 100644 index 000000000000..d79624e83134 --- /dev/null +++ b/include/linux/siox.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ + +#include <linux/device.h> + +#define to_siox_device(_dev) container_of((_dev), struct siox_device, dev) +struct siox_device { + struct list_head node; /* node in smaster->devices */ + struct siox_master *smaster; + struct device dev; + + const char *type; + size_t inbytes; + size_t outbytes; + u8 statustype; + + u8 status_read_clean; + u8 status_written; + u8 status_written_lastcycle; + bool connected; + + /* statistics */ + unsigned int watchdog_errors; + unsigned int status_errors; + + struct kernfs_node *status_errors_kn; + struct kernfs_node *watchdog_kn; + struct kernfs_node *watchdog_errors_kn; + struct kernfs_node *connected_kn; +}; + +bool siox_device_synced(struct siox_device *sdevice); +bool siox_device_connected(struct siox_device *sdevice); + +struct siox_driver { + int (*probe)(struct siox_device *sdevice); + int (*remove)(struct siox_device *sdevice); + void (*shutdown)(struct siox_device *sdevice); + + /* + * buf is big enough to hold sdev->inbytes - 1 bytes, the status byte + * is in the scope of the framework. 
+ */ + int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]); + /* + * buf is big enough to hold sdev->outbytes - 1 bytes, the status byte + * is in the scope of the framework + */ + int (*get_data)(struct siox_device *sdevice, const u8 buf[]); + + struct device_driver driver; +}; + +static inline struct siox_driver *to_siox_driver(struct device_driver *driver) +{ + if (driver) + return container_of(driver, struct siox_driver, driver); + else + return NULL; +} + +int __siox_driver_register(struct siox_driver *sdriver, struct module *owner); + +static inline int siox_driver_register(struct siox_driver *sdriver) +{ + return __siox_driver_register(sdriver, THIS_MODULE); +} + +static inline void siox_driver_unregister(struct siox_driver *sdriver) +{ + return driver_unregister(&sdriver->driver); +} diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 8621ffdeecbf..a6b6e8bb3d7b 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb */ static inline bool __skb_array_empty(struct skb_array *a) { - return !__ptr_ring_peek(&a->ring); + return __ptr_ring_empty(&a->ring); +} + +static inline struct sk_buff *__skb_array_peek(struct skb_array *a) +{ + return __ptr_ring_peek(&a->ring); } static inline bool skb_array_empty(struct skb_array *a) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a38c80e9f91e..5ebc0f869720 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, data, proto, nhoff, hlen, flags); } +void +skb_flow_dissect_tunnel_info(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) @@ -3241,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); -unsigned int datagram_poll(struct file *file, struct socket *sock, +__poll_t datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int skb_copy_datagram_iter(const struct sk_buff *from, int offset, struct iov_iter *to, int size); @@ -3282,6 +3287,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); int skb_ensure_writable(struct sk_buff *skb, int write_len); @@ -4115,6 +4121,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) return hdr_len + skb_gso_transport_seglen(skb); } +/** + * skb_gso_mac_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_mac_seglen is used to determine the real size of the + * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 + * headers (TCP/UDP). 
+ */ +static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); + return hdr_len + skb_gso_transport_seglen(skb); +} + /* Local Checksum Offload. * Compute outer checksum based on the assumption that the * inner checksum will be offloaded later. diff --git a/include/linux/slab.h b/include/linux/slab.h index 50697a1d6621..231abc8976c5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -135,9 +135,15 @@ struct mem_cgroup; void __init kmem_cache_init(void); bool slab_is_available(void); -struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, - slab_flags_t, - void (*)(void *)); +extern bool usercopy_fallback; + +struct kmem_cache *kmem_cache_create(const char *name, size_t size, + size_t align, slab_flags_t flags, + void (*ctor)(void *)); +struct kmem_cache *kmem_cache_create_usercopy(const char *name, + size_t size, size_t align, slab_flags_t flags, + size_t useroffset, size_t usersize, + void (*ctor)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); @@ -153,9 +159,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); * f.e. add ____cacheline_aligned_in_smp to the struct declaration * then the objects will be properly aligned in SMP configurations. */ -#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ - sizeof(struct __struct), __alignof__(struct __struct),\ - (__flags), NULL) +#define KMEM_CACHE(__struct, __flags) \ + kmem_cache_create(#__struct, sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), NULL) + +/* + * To whitelist a single field for copying to/from usercopy, use this + * macro instead for KMEM_CACHE() above. + */ +#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ + kmem_cache_create_usercopy(#__struct, \ + sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), \ + offsetof(struct __struct, __field), \ + sizeof_field(struct __struct, __field), NULL) /* * Common kmalloc functions provided by all allocators @@ -167,15 +184,11 @@ void kzfree(const void *); size_t ksize(const void *); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR -const char *__check_heap_object(const void *ptr, unsigned long n, - struct page *page); +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user); #else -static inline const char *__check_heap_object(const void *ptr, - unsigned long n, - struct page *page) -{ - return NULL; -} +static inline void __check_heap_object(const void *ptr, unsigned long n, + struct page *page, bool to_user) { } #endif /* diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 072e46e9e1d5..7385547c04b1 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -85,6 +85,9 @@ struct kmem_cache { unsigned int *random_seq; #endif + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h new file mode 100644 index 000000000000..c36cf121d2cd --- /dev/null +++ b/include/linux/slimbus.h @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011-2017, The Linux Foundation + */ + +#ifndef _LINUX_SLIMBUS_H +#define _LINUX_SLIMBUS_H +#include <linux/device.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/mod_devicetable.h> + +extern struct bus_type slimbus_bus; + +/** + * struct slim_eaddr - 
Enumeration address for a SLIMbus device + * @manf_id: Manufacturer Id for the device + * @prod_code: Product code + * @dev_index: Device index + * @instance: Instance value + */ +struct slim_eaddr { + u16 manf_id; + u16 prod_code; + u8 dev_index; + u8 instance; +} __packed; + +/** + * enum slim_device_status - slim device status + * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet. + * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus. + * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use. + */ +enum slim_device_status { + SLIM_DEVICE_STATUS_DOWN = 0, + SLIM_DEVICE_STATUS_UP, + SLIM_DEVICE_STATUS_RESERVED, +}; + +struct slim_controller; + +/** + * struct slim_device - Slim device handle. + * @dev: Driver model representation of the device. + * @e_addr: Enumeration address of this device. + * @status: slim device status + * @ctrl: slim controller instance. + * @laddr: 1-byte Logical address of this device. + * @is_laddr_valid: indicates if the laddr is valid or not + * + * This is the client/device handle returned when a SLIMbus + * device is registered with a controller. + * Pointer to this structure is used by client-driver as a handle. + */ +struct slim_device { + struct device dev; + struct slim_eaddr e_addr; + struct slim_controller *ctrl; + enum slim_device_status status; + u8 laddr; + bool is_laddr_valid; +}; + +#define to_slim_device(d) container_of(d, struct slim_device, dev) + +/** + * struct slim_driver - SLIMbus 'generic device' (slave) device driver + * (similar to 'spi_device' on SPI) + * @probe: Binds this driver to a SLIMbus device. + * @remove: Unbinds this driver from the SLIMbus device. + * @shutdown: Standard shutdown callback used during powerdown/halt. + * @device_status: This callback is called when + * - The device reports present and gets a laddr assigned + * - The device reports absent, or the bus goes down. + * @driver: SLIMbus device drivers should initialize name and owner field of + * this structure + * @id_table: List of SLIMbus devices supported by this driver + */ + +struct slim_driver { + int (*probe)(struct slim_device *sl); + void (*remove)(struct slim_device *sl); + void (*shutdown)(struct slim_device *sl); + int (*device_status)(struct slim_device *sl, + enum slim_device_status s); + struct device_driver driver; + const struct slim_device_id *id_table; +}; +#define to_slim_driver(d) container_of(d, struct slim_driver, driver) + +/** + * struct slim_val_inf - Slimbus value or information element + * @start_offset: Specifies starting offset in information/value element map + * @rbuf: buffer to read the values + * @wbuf: buffer to write + * @num_bytes: upto 16. This ensures that the message will fit the slicesize + * per SLIMbus spec + * @comp: completion for asynchronous operations, valid only if TID is + * required for transaction, like REQUEST operations. + * Rest of the transactions are synchronous anyway. 
+ */ +struct slim_val_inf { + u16 start_offset; + u8 num_bytes; + u8 *rbuf; + const u8 *wbuf; + struct completion *comp; +}; + +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define slim_driver_register(drv) \ + __slim_driver_register(drv, THIS_MODULE) +int __slim_driver_register(struct slim_driver *drv, struct module *owner); +void slim_driver_unregister(struct slim_driver *drv); + +/** + * module_slim_driver() - Helper macro for registering a SLIMbus driver + * @__slim_driver: slimbus_driver struct + * + * Helper macro for SLIMbus drivers which do not do anything special in module + * init/exit. This eliminates a lot of boilerplate. Each module may only + * use this macro once, and calling it replaces module_init() and module_exit() + */ +#define module_slim_driver(__slim_driver) \ + module_driver(__slim_driver, slim_driver_register, \ + slim_driver_unregister) + +static inline void *slim_get_devicedata(const struct slim_device *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void slim_set_devicedata(struct slim_device *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +struct slim_device *slim_get_device(struct slim_controller *ctrl, + struct slim_eaddr *e_addr); +int slim_get_logical_addr(struct slim_device *sbdev); + +/* Information Element management messages */ +#define SLIM_MSG_MC_REQUEST_INFORMATION 0x20 +#define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21 +#define SLIM_MSG_MC_REPLY_INFORMATION 0x24 +#define SLIM_MSG_MC_CLEAR_INFORMATION 0x28 +#define SLIM_MSG_MC_REPORT_INFORMATION 0x29 + +/* Value Element management messages */ +#define SLIM_MSG_MC_REQUEST_VALUE 0x60 +#define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61 +#define SLIM_MSG_MC_REPLY_VALUE 0x64 +#define SLIM_MSG_MC_CHANGE_VALUE 0x68 + +int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg, + u8 mc); +int slim_readb(struct slim_device *sdev, u32 addr); +int slim_writeb(struct slim_device *sdev, u32 addr, u8 value); +int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val); +int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val); +#endif /* _LINUX_SLIMBUS_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 0adae162dc8f..8ad99c47b19c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -135,6 +135,9 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h index 12e548938bbb..8e884e0dda0a 100644 --- a/include/linux/soc/brcmstb/brcmstb.h +++ b/include/linux/soc/brcmstb/brcmstb.h @@ -13,12 +13,6 @@ static inline u32 BRCM_REV(u32 reg) } /* - * Bus Interface Unit control register setup, must happen early during boot, - * before SMP is brought up, called by machine entry point. - */ -void brcmstb_biuctrl_init(void); - -/* * Helper functions for getting family or product id from the * SoC driver. 
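The SLIMbus client API above (struct slim_driver, module_slim_driver(), slim_readb() and friends) follows the usual bus-driver pattern: bind in probe, talk to the device once it reports present. A hypothetical client skeleton, with the driver name, register offset and the omitted id_table left as placeholders:

#include <linux/module.h>
#include <linux/slimbus.h>

static int demo_probe(struct slim_device *sdev)
{
	return 0;
}

static int demo_device_status(struct slim_device *sdev,
			      enum slim_device_status s)
{
	int ret;

	if (s != SLIM_DEVICE_STATUS_UP)
		return 0;

	/* device is enumerated and has a logical address: talk to it */
	ret = slim_readb(sdev, 0x40);		/* placeholder value-element address */
	return ret < 0 ? ret : 0;
}

static struct slim_driver demo_slim_driver = {
	.probe		= demo_probe,
	.device_status	= demo_device_status,
	.driver		= {
		.name	= "slim-demo",
	},
};
module_slim_driver(demo_slim_driver);
MODULE_LICENSE("GPL v2");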
*/ diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index e8d9f0d52933..b0a507d356ef 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -28,7 +28,8 @@ #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ BIT(7) | BIT(8)) -int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask); -int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask); - +int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, + bool reg_update); +int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, + bool reg_update); #endif /* __SOC_MEDIATEK_INFRACFG_H */ diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h new file mode 100644 index 000000000000..f4de33654a60 --- /dev/null +++ b/include/linux/soc/qcom/qmi.h @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, Linaro Ltd. + */ +#ifndef __QMI_HELPERS_H__ +#define __QMI_HELPERS_H__ + +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/qrtr.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct socket; + +/** + * qmi_header - wireformat header of QMI messages + * @type: type of message + * @txn_id: transaction id + * @msg_id: message id + * @msg_len: length of message payload following header + */ +struct qmi_header { + u8 type; + u16 txn_id; + u16 msg_id; + u16 msg_len; +} __packed; + +#define QMI_REQUEST 0 +#define QMI_RESPONSE 2 +#define QMI_INDICATION 4 + +#define QMI_COMMON_TLV_TYPE 0 + +enum qmi_elem_type { + QMI_EOTI, + QMI_OPT_FLAG, + QMI_DATA_LEN, + QMI_UNSIGNED_1_BYTE, + QMI_UNSIGNED_2_BYTE, + QMI_UNSIGNED_4_BYTE, + QMI_UNSIGNED_8_BYTE, + QMI_SIGNED_2_BYTE_ENUM, + QMI_SIGNED_4_BYTE_ENUM, + QMI_STRUCT, + QMI_STRING, +}; + +enum qmi_array_type { + NO_ARRAY, + STATIC_ARRAY, + VAR_LEN_ARRAY, +}; + +/** + * struct qmi_elem_info - describes how to encode a single QMI element + * @data_type: Data type of this element. + * @elem_len: Array length of this element, if an array. + * @elem_size: Size of a single instance of this data type. + * @array_type: Array type of this element. + * @tlv_type: QMI message specific type to identify which element + * is present in an incoming message. + * @offset: Specifies the offset of the first instance of this + * element in the data structure. + * @ei_array: Null-terminated array of @qmi_elem_info to describe nested + * structures. 
+ */ +struct qmi_elem_info { + enum qmi_elem_type data_type; + u32 elem_len; + u32 elem_size; + enum qmi_array_type array_type; + u8 tlv_type; + u32 offset; + struct qmi_elem_info *ei_array; +}; + +#define QMI_RESULT_SUCCESS_V01 0 +#define QMI_RESULT_FAILURE_V01 1 + +#define QMI_ERR_NONE_V01 0 +#define QMI_ERR_MALFORMED_MSG_V01 1 +#define QMI_ERR_NO_MEMORY_V01 2 +#define QMI_ERR_INTERNAL_V01 3 +#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 +#define QMI_ERR_INVALID_ID_V01 41 +#define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_INCOMPATIBLE_STATE_V01 90 +#define QMI_ERR_NOT_SUPPORTED_V01 94 + +/** + * qmi_response_type_v01 - common response header (decoded) + * @result: result of the transaction + * @error: error value, when @result is QMI_RESULT_FAILURE_V01 + */ +struct qmi_response_type_v01 { + u16 result; + u16 error; +}; + +extern struct qmi_elem_info qmi_response_type_v01_ei[]; + +/** + * struct qmi_service - context to track lookup-results + * @service: service type + * @version: version of the @service + * @instance: instance id of the @service + * @node: node of the service + * @port: port of the service + * @priv: handle for client's use + * @list_node: list_head for house keeping + */ +struct qmi_service { + unsigned int service; + unsigned int version; + unsigned int instance; + + unsigned int node; + unsigned int port; + + void *priv; + struct list_head list_node; +}; + +struct qmi_handle; + +/** + * struct qmi_ops - callbacks for qmi_handle + * @new_server: inform client of a new_server lookup-result, returning + * successfully from this call causes the library to call + * @del_server as the service is removed from the + * lookup-result. @priv of the qmi_service can be used by + * the client + * @del_server: inform client of a del_server lookup-result + * @net_reset: inform client that the name service was restarted and + * that and any state needs to be released + * @msg_handler: invoked for incoming messages, allows a client to + * override the usual QMI message handler + * @bye: inform a client that all clients from a node are gone + * @del_client: inform a client that a particular client is gone + */ +struct qmi_ops { + int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*net_reset)(struct qmi_handle *qmi); + void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + const void *data, size_t count); + void (*bye)(struct qmi_handle *qmi, unsigned int node); + void (*del_client)(struct qmi_handle *qmi, + unsigned int node, unsigned int port); +}; + +/** + * struct qmi_txn - transaction context + * @qmi: QMI handle this transaction is associated with + * @id: transaction id + * @lock: for synchronization between handler and waiter of messages + * @completion: completion object as the transaction receives a response + * @result: result code for the completed transaction + * @ei: description of the QMI encoded response (optional) + * @dest: destination buffer to decode message into (optional) + */ +struct qmi_txn { + struct qmi_handle *qmi; + + int id; + + struct mutex lock; + struct completion completion; + int result; + + struct qmi_elem_info *ei; + void *dest; +}; + +/** + * struct qmi_msg_handler - description of QMI message handler + * @type: type of message + * @msg_id: message id + * @ei: description of the QMI encoded message + * @decoded_size: size of the decoded object + * @fn: function to invoke as the message is decoded + */ +struct qmi_msg_handler { + unsigned int 
type; + unsigned int msg_id; + + struct qmi_elem_info *ei; + + size_t decoded_size; + void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *decoded); +}; + +/** + * struct qmi_handle - QMI context + * @sock: socket handle + * @sock_lock: synchronization of @sock modifications + * @sq: sockaddr of @sock + * @work: work for handling incoming messages + * @wq: workqueue to post @work on + * @recv_buf: scratch buffer for handling incoming messages + * @recv_buf_size: size of @recv_buf + * @lookups: list of registered lookup requests + * @lookup_results: list of lookup-results advertised to the client + * @services: list of registered services (by this client) + * @ops: reference to callbacks + * @txns: outstanding transactions + * @txn_lock: lock for modifications of @txns + * @handlers: list of handlers for incoming messages + */ +struct qmi_handle { + struct socket *sock; + struct mutex sock_lock; + + struct sockaddr_qrtr sq; + + struct work_struct work; + struct workqueue_struct *wq; + + void *recv_buf; + size_t recv_buf_size; + + struct list_head lookups; + struct list_head lookup_results; + struct list_head services; + + struct qmi_ops ops; + + struct idr txns; + struct mutex txn_lock; + + const struct qmi_msg_handler *handlers; +}; + +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); + +int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers); +void qmi_handle_release(struct qmi_handle *qmi); + +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, struct qmi_elem_info *ei, + const void *c_struct); + +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, struct qmi_elem_info *ei, + const void *c_struct); + +int qmi_decode_message(const void *buf, size_t len, + struct qmi_elem_info *ei, void *c_struct); + +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + struct qmi_elem_info *ei, void *c_struct); +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); +void qmi_txn_cancel(struct qmi_txn *txn); + +#endif diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h new file mode 100644 index 000000000000..e91fdcf41049 --- /dev/null +++ b/include/linux/soundwire/sdw.h @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SOUNDWIRE_H +#define __SOUNDWIRE_H + +struct sdw_bus; +struct sdw_slave; + +/* SDW spec defines and enums, as defined by MIPI 1.1. 
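The qmi_txn_*() and qmi_send_request() interfaces above are intended to be used together: initialise a transaction with the response decoder description, send the request, then wait for (or cancel) the transaction. A sketch of that flow, where the DEMO_* constants, demo_req/demo_resp structures and their ei arrays are all assumed to be defined by the client:

static int demo_query(struct qmi_handle *qmi, struct sockaddr_qrtr *sq)
{
	struct demo_req req = {};	/* hypothetical request C struct */
	struct demo_resp resp = {};	/* hypothetical response C struct */
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(qmi, &txn, demo_resp_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, DEMO_QUERY_MSG_ID,
			       DEMO_QUERY_MSG_MAX_LEN, demo_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* completes once the matching response has been decoded into resp */
	return qmi_txn_wait(&txn, 5 * HZ);
}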
Spec */ + +/* SDW Broadcast Device Number */ +#define SDW_BROADCAST_DEV_NUM 15 + +/* SDW Enumeration Device Number */ +#define SDW_ENUM_DEV_NUM 0 + +/* SDW Group Device Numbers */ +#define SDW_GROUP12_DEV_NUM 12 +#define SDW_GROUP13_DEV_NUM 13 + +/* SDW Master Device Number, not supported yet */ +#define SDW_MASTER_DEV_NUM 14 + +#define SDW_NUM_DEV_ID_REGISTERS 6 + +#define SDW_MAX_DEVICES 11 + +/** + * enum sdw_slave_status - Slave status + * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus. + * @SDW_SLAVE_ATTACHED: Slave is attached with bus. + * @SDW_SLAVE_ALERT: Some alert condition on the Slave + * @SDW_SLAVE_RESERVED: Reserved for future use + */ +enum sdw_slave_status { + SDW_SLAVE_UNATTACHED = 0, + SDW_SLAVE_ATTACHED = 1, + SDW_SLAVE_ALERT = 2, + SDW_SLAVE_RESERVED = 3, +}; + +/** + * enum sdw_command_response - Command response as defined by SDW spec + * @SDW_CMD_OK: cmd was successful + * @SDW_CMD_IGNORED: cmd was ignored + * @SDW_CMD_FAIL: cmd was NACKed + * @SDW_CMD_TIMEOUT: cmd timedout + * @SDW_CMD_FAIL_OTHER: cmd failed due to other reason than above + * + * NOTE: The enum is different than actual Spec as response in the Spec is + * combination of ACK/NAK bits + * + * SDW_CMD_TIMEOUT/FAIL_OTHER is defined for SW use, not in spec + */ +enum sdw_command_response { + SDW_CMD_OK = 0, + SDW_CMD_IGNORED = 1, + SDW_CMD_FAIL = 2, + SDW_CMD_TIMEOUT = 3, + SDW_CMD_FAIL_OTHER = 4, +}; + +/* + * SDW properties, defined in MIPI DisCo spec v1.0 + */ +enum sdw_clk_stop_reset_behave { + SDW_CLK_STOP_KEEP_STATUS = 1, +}; + +/** + * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a + * read + * @SDW_P15_READ_IGNORED: Read is ignored + * @SDW_P15_CMD_OK: Command is ok + */ +enum sdw_p15_behave { + SDW_P15_READ_IGNORED = 0, + SDW_P15_CMD_OK = 1, +}; + +/** + * enum sdw_dpn_type - Data port types + * @SDW_DPN_FULL: Full Data Port is supported + * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec. + * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3 + * are not implemented. + * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec. + * DPN_SampleCtrl2, DPN_HCtrl are not implemented. 
+ */ +enum sdw_dpn_type { + SDW_DPN_FULL = 0, + SDW_DPN_SIMPLE = 1, + SDW_DPN_REDUCED = 2, +}; + +/** + * enum sdw_clk_stop_mode - Clock Stop modes + * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock + * restart + * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode, + * not capable of continuing operation seamlessly when the clock restarts + */ +enum sdw_clk_stop_mode { + SDW_CLK_STOP_MODE0 = 0, + SDW_CLK_STOP_MODE1 = 1, +}; + +/** + * struct sdw_dp0_prop - DP0 properties + * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @num_words: number of wordlengths supported + * @words: wordlengths supported + * @flow_controlled: Slave implementation results in an OK_NotReady + * response + * @simple_ch_prep_sm: If channel prepare sequence is required + * @device_interrupts: If implementation-defined interrupts are supported + * + * The wordlengths are specified by Spec as max, min AND number of + * discrete values, implementation can define based on the wordlengths they + * support + */ +struct sdw_dp0_prop { + u32 max_word; + u32 min_word; + u32 num_words; + u32 *words; + bool flow_controlled; + bool simple_ch_prep_sm; + bool device_interrupts; +}; + +/** + * struct sdw_dpn_audio_mode - Audio mode properties for DPn + * @bus_min_freq: Minimum bus frequency, in Hz + * @bus_max_freq: Maximum bus frequency, in Hz + * @bus_num_freq: Number of discrete frequencies supported + * @bus_freq: Discrete bus frequencies, in Hz + * @min_freq: Minimum sampling frequency, in Hz + * @max_freq: Maximum sampling bus frequency, in Hz + * @num_freq: Number of discrete sampling frequency supported + * @freq: Discrete sampling frequencies, in Hz + * @prep_ch_behave: Specifies the dependencies between Channel Prepare + * sequence and bus clock configuration + * If 0, Channel Prepare can happen at any Bus clock rate + * If 1, Channel Prepare sequence shall happen only after Bus clock is + * changed to a frequency supported by this mode or compatible modes + * described by the next field + * @glitchless: Bitmap describing possible glitchless transitions from this + * Audio Mode to other Audio Modes + */ +struct sdw_dpn_audio_mode { + u32 bus_min_freq; + u32 bus_max_freq; + u32 bus_num_freq; + u32 *bus_freq; + u32 max_freq; + u32 min_freq; + u32 num_freq; + u32 *freq; + u32 prep_ch_behave; + u32 glitchless; +}; + +/** + * struct sdw_dpn_prop - Data Port DPn properties + * @num: port number + * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 + * (inclusive) + * @num_words: Number of discrete supported wordlengths + * @words: Discrete supported wordlength + * @type: Data port type. 
Full, Simplified or Reduced + * @max_grouping: Maximum number of samples that can be grouped together for + * a full data port + * @simple_ch_prep_sm: If the port supports simplified channel prepare state + * machine + * @ch_prep_timeout: Port-specific timeout value, in milliseconds + * @device_interrupts: If set, each bit corresponds to support for + * implementation-defined interrupts + * @max_ch: Maximum channels supported + * @min_ch: Minimum channels supported + * @num_ch: Number of discrete channels supported + * @ch: Discrete channels supported + * @num_ch_combinations: Number of channel combinations supported + * @ch_combinations: Channel combinations supported + * @modes: SDW mode supported + * @max_async_buffer: Number of samples that this port can buffer in + * asynchronous modes + * @block_pack_mode: Type of block port mode supported + * @port_encoding: Payload Channel Sample encoding schemes supported + * @audio_modes: Audio modes supported + */ +struct sdw_dpn_prop { + u32 num; + u32 max_word; + u32 min_word; + u32 num_words; + u32 *words; + enum sdw_dpn_type type; + u32 max_grouping; + bool simple_ch_prep_sm; + u32 ch_prep_timeout; + u32 device_interrupts; + u32 max_ch; + u32 min_ch; + u32 num_ch; + u32 *ch; + u32 num_ch_combinations; + u32 *ch_combinations; + u32 modes; + u32 max_async_buffer; + bool block_pack_mode; + u32 port_encoding; + struct sdw_dpn_audio_mode *audio_modes; +}; + +/** + * struct sdw_slave_prop - SoundWire Slave properties + * @mipi_revision: Spec version of the implementation + * @wake_capable: Wake-up events are supported + * @test_mode_capable: If test mode is supported + * @clk_stop_mode1: Clock-Stop Mode 1 is supported + * @simple_clk_stop_capable: Simple clock mode is supported + * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State + * Machine transitions, in milliseconds + * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine + * transitions, in milliseconds + * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare + * state machine (P=1 SCSP_SM) after exit from clock-stop mode1 + * @high_PHY_capable: Slave is HighPHY capable + * @paging_support: Slave implements paging registers SCP_AddrPage1 and + * SCP_AddrPage2 + * @bank_delay_support: Slave implements bank delay/bridge support registers + * SCP_BankDelay and SCP_NextFrame + * @p15_behave: Slave behavior when the Master attempts a read to the Port15 + * alias + * @lane_control_support: Slave supports lane control + * @master_count: Number of Masters present on this Slave + * @source_ports: Bitmap identifying source ports + * @sink_ports: Bitmap identifying sink ports + * @dp0_prop: Data Port 0 properties + * @src_dpn_prop: Source Data Port N properties + * @sink_dpn_prop: Sink Data Port N properties + */ +struct sdw_slave_prop { + u32 mipi_revision; + bool wake_capable; + bool test_mode_capable; + bool clk_stop_mode1; + bool simple_clk_stop_capable; + u32 clk_stop_timeout; + u32 ch_prep_timeout; + enum sdw_clk_stop_reset_behave reset_behave; + bool high_PHY_capable; + bool paging_support; + bool bank_delay_support; + enum sdw_p15_behave p15_behave; + bool lane_control_support; + u32 master_count; + u32 source_ports; + u32 sink_ports; + struct sdw_dp0_prop *dp0_prop; + struct sdw_dpn_prop *src_dpn_prop; + struct sdw_dpn_prop *sink_dpn_prop; +}; + +/** + * struct sdw_master_prop - Master properties + * @revision: MIPI spec version of the implementation + * @master_count: Number of masters + * @clk_stop_mode: Bitmap for Clock Stop modes 
supported + * @max_freq: Maximum Bus clock frequency, in Hz + * @num_clk_gears: Number of clock gears supported + * @clk_gears: Clock gears supported + * @num_freq: Number of clock frequencies supported, in Hz + * @freq: Clock frequencies supported, in Hz + * @default_frame_rate: Controller default Frame rate, in Hz + * @default_row: Number of rows + * @default_col: Number of columns + * @dynamic_frame: Dynamic frame supported + * @err_threshold: Number of times that software may retry sending a single + * command + * @dpn_prop: Data Port N properties + */ +struct sdw_master_prop { + u32 revision; + u32 master_count; + enum sdw_clk_stop_mode clk_stop_mode; + u32 max_freq; + u32 num_clk_gears; + u32 *clk_gears; + u32 num_freq; + u32 *freq; + u32 default_frame_rate; + u32 default_row; + u32 default_col; + bool dynamic_frame; + u32 err_threshold; + struct sdw_dpn_prop *dpn_prop; +}; + +int sdw_master_read_prop(struct sdw_bus *bus); +int sdw_slave_read_prop(struct sdw_slave *slave); + +/* + * SDW Slave Structures and APIs + */ + +/** + * struct sdw_slave_id - Slave ID + * @mfg_id: MIPI Manufacturer ID + * @part_id: Device Part ID + * @class_id: MIPI Class ID, unused now. + * Currently a placeholder in MIPI SoundWire Spec + * @unique_id: Device unique ID + * @sdw_version: SDW version implemented + * + * The order of the IDs here does not follow the DisCo spec definitions + */ +struct sdw_slave_id { + __u16 mfg_id; + __u16 part_id; + __u8 class_id; + __u8 unique_id:4; + __u8 sdw_version:4; +}; + +/** + * struct sdw_slave_intr_status - Slave interrupt status + * @control_port: control port status + * @port: data port status + */ +struct sdw_slave_intr_status { + u8 control_port; + u8 port[15]; +}; + +/** + * struct sdw_slave_ops - Slave driver callback ops + * @read_prop: Read Slave properties + * @interrupt_callback: Device interrupt notification (invoked in thread + * context) + * @update_status: Update Slave status + */ +struct sdw_slave_ops { + int (*read_prop)(struct sdw_slave *sdw); + int (*interrupt_callback)(struct sdw_slave *slave, + struct sdw_slave_intr_status *status); + int (*update_status)(struct sdw_slave *slave, + enum sdw_slave_status status); +}; + +/** + * struct sdw_slave - SoundWire Slave + * @id: MIPI device ID + * @dev: Linux device + * @status: Status reported by the Slave + * @bus: Bus handle + * @ops: Slave callback ops + * @prop: Slave properties + * @node: node for bus list + * @port_ready: Port ready completion flag for each Slave port + * @dev_num: Device Number assigned by Bus + */ +struct sdw_slave { + struct sdw_slave_id id; + struct device dev; + enum sdw_slave_status status; + struct sdw_bus *bus; + const struct sdw_slave_ops *ops; + struct sdw_slave_prop prop; + struct list_head node; + struct completion *port_ready; + u16 dev_num; +}; + +#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) + +struct sdw_driver { + const char *name; + + int (*probe)(struct sdw_slave *sdw, + const struct sdw_device_id *id); + int (*remove)(struct sdw_slave *sdw); + void (*shutdown)(struct sdw_slave *sdw); + + const struct sdw_device_id *id_table; + const struct sdw_slave_ops *ops; + + struct device_driver driver; +}; + +#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ + { .mfg_id = (_mfg_id), .part_id = (_part_id), \ + .driver_data = (unsigned long)(_drv_data) } + +int sdw_handle_slave_status(struct sdw_bus *bus, + enum sdw_slave_status status[]); + +/* + * SDW master structures and APIs + */ + +struct sdw_msg; + +/** + * struct sdw_defer - SDW deffered 
message + * @length: message length + * @complete: message completion + * @msg: SDW message + */ +struct sdw_defer { + int length; + struct completion complete; + struct sdw_msg *msg; +}; + +/** + * struct sdw_master_ops - Master driver ops + * @read_prop: Read Master properties + * @xfer_msg: Transfer message callback + * @xfer_msg_defer: Defer version of transfer message callback + * @reset_page_addr: Reset the SCP page address registers + */ +struct sdw_master_ops { + int (*read_prop)(struct sdw_bus *bus); + + enum sdw_command_response (*xfer_msg) + (struct sdw_bus *bus, struct sdw_msg *msg); + enum sdw_command_response (*xfer_msg_defer) + (struct sdw_bus *bus, struct sdw_msg *msg, + struct sdw_defer *defer); + enum sdw_command_response (*reset_page_addr) + (struct sdw_bus *bus, unsigned int dev_num); +}; + +/** + * struct sdw_bus - SoundWire bus + * @dev: Master linux device + * @link_id: Link id number, can be 0 to N, unique for each Master + * @slaves: list of Slaves on this bus + * @assigned: Bitmap for Slave device numbers. + * Bit set implies used number, bit clear implies unused number. + * @bus_lock: bus lock + * @msg_lock: message lock + * @ops: Master callback ops + * @prop: Master properties + * @defer_msg: Defer message + * @clk_stop_timeout: Clock stop timeout computed + */ +struct sdw_bus { + struct device *dev; + unsigned int link_id; + struct list_head slaves; + DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); + struct mutex bus_lock; + struct mutex msg_lock; + const struct sdw_master_ops *ops; + struct sdw_master_prop prop; + struct sdw_defer defer_msg; + unsigned int clk_stop_timeout; +}; + +int sdw_add_bus_master(struct sdw_bus *bus); +void sdw_delete_bus_master(struct sdw_bus *bus); + +/* messaging and data APIs */ + +int sdw_read(struct sdw_slave *slave, u32 addr); +int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); +int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); +int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); + +#endif /* __SOUNDWIRE_H */ diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h new file mode 100644 index 000000000000..4b37528f592d --- /dev/null +++ b/include/linux/soundwire/sdw_intel.h @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. + +#ifndef __SDW_INTEL_H +#define __SDW_INTEL_H + +/** + * struct sdw_intel_res - Soundwire Intel resource structure + * @mmio_base: mmio base of SoundWire registers + * @irq: interrupt number + * @handle: ACPI parent handle + * @parent: parent device + */ +struct sdw_intel_res { + void __iomem *mmio_base; + int irq; + acpi_handle handle; + struct device *parent; +}; + +void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res); +void sdw_intel_exit(void *arg); + +#endif diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h new file mode 100644 index 000000000000..df472b1ab410 --- /dev/null +++ b/include/linux/soundwire/sdw_registers.h @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) +// Copyright(c) 2015-17 Intel Corporation. 
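On the Slave side, a codec driver typically supplies a struct sdw_slave_ops and uses sdw_read()/sdw_write() once the device reports attached. A hedged fragment (demo_* names invented; the register constants come from <linux/soundwire/sdw_registers.h> introduced just below):

#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>

static int demo_update_status(struct sdw_slave *slave,
			      enum sdw_slave_status status)
{
	if (status != SDW_SLAVE_ATTACHED)
		return 0;

	/* unmask the implementation-defined interrupt once attached */
	return sdw_write(slave, SDW_SCP_INTMASK1, SDW_SCP_INT1_IMPL_DEF);
}

static const struct sdw_slave_ops demo_slave_ops = {
	.read_prop	= sdw_slave_read_prop,
	.update_status	= demo_update_status,
};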
+ +#ifndef __SDW_REGISTERS_H +#define __SDW_REGISTERS_H + +/* + * typically we define register and shifts but if one observes carefully, + * the shift can be generated from MASKS using few bit primitaives like ffs + * etc, so we use that and avoid defining shifts + */ +#define SDW_REG_SHIFT(n) (ffs(n) - 1) + +/* + * SDW registers as defined by MIPI 1.1 Spec + */ +#define SDW_REGADDR GENMASK(14, 0) +#define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15) +#define SDW_SCP_ADDRPAGE1_MASK GENMASK(30, 23) + +#define SDW_REG_NO_PAGE 0x00008000 +#define SDW_REG_OPTIONAL_PAGE 0x00010000 +#define SDW_REG_MAX 0x80000000 + +#define SDW_DPN_SIZE 0x100 +#define SDW_BANK1_OFFSET 0x10 + +/* + * DP0 Interrupt register & bits + * + * Spec treats Status (RO) and Clear (WC) as separate but they are same + * address, so treat as same register with WC. + */ + +/* both INT and STATUS register are same */ +#define SDW_DP0_INT 0x0 +#define SDW_DP0_INTMASK 0x1 +#define SDW_DP0_PORTCTRL 0x2 +#define SDW_DP0_BLOCKCTRL1 0x3 +#define SDW_DP0_PREPARESTATUS 0x4 +#define SDW_DP0_PREPARECTRL 0x5 + +#define SDW_DP0_INT_TEST_FAIL BIT(0) +#define SDW_DP0_INT_PORT_READY BIT(1) +#define SDW_DP0_INT_BRA_FAILURE BIT(2) +#define SDW_DP0_INT_IMPDEF1 BIT(5) +#define SDW_DP0_INT_IMPDEF2 BIT(6) +#define SDW_DP0_INT_IMPDEF3 BIT(7) + +#define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2) +#define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4) +#define SDW_DP0_PORTCTRL_BPT_PAYLD GENMASK(7, 6) + +#define SDW_DP0_CHANNELEN 0x20 +#define SDW_DP0_SAMPLECTRL1 0x22 +#define SDW_DP0_SAMPLECTRL2 0x23 +#define SDW_DP0_OFFSETCTRL1 0x24 +#define SDW_DP0_OFFSETCTRL2 0x25 +#define SDW_DP0_HCTRL 0x26 +#define SDW_DP0_LANECTRL 0x28 + +/* Both INT and STATUS register are same */ +#define SDW_SCP_INT1 0x40 +#define SDW_SCP_INTMASK1 0x41 + +#define SDW_SCP_INT1_PARITY BIT(0) +#define SDW_SCP_INT1_BUS_CLASH BIT(1) +#define SDW_SCP_INT1_IMPL_DEF BIT(2) +#define SDW_SCP_INT1_SCP2_CASCADE BIT(7) +#define SDW_SCP_INT1_PORT0_3 GENMASK(6, 3) + +#define SDW_SCP_INTSTAT2 0x42 +#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7) +#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0) + + +#define SDW_SCP_INTSTAT3 0x43 +#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0) + +/* Number of interrupt status registers */ +#define SDW_NUM_INT_STAT_REGISTERS 3 + +/* Number of interrupt clear registers */ +#define SDW_NUM_INT_CLEAR_REGISTERS 1 + +#define SDW_SCP_CTRL 0x44 +#define SDW_SCP_CTRL_CLK_STP_NOW BIT(1) +#define SDW_SCP_CTRL_FORCE_RESET BIT(7) + +#define SDW_SCP_STAT 0x44 +#define SDW_SCP_STAT_CLK_STP_NF BIT(0) +#define SDW_SCP_STAT_HPHY_NOK BIT(5) +#define SDW_SCP_STAT_CURR_BANK BIT(6) + +#define SDW_SCP_SYSTEMCTRL 0x45 +#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP BIT(0) +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE BIT(2) +#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN BIT(3) +#define SDW_SCP_SYSTEMCTRL_HIGH_PHY BIT(4) + +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0 0 +#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1 BIT(2) + +#define SDW_SCP_DEVNUMBER 0x46 +#define SDW_SCP_HIGH_PHY_CHECK 0x47 +#define SDW_SCP_ADDRPAGE1 0x48 +#define SDW_SCP_ADDRPAGE2 0x49 +#define SDW_SCP_KEEPEREN 0x4A +#define SDW_SCP_BANKDELAY 0x4B +#define SDW_SCP_TESTMODE 0x4F +#define SDW_SCP_DEVID_0 0x50 +#define SDW_SCP_DEVID_1 0x51 +#define SDW_SCP_DEVID_2 0x52 +#define SDW_SCP_DEVID_3 0x53 +#define SDW_SCP_DEVID_4 0x54 +#define SDW_SCP_DEVID_5 0x55 + +/* Banked Registers */ +#define SDW_SCP_FRAMECTRL_B0 0x60 +#define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET) +#define SDW_SCP_NEXTFRAME_B0 0x61 +#define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET) + +/* 
Both INT and STATUS register is same */ +#define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_INT_TEST_FAIL BIT(0) +#define SDW_DPN_INT_PORT_READY BIT(1) +#define SDW_DPN_INT_IMPDEF1 BIT(5) +#define SDW_DPN_INT_IMPDEF2 BIT(6) +#define SDW_DPN_INT_IMPDEF3 BIT(7) + +#define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0) +#define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2) +#define SDW_DPN_PORTCTRL_NXTINVBANK BIT(4) + +#define SDW_DPN_BLOCKCTRL1_WDLEN GENMASK(5, 0) + +#define SDW_DPN_PREPARECTRL_CH_PREP GENMASK(7, 0) + +#define SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n)) +#define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n)) + +#define SDW_DPN_SAMPLECTRL_LOW GENMASK(7, 0) +#define SDW_DPN_SAMPLECTRL_HIGH GENMASK(15, 8) + +#define SDW_DPN_HCTRL_HSTART GENMASK(7, 4) +#define SDW_DPN_HCTRL_HSTOP GENMASK(3, 0) + +#define SDW_NUM_CASC_PORT_INTSTAT1 4 +#define SDW_CASC_PORT_START_INTSTAT1 0 +#define SDW_CASC_PORT_MASK_INTSTAT1 0x8 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1 0x0 + +#define SDW_NUM_CASC_PORT_INTSTAT2 7 +#define SDW_CASC_PORT_START_INTSTAT2 4 +#define SDW_CASC_PORT_MASK_INTSTAT2 1 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2 1 + +#define SDW_NUM_CASC_PORT_INTSTAT3 4 +#define SDW_CASC_PORT_START_INTSTAT3 11 +#define SDW_CASC_PORT_MASK_INTSTAT3 1 +#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2 + +#endif /* __SDW_REGISTERS_H */ diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h new file mode 100644 index 000000000000..9fd553e553e9 --- /dev/null +++ b/include/linux/soundwire/sdw_type.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2015-17 Intel Corporation. 
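As the comment at the top of sdw_registers.h notes, shifts are derived from the masks via ffs() rather than being defined separately. A minimal sketch of a field accessor built that way (the demo_ helper name is invented):

/* Extract the HSTART field (bits 7:4) from a DPn HCTRL register value. */
static u8 demo_hctrl_hstart(u8 hctrl)
{
	return (hctrl & SDW_DPN_HCTRL_HSTART) >>
	       SDW_REG_SHIFT(SDW_DPN_HCTRL_HSTART);
}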
+ +#ifndef __SOUNDWIRE_TYPES_H +#define __SOUNDWIRE_TYPES_H + +extern struct bus_type sdw_bus_type; + +#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver) + +#define sdw_register_driver(drv) \ + __sdw_register_driver(drv, THIS_MODULE) + +int __sdw_register_driver(struct sdw_driver *drv, struct module *); +void sdw_unregister_driver(struct sdw_driver *drv); + +int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); + +#endif /* __SOUNDWIRE_TYPES_H */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3bf273538840..4894d322d258 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, + size_t max_size, unsigned int cpu_mult, + gfp_t gfp); + +void free_bucket_spinlocks(spinlock_t *locks); + #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 2181719fd907..998a4ba28eba 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -20,12 +20,20 @@ enum { #endif /** + * sizeof_field(TYPE, MEMBER) + * + * @TYPE: The structure containing the field of interest + * @MEMBER: The field to return the size of + */ +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) + +/** * offsetofend(TYPE, MEMBER) * * @TYPE: The type of the structure * @MEMBER: The member within the structure to get the end offset of */ #define offsetofend(TYPE, MEMBER) \ - (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) #endif diff --git a/include/linux/string.h b/include/linux/string.h index cfd83eb2f926..dd39a690c841 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -11,6 +11,7 @@ extern char *strndup_user(const char __user *, long); extern void *memdup_user(const void __user *, size_t); +extern void *vmemdup_user(const void __user *, size_t); extern void *memdup_user_nul(const void __user *, size_t); /* @@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t); size_t strlcpy(char *, const char *, size_t); #endif #ifndef __HAVE_ARCH_STRSCPY -ssize_t __must_check strscpy(char *, const char *, size_t); +ssize_t strscpy(char *, const char *, size_t); #endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 71c237e8240e..ed761f751ecb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int rpc_restart_call_prepare(struct rpc_task *); int rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); -int rpc_protocol(struct rpc_clnt *); struct net * rpc_net_ns(struct rpc_clnt *); size_t rpc_max_payload(struct rpc_clnt *); size_t rpc_max_bc_payload(struct rpc_clnt *); diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 221b7a2e5406..5859563e3c1f 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -64,7 +64,7 @@ enum rpcrdma_memreg { RPCRDMA_MEMWINDOWS, RPCRDMA_MEMWINDOWS_ASYNC, RPCRDMA_MTHCAFMR, - RPCRDMA_FRMR, + RPCRDMA_FRWR, RPCRDMA_ALLPHYSICAL, RPCRDMA_LAST }; diff --git a/include/linux/suspend.h 
b/include/linux/suspend.h index cc22a24516d6..440b62f7502e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -384,6 +384,8 @@ extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); extern unsigned long get_safe_page(gfp_t gfp_mask); +extern asmlinkage int swsusp_arch_suspend(void); +extern asmlinkage int swsusp_arch_resume(void); extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); extern int hibernate(void); diff --git a/include/linux/swap.h b/include/linux/swap.h index c2b8128799c1..7b6a59f722a3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -332,7 +332,6 @@ extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); -extern void lru_add_drain_all_cpuslocked(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); extern void mark_page_lazyfree(struct page *page); @@ -345,7 +344,6 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); -extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 24ed817082ee..5b1f2a00491c 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, enum dma_sync_target target); /* Accessory functions. 
*/ + +void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, + gfp_t flags, unsigned long attrs); +void swiotlb_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_addr, unsigned long attrs); + extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); @@ -115,10 +121,10 @@ extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); #ifdef CONFIG_SWIOTLB -extern void __init swiotlb_free(void); +extern void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); #else -static inline void swiotlb_free(void) { } +static inline void swiotlb_exit(void) { } static inline unsigned int swiotlb_max_segment(void) { return 0; } #endif @@ -126,4 +132,6 @@ extern void swiotlb_print_info(void); extern int is_swiotlb_buffer(phys_addr_t paddr); extern void swiotlb_set_max_segment(unsigned int); +extern const struct dma_map_ops swiotlb_dma_ops; + #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index 09d73d0d1aa8..ec93e93371fa 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -100,6 +100,9 @@ struct sw_event_regs { u32 gpio_interrupt_hdr; u32 gpio_interrupt_data; u32 reserved16[4]; + u32 gfms_event_hdr; + u32 gfms_event_data; + u32 reserved17[4]; } __packed; enum { @@ -168,6 +171,14 @@ struct ntb_info_regs { u16 reserved1; u64 ep_map; u16 requester_id; + u16 reserved2; + u32 reserved3[4]; + struct nt_partition_info { + u32 xlink_enabled; + u32 target_part_low; + u32 target_part_high; + u32 reserved; + } ntp_info[48]; } __packed; struct part_cfg_regs { @@ -284,7 +295,20 @@ enum { struct pff_csr_regs { u16 vendor_id; u16 device_id; - u32 pci_cfg_header[15]; + u16 pcicmd; + u16 pcists; + u32 pci_class; + u32 pci_opts; + union { + u32 pci_bar[6]; + u64 pci_bar64[3]; + }; + u32 pci_cardbus; + u32 pci_subsystem_id; + u32 pci_expansion_rom; + u32 pci_cap_ptr; + u32 reserved1; + u32 pci_irq; u32 pci_cap_region[48]; u32 pcie_cap_region[448]; u32 indirect_gas_window[128]; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 40839c02d28c..b8bfdc173ec0 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -113,7 +113,7 @@ struct attribute_group { } #define __ATTR_RO(_name) { \ - .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = _name##_show, \ } @@ -124,12 +124,11 @@ struct attribute_group { } #define __ATTR_WO(_name) { \ - .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ + .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ } -#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ - _name##_show, _name##_store) +#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } @@ -192,14 +191,13 @@ struct bin_attribute { } #define __BIN_ATTR_RO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ .read = _name##_read, \ .size = _size, \ } -#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ - (S_IWUSR | S_IRUGO), _name##_read, \ - _name##_write, _size) +#define __BIN_ATTR_RW(_name, _size) \ + __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) #define __BIN_ATTR_NULL __ATTR_NULL diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ca4a6361389b..8f4c54986f97 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -335,6 +335,17 @@ 
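The sysfs changes above spell the attribute permissions in octal (0444/0200/0644) instead of the S_I* constants; usage of the helpers is unchanged. For example, assuming foo_show()/foo_store() and bar_show() exist with the standard kobj_attribute prototypes:

static struct kobj_attribute foo_attribute = __ATTR_RW(foo);	/* mode 0644 */
static struct kobj_attribute bar_attribute = __ATTR_RO(bar);	/* mode 0444 */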
struct tcp_sock { int linger2; + +/* Sock_ops bpf program related variables */ +#ifdef CONFIG_BPF + u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs + * values defined in uapi/linux/tcp.h + */ +#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG) +#else +#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 +#endif + /* Receiver side RTT estimation */ struct { u32 rtt_us; @@ -344,7 +355,7 @@ struct tcp_sock { /* Receiver queue space */ struct { - int space; + u32 space; u32 seq; u64 time; } rcvq_space; diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index cb889afe576b..a2b3dfcee0b5 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -17,6 +17,7 @@ #include <linux/types.h> #include <linux/idr.h> +#include <linux/kref.h> #include <linux/list.h> #include <linux/tee.h> @@ -25,8 +26,12 @@ * specific TEE driver. */ -#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ -#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ +#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ +#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ +#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ +#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ +#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ struct device; struct tee_device; @@ -38,11 +43,17 @@ struct tee_shm_pool; * @teedev: pointer to this drivers struct tee_device * @list_shm: List of shared memory object owned by this context * @data: driver specific context data, managed by the driver + * @refcount: reference counter for this structure + * @releasing: flag that indicates if context is being released right now. + * It is needed to break circular dependency on context during + * shared memory release. 
*/ struct tee_context { struct tee_device *teedev; struct list_head list_shm; void *data; + struct kref refcount; + bool releasing; }; struct tee_param_memref { @@ -76,6 +87,8 @@ struct tee_param { * @cancel_req: request cancel of an ongoing invoke or open * @supp_revc: called for supplicant to get a command * @supp_send: called for supplicant to send a response + * @shm_register: register shared memory buffer in TEE + * @shm_unregister: unregister shared memory buffer in TEE */ struct tee_driver_ops { void (*get_version)(struct tee_device *teedev, @@ -94,6 +107,10 @@ struct tee_driver_ops { struct tee_param *param); int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, struct tee_param *param); + int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); + int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); }; /** @@ -150,6 +167,97 @@ int tee_device_register(struct tee_device *teedev); void tee_device_unregister(struct tee_device *teedev); /** + * struct tee_shm - shared memory object + * @teedev: device used to allocate the object + * @ctx: context using the object, if NULL the context is gone + * @link link element + * @paddr: physical address of the shared memory + * @kaddr: virtual address of the shared memory + * @size: size of shared memory + * @offset: offset of buffer in user space + * @pages: locked pages from userspace + * @num_pages: number of locked pages + * @dmabuf: dmabuf used to for exporting to user space + * @flags: defined by TEE_SHM_* in tee_drv.h + * @id: unique id of a shared memory object on this device + * + * This pool is only supposed to be accessed directly from the TEE + * subsystem and from drivers that implements their own shm pool manager. + */ +struct tee_shm { + struct tee_device *teedev; + struct tee_context *ctx; + struct list_head link; + phys_addr_t paddr; + void *kaddr; + size_t size; + unsigned int offset; + struct page **pages; + size_t num_pages; + struct dma_buf *dmabuf; + u32 flags; + int id; +}; + +/** + * struct tee_shm_pool_mgr - shared memory manager + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool_mgr { + const struct tee_shm_pool_mgr_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + * @destroy_poolmgr: called when destroying the pool manager + */ +struct tee_shm_pool_mgr_ops { + int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, + size_t size); + void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); + void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); +}; + +/** + * tee_shm_pool_alloc() - Create a shared memory pool from shm managers + * @priv_mgr: manager for driver private shared memory allocations + * @dmabuf_mgr: manager for dma-buf shared memory allocations + * + * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied + * in @dmabuf, others will use the range provided by @priv. + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. 
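tee_shm_pool_alloc() combines two managers, one for driver-private allocations and one for dma-buf backed allocations, and each manager can be created from a reserved-memory range with tee_shm_pool_mgr_alloc_res_mem() declared just below. A sketch of how a TEE driver might split a single carveout in half (addresses, size and the PAGE_SHIFT minimum allocation order are placeholders):

static struct tee_shm_pool *demo_build_pool(unsigned long va, phys_addr_t pa,
					    size_t size)
{
	struct tee_shm_pool_mgr *priv_mgr, *dmabuf_mgr;
	struct tee_shm_pool *pool;

	priv_mgr = tee_shm_pool_mgr_alloc_res_mem(va, pa, size / 2, PAGE_SHIFT);
	if (IS_ERR(priv_mgr))
		return ERR_CAST(priv_mgr);

	dmabuf_mgr = tee_shm_pool_mgr_alloc_res_mem(va + size / 2, pa + size / 2,
						    size / 2, PAGE_SHIFT);
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return ERR_CAST(dmabuf_mgr);
	}

	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return pool;
}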
+ */ +struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, + struct tee_shm_pool_mgr *dmabuf_mgr); + +/* + * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved + * memory + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + * + * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. + */ +struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, + size_t size, + int min_alloc_order); + +/** + * tee_shm_pool_mgr_destroy() - Free a shared memory manager + */ +static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) +{ + poolm->ops->destroy_poolmgr(poolm); +} + +/** * struct tee_shm_pool_mem_info - holds information needed to create a shared * memory pool * @vaddr: Virtual address of start of pool @@ -211,6 +319,40 @@ void *tee_get_drvdata(struct tee_device *teedev); struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); /** + * tee_shm_priv_alloc() - Allocate shared memory privately + * @dev: Device that allocates the shared memory + * @size: Requested size of shared memory + * + * Allocates shared memory buffer that is not associated with any client + * context. Such buffers are owned by TEE driver and used for internal calls. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); + +/** + * tee_shm_register() - Register shared memory buffer + * @ctx: Context that registers the shared memory + * @addr: Address is userspace of the shared buffer + * @length: Length of the shared buffer + * @flags: Flags setting properties for the requested shared memory. + * + * @returns a pointer to 'struct tee_shm' + */ +struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, + size_t length, u32 flags); + +/** + * tee_shm_is_registered() - Check if shared memory object in registered in TEE + * @shm: Shared memory handle + * @returns true if object is registered in TEE + */ +static inline bool tee_shm_is_registered(struct tee_shm *shm) +{ + return shm && (shm->flags & TEE_SHM_REGISTER); +} + +/** * tee_shm_free() - Free shared memory * @shm: Handle to shared memory to free */ @@ -260,11 +402,47 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs); int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); /** + * tee_shm_get_size() - Get size of shared memory buffer + * @shm: Shared memory handle + * @returns size of shared memory + */ +static inline size_t tee_shm_get_size(struct tee_shm *shm) +{ + return shm->size; +} + +/** + * tee_shm_get_pages() - Get list of pages that hold shared buffer + * @shm: Shared memory handle + * @num_pages: Number of pages will be stored there + * @returns pointer to pages array + */ +static inline struct page **tee_shm_get_pages(struct tee_shm *shm, + size_t *num_pages) +{ + *num_pages = shm->num_pages; + return shm->pages; +} + +/** + * tee_shm_get_page_offset() - Get shared buffer offset from page start + * @shm: Shared memory handle + * @returns page offset of shared buffer + */ +static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) +{ + return shm->offset; +} + +/** * tee_shm_get_id() - Get id of a shared memory object * @shm: Shared memory handle * @returns id */ -int tee_shm_get_id(struct tee_shm *shm); +static inline int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} /** * tee_shm_get_from_id() - Find shared memory 
object and increase reference @@ -275,4 +453,16 @@ int tee_shm_get_id(struct tee_shm *shm); */ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); +static inline bool tee_param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h new file mode 100644 index 000000000000..45bc6b376492 --- /dev/null +++ b/include/linux/ti-emif-sram.h @@ -0,0 +1,69 @@ +/* + * TI AM33XX EMIF Routines + * + * Copyright (C) 2016-2017 Texas Instruments Inc. + * Dave Gerlach + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __LINUX_TI_EMIF_H +#define __LINUX_TI_EMIF_H + +#include <linux/kbuild.h> +#include <linux/types.h> +#ifndef __ASSEMBLY__ + +struct emif_regs_amx3 { + u32 emif_sdcfg_val; + u32 emif_timing1_val; + u32 emif_timing2_val; + u32 emif_timing3_val; + u32 emif_ref_ctrl_val; + u32 emif_zqcfg_val; + u32 emif_pmcr_val; + u32 emif_pmcr_shdw_val; + u32 emif_rd_wr_level_ramp_ctrl; + u32 emif_rd_wr_exec_thresh; + u32 emif_cos_config; + u32 emif_priority_to_cos_mapping; + u32 emif_connect_id_serv_1_map; + u32 emif_connect_id_serv_2_map; + u32 emif_ocp_config_val; + u32 emif_lpddr2_nvm_tim; + u32 emif_lpddr2_nvm_tim_shdw; + u32 emif_dll_calib_ctrl_val; + u32 emif_dll_calib_ctrl_val_shdw; + u32 emif_ddr_phy_ctlr_1; + u32 emif_ext_phy_ctrl_vals[120]; +}; + +struct ti_emif_pm_data { + void __iomem *ti_emif_base_addr_virt; + phys_addr_t ti_emif_base_addr_phys; + unsigned long ti_emif_sram_config; + struct emif_regs_amx3 *regs_virt; + phys_addr_t regs_phys; +} __packed __aligned(8); + +struct ti_emif_pm_functions { + u32 save_context; + u32 restore_context; + u32 enter_sr; + u32 exit_sr; + u32 abort_sr; +} __packed __aligned(8); + +struct gen_pool; + +int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); +int ti_emif_get_mem_type(void); + +#endif +#endif /* __LINUX_TI_EMIF_H */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 5a090f5ab335..bcdd3790e94d 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -24,11 +24,6 @@ #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ -/* - * Chip num is this value or a valid tpm idx - */ -#define TPM_ANY_NUM 0xFFFF - struct tpm_chip; struct trusted_key_payload; struct trusted_key_options; @@ -50,46 +45,52 @@ struct tpm_class_ops { unsigned long *timeout_cap); int (*request_locality)(struct tpm_chip *chip, int loc); void (*relinquish_locality)(struct tpm_chip *chip, int loc); + void (*clk_enable)(struct tpm_chip *chip, bool value); }; #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) -extern int tpm_is_tpm2(u32 chip_num); -extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); -extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); -extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); -extern int tpm_get_random(u32 chip_num, u8 *data, 
size_t max); -extern int tpm_seal_trusted(u32 chip_num, +extern int tpm_is_tpm2(struct tpm_chip *chip); +extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); +extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash); +extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); +extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); +extern int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options); -extern int tpm_unseal_trusted(u32 chip_num, +extern int tpm_unseal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options); #else -static inline int tpm_is_tpm2(u32 chip_num) +static inline int tpm_is_tpm2(struct tpm_chip *chip) { return -ENODEV; } -static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { +static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) +{ return -ENODEV; } -static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { +static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, + const u8 *hash) +{ return -ENODEV; } -static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { +static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) +{ return -ENODEV; } -static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { +static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max) +{ return -ENODEV; } -static inline int tpm_seal_trusted(u32 chip_num, +static inline int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options) { return -ENODEV; } -static inline int tpm_unseal_trusted(u32 chip_num, +static inline int tpm_unseal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options) { diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h new file mode 100644 index 000000000000..20d9da77fc11 --- /dev/null +++ b/include/linux/tpm_eventlog.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_TPM_EVENTLOG_H__ +#define __LINUX_TPM_EVENTLOG_H__ + +#include <crypto/hash_info.h> + +#define TCG_EVENT_NAME_LEN_MAX 255 +#define MAX_TEXT_EVENT 1000 /* Max event string length */ +#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ +#define TPM2_ACTIVE_PCR_BANKS 3 + +#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 +#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 + +#ifdef CONFIG_PPC64 +#define do_endian_conversion(x) be32_to_cpu(x) +#else +#define do_endian_conversion(x) x +#endif + +enum bios_platform_class { + BIOS_CLIENT = 0x00, + BIOS_SERVER = 0x01, +}; + +struct tcpa_event { + u32 pcr_index; + u32 event_type; + u8 pcr_value[20]; /* SHA1 */ + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_event_types { + PREBOOT = 0, + POST_CODE, + UNUSED, + NO_ACTION, + SEPARATOR, + ACTION, + EVENT_TAG, + SCRTM_CONTENTS, + SCRTM_VERSION, + CPU_MICROCODE, + PLATFORM_CONFIG_FLAGS, + TABLE_OF_DEVICES, + COMPACT_HASH, + IPL, + IPL_PARTITION_DATA, + NONHOST_CODE, + NONHOST_CONFIG, + NONHOST_INFO, +}; + +struct tcpa_pc_event { + u32 event_id; + u32 event_size; + u8 event_data[0]; +}; + +enum tcpa_pc_event_ids { + SMBIOS = 1, + BIS_CERT, + POST_BIOS_ROM, + ESCD, + CMOS, + NVRAM, + OPTION_ROM_EXEC, + OPTION_ROM_CONFIG, + OPTION_ROM_MICROCODE = 10, + S_CRTM_VERSION, + S_CRTM_CONTENTS, + POST_CONTENTS, + HOST_TABLE_OF_DEVICES, +}; + +/* 
http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */ + +struct tcg_efi_specid_event_algs { + u16 alg_id; + u16 digest_size; +} __packed; + +struct tcg_efi_specid_event { + u8 signature[16]; + u32 platform_class; + u8 spec_version_minor; + u8 spec_version_major; + u8 spec_errata; + u8 uintnsize; + u32 num_algs; + struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS]; + u8 vendor_info_size; + u8 vendor_info[0]; +} __packed; + +struct tcg_pcr_event { + u32 pcr_idx; + u32 event_type; + u8 digest[20]; + u32 event_size; + u8 event[0]; +} __packed; + +struct tcg_event_field { + u32 event_size; + u8 event[0]; +} __packed; + +struct tpm2_digest { + u16 alg_id; + u8 digest[SHA512_DIGEST_SIZE]; +} __packed; + +struct tcg_pcr_event2 { + u32 pcr_idx; + u32 event_type; + u32 count; + struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS]; + struct tcg_event_field event; +} __packed; + +#endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index af44e7c2d577..8a1442c4e513 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); void perf_event_detach_bpf_prog(struct perf_event *event); +int perf_event_query_prog_array(struct perf_event *event, void __user *info); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } +static inline int +perf_event_query_prog_array(struct perf_event *event, void __user *info) +{ + return -EOPNOTSUPP; +} #endif enum { @@ -528,6 +534,7 @@ do { \ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); +DECLARE_PER_CPU(int, bpf_kprobe_override); extern int perf_trace_init(struct perf_event *event); extern void perf_trace_destroy(struct perf_event *event); diff --git a/include/linux/tty.h b/include/linux/tty.h index 7ac8ba208b1f..0a6c71e0ad01 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty); extern struct tty_struct *tty_kopen(dev_t device); extern void tty_kclose(struct tty_struct *tty); extern int tty_dev_name_to_number(const char *name, dev_t *number); +extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); +extern void tty_ldisc_unlock(struct tty_struct *tty); #else static inline void tty_kref_put(struct tty_struct *tty) { } diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 3bc5144b1c7e..1ef64d4ad887 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -187,7 +187,7 @@ struct tty_ldisc_ops { long (*compat_ioctl)(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, struct ktermios *old); - unsigned int (*poll)(struct tty_struct *, struct file *, + __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); int (*hangup)(struct tty_struct *tty); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 251e655d407f..efe79c1cdd47 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -273,4 +273,12 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define 
unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) #endif +#ifdef CONFIG_HARDENED_USERCOPY +void usercopy_warn(const char *name, const char *detail, bool to_user, + unsigned long offset, unsigned long len); +void __noreturn usercopy_abort(const char *name, const char *detail, + bool to_user, unsigned long offset, + unsigned long len); +#endif + #endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/usb.h b/include/linux/usb.h index fbbe974661f2..0173597e59aa 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -609,6 +609,10 @@ struct usb3_lpm_parameters { * to keep track of the number of functions that require USB 3.0 Link Power * Management to be disabled for this usb_device. This count should only * be manipulated by those functions, with the bandwidth_mutex is held. + * @hub_delay: cached value consisting of: + * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) + * + * Will be used as wValue for SetIsochDelay requests. * * Notes: * Usbcore drivers should not set usbdev->state directly. Instead use @@ -689,6 +693,8 @@ struct usb_device { struct usb3_lpm_parameters u1_params; struct usb3_lpm_parameters u2_params; unsigned lpm_disable_count; + + u16 hub_delay; }; #define to_usb_device(d) container_of(d, struct usb_device, dev) @@ -1293,7 +1299,6 @@ extern int usb_disabled(void); #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired * slot in the schedule */ #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ -#define URB_NO_FSBR 0x0020 /* UHCI-specific */ #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt * needed */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 0142f3af0da6..66a5cff7ee14 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -330,6 +330,7 @@ struct usb_gadget_ops { * @name: Identifies the controller hardware type. Used in diagnostics * and sometimes configuration. * @dev: Driver model state for this abstract device. + * @isoch_delay: value from Set Isoch Delay request. 
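The @hub_delay kernel-doc above spells the cached value out as parent->hub_delay + wHubDelay + tTPTransmissionDelay (40 ns), later used as the wValue of a SetIsochDelay request. A minimal sketch of that bookkeeping follows; the helper name is made up, and wHubDelay is assumed to have already been read from the parent hub's SuperSpeed hub descriptor.

/*
 * Illustration only: cache the isochronous delay for a newly
 * enumerated device, per the @hub_delay kernel-doc above.
 */
static u16 example_cache_hub_delay(struct usb_device *udev, u16 wHubDelay)
{
        u16 delay = wHubDelay + 40;     /* tTPTransmissionDelay, 40 ns */

        if (udev->parent)               /* root hub contributes nothing */
                delay += udev->parent->hub_delay;

        udev->hub_delay = delay;        /* wValue for SetIsochDelay */
        return delay;
}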
Only valid on SS/SSP * @out_epnum: last used out ep number * @in_epnum: last used in ep number * @mA: last set mA value @@ -394,6 +395,7 @@ struct usb_gadget { enum usb_device_state state; const char *name; struct device dev; + unsigned isoch_delay; unsigned out_epnum; unsigned in_epnum; unsigned mA; diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 6cbe7a5c2b57..dba55ccb9b53 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,13 +12,17 @@ #include <linux/usb/otg.h> #include <linux/usb/phy.h> +struct usb_device; + #if IS_ENABLED(CONFIG_OF) enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); -struct device_node *usb_of_get_child_node(struct device_node *parent, - int portnum); +struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1); +bool usb_of_has_combined_node(struct usb_device *udev); +struct device_node *usb_of_get_interface_node(struct usb_device *udev, + u8 config, u8 ifnum); struct device *usb_of_get_companion_dev(struct device *dev); #else static inline enum usb_dr_mode @@ -35,8 +39,17 @@ static inline int of_usb_update_otg_caps(struct device_node *np, { return 0; } -static inline struct device_node *usb_of_get_child_node - (struct device_node *parent, int portnum) +static inline struct device_node * +usb_of_get_device_node(struct usb_device *hub, int port1) +{ + return NULL; +} +static inline bool usb_of_has_combined_node(struct usb_device *udev) +{ + return false; +} +static inline struct device_node * +usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) { return NULL; } diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index e00051ced806..b3d41d7409b3 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -148,6 +148,8 @@ enum pd_pdo_type { (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) +#define VSAFE5V 5000 /* mv units */ + #define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ #define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ #define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index d92259f8de0a..2b64d23ace5c 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -65,7 +65,7 @@ #define CMD_EXIT_MODE 5 #define CMD_ATTENTION 6 -#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f)) +#define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f)) /* ChromeOS specific commands */ #define VDO_CMD_VERSION VDO_CMD_VENDOR(0) diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 67102f3d59d4..53924f8e840c 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -17,6 +17,7 @@ */ #ifndef RENESAS_USB_H #define RENESAS_USB_H +#include <linux/notifier.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> @@ -98,6 +99,13 @@ struct renesas_usbhs_platform_callback { * VBUS control is needed for Host */ int (*set_vbus)(struct platform_device *pdev, int enable); + + /* + * option: + * extcon notifier to set host/peripheral mode. 
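The new ->notifier hook in renesas_usbhs_platform_callback is described only as an optional extcon notifier used to switch host/peripheral mode. A hedged sketch of what a board integration might plug in; the interpretation of the event value below is an assumption made purely for illustration.

#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/usb/renesas_usbhs.h>

static int example_usbhs_extcon_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        /* Assumption: a non-zero extcon event means a host cable is attached. */
        pr_info("usbhs: switching to %s mode\n", event ? "host" : "peripheral");
        return NOTIFY_DONE;
}

static struct renesas_usbhs_platform_callback example_callback = {
        .notifier = example_usbhs_extcon_notifier,
};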
+ */ + int (*notifier)(struct notifier_block *nb, unsigned long event, + void *data); }; /* @@ -187,6 +195,7 @@ struct renesas_usbhs_driver_param { #define USBHS_TYPE_RCAR_GEN2 1 #define USBHS_TYPE_RCAR_GEN3 2 #define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 +#define USBHS_TYPE_RZA1 4 /* * option: diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 073197f0d2bb..ca1c0b57f03f 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -183,14 +183,14 @@ struct tcpm_port; struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); void tcpm_unregister_port(struct tcpm_port *port); -void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, - unsigned int nr_pdo); -void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, - unsigned int nr_pdo, - unsigned int max_snk_mv, - unsigned int max_snk_ma, - unsigned int max_snk_mw, - unsigned int operating_snk_mw); +int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, + unsigned int nr_pdo); +int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, + unsigned int nr_pdo, + unsigned int max_snk_mv, + unsigned int max_snk_ma, + unsigned int max_snk_mw, + unsigned int operating_snk_mw); void tcpm_vbus_change(struct tcpm_port *port); void tcpm_cc_change(struct tcpm_port *port); diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h new file mode 100644 index 000000000000..c71def6b310f --- /dev/null +++ b/include/linux/vbox_utils.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* Copyright (C) 2006-2016 Oracle Corporation */ + +#ifndef __VBOX_UTILS_H__ +#define __VBOX_UTILS_H__ + +#include <linux/printk.h> +#include <linux/vbox_vmmdev_types.h> + +struct vbg_dev; + +/** + * vboxguest logging functions, these log both to the backdoor and call + * the equivalent kernel pr_foo function. + */ +__printf(1, 2) void vbg_info(const char *fmt, ...); +__printf(1, 2) void vbg_warn(const char *fmt, ...); +__printf(1, 2) void vbg_err(const char *fmt, ...); + +/* Only use backdoor logging for non-dynamic debug builds */ +#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) +__printf(1, 2) void vbg_debug(const char *fmt, ...); +#else +#define vbg_debug pr_debug +#endif + +/** + * Allocate memory for generic request and initialize the request header. + * + * Return: the allocated memory + * @len: Size of memory block required for the request. + * @req_type: The generic request type. + */ +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); + +/** + * Perform a generic request. + * + * Return: VBox status code + * @gdev: The Guest extension device. + * @req: Pointer to the request structure. + */ +int vbg_req_perform(struct vbg_dev *gdev, void *req); + +int vbg_hgcm_connect(struct vbg_dev *gdev, + struct vmmdev_hgcm_service_location *loc, + u32 *client_id, int *vbox_status); + +int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); + +int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, + u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, + u32 parm_count, int *vbox_status); + +int vbg_hgcm_call32( + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, + int *vbox_status); + +/** + * Convert a VirtualBox status code to a standard Linux kernel return value. + * Return: 0 or negative errno value. + * @rc: VirtualBox status code to convert. 
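The vboxguest helpers declared above are enough to sketch a typical HGCM session from a hypothetical client driver: take a reference on the guest device, connect to a service, translate the VBox status into an errno, and drop the reference. Filling in the vmmdev_hgcm_service_location is elided here.

#include <linux/err.h>
#include <linux/vbox_utils.h>

static int example_hgcm_session(struct vmmdev_hgcm_service_location *loc)
{
        struct vbg_dev *gdev;
        u32 client_id;
        int vbox_status, ret;

        gdev = vbg_get_gdev();          /* ERR_PTR on failure, see above */
        if (IS_ERR(gdev))
                return PTR_ERR(gdev);

        ret = vbg_hgcm_connect(gdev, loc, &client_id, &vbox_status);
        if (!ret)
                ret = vbg_status_code_to_errno(vbox_status);
        if (ret)
                goto out;

        /* ... vbg_hgcm_call() exchanges with the service would go here ... */

        ret = vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
        if (!ret)
                ret = vbg_status_code_to_errno(vbox_status);
out:
        vbg_put_gdev(gdev);
        return ret;
}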
+ */ +int vbg_status_code_to_errno(int rc); + +/** + * Helper for the vboxsf driver to get a reference to the guest device. + * Return: a pointer to the gdev; or a ERR_PTR value on error. + */ +struct vbg_dev *vbg_get_gdev(void); + +/** + * Helper for the vboxsf driver to put a guest device reference. + * @gdev: Reference returned by vbg_get_gdev to put. + */ +void vbg_put_gdev(struct vbg_dev *gdev); + +#endif diff --git a/include/linux/vfio.h b/include/linux/vfio.h index a47b985341d1..66741ab087c1 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -145,7 +145,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add( extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); extern int vfio_info_add_capability(struct vfio_info_cap *caps, - int cap_type_id, void *cap_type); + struct vfio_info_cap_header *cap, + size_t size); extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, int max_irq_type, diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h new file mode 100644 index 000000000000..0d8bd6769b13 --- /dev/null +++ b/include/linux/visorbus.h @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2010 - 2013 UNISYS CORPORATION + * All rights reserved. + */ + +/* + * This header file is to be included by other kernel mode components that + * implement a particular kind of visor_device. Each of these other kernel + * mode components is called a visor device driver. Refer to visortemplate + * for a minimal sample visor device driver. + * + * There should be nothing in this file that is private to the visorbus + * bus implementation itself. + */ + +#ifndef __VISORBUS_H__ +#define __VISORBUS_H__ + +#include <linux/device.h> + +#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E') + +/* + * enum channel_serverstate + * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state. + * @CHANNELSRV_READY: Channel has been initialized by server. + */ +enum channel_serverstate { + CHANNELSRV_UNINITIALIZED = 0, + CHANNELSRV_READY = 1 +}; + +/* + * enum channel_clientstate + * @CHANNELCLI_DETACHED: + * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it + * unless given TBD* explicit request + * (should actually be < DETACHED). + * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach. + * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time. + * @CHANNELCLI_BUSY: Client either wants to use or is using channel. + * @CHANNELCLI_OWNED: "No worries" state - client can access channel + * anytime. + */ +enum channel_clientstate { + CHANNELCLI_DETACHED = 0, + CHANNELCLI_DISABLED = 1, + CHANNELCLI_ATTACHING = 2, + CHANNELCLI_ATTACHED = 3, + CHANNELCLI_BUSY = 4, + CHANNELCLI_OWNED = 5 +}; + +/* + * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that + * a guest can look at the FeatureFlags in the io channel, and configure the + * driver to use interrupts or not based on this setting. All feature bits for + * all channels should be defined here. The io channel feature bits are defined + * below. + */ +#define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1) +#define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3) +#define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4) +#define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5) +#define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) + +/* + * struct channel_header - Common Channel Header + * @signature: Signature. + * @legacy_state: DEPRECATED - being replaced by. 
+ * @header_size: sizeof(struct channel_header). + * @size: Total size of this channel in bytes. + * @features: Flags to modify behavior. + * @chtype: Channel type: data, bus, control, etc.. + * @partition_handle: ID of guest partition. + * @handle: Device number of this channel in client. + * @ch_space_offset: Offset in bytes to channel specific area. + * @version_id: Struct channel_header Version ID. + * @partition_index: Index of guest partition. + * @zone_uuid: Guid of Channel's zone. + * @cli_str_offset: Offset from channel header to null-terminated + * ClientString (0 if ClientString not present). + * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this + * channel. + * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see + * ServerStateUp, ServerStateDown, etc). + * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel. + * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>. + * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see + * ServerStateUp, ServerStateDown, etc). + * @srv_state: CHANNEL_SERVERSTATE. + * @cli_error_boot: Bits to indicate err states for boot clients, so err + * messages can be throttled. + * @cli_error_os: Bits to indicate err states for OS clients, so err + * messages can be throttled. + * @filler: Pad out to 128 byte cacheline. + * @recover_channel: Please add all new single-byte values below here. + */ +struct channel_header { + u64 signature; + u32 legacy_state; + /* SrvState, CliStateBoot, and CliStateOS below */ + u32 header_size; + u64 size; + u64 features; + guid_t chtype; + u64 partition_handle; + u64 handle; + u64 ch_space_offset; + u32 version_id; + u32 partition_index; + guid_t zone_guid; + u32 cli_str_offset; + u32 cli_state_boot; + u32 cmd_state_cli; + u32 cli_state_os; + u32 ch_characteristic; + u32 cmd_state_srv; + u32 srv_state; + u8 cli_error_boot; + u8 cli_error_os; + u8 filler[1]; + u8 recover_channel; +} __packed; + +#define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0) + +/* + * struct signal_queue_header - Subheader for the Signal Type variation of the + * Common Channel. + * @version: SIGNAL_QUEUE_HEADER Version ID. + * @chtype: Queue type: storage, network. + * @size: Total size of this queue in bytes. + * @sig_base_offset: Offset to signal queue area. + * @features: Flags to modify behavior. + * @num_sent: Total # of signals placed in this queue. + * @num_overflows: Total # of inserts failed due to full queue. + * @signal_size: Total size of a signal for this queue. + * @max_slots: Max # of slots in queue, 1 slot is always empty. + * @max_signals: Max # of signals in queue (MaxSignalSlots-1). + * @head: Queue head signal #. + * @num_received: Total # of signals removed from this queue. + * @tail: Queue tail signal. + * @reserved1: Reserved field. + * @reserved2: Reserved field. + * @client_queue: + * @num_irq_received: Total # of Interrupts received. This is incremented by the + * ISR in the guest windows driver. + * @num_empty: Number of times that visor_signal_remove is called and + * returned Empty Status. + * @errorflags: Error bits set during SignalReinit to denote trouble with + * client's fields. + * @filler: Pad out to 64 byte cacheline. 
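Taken together, the channel_header fields documented earlier in this hunk (a fixed signature plus a byte offset to the channel-specific area) suggest the access pattern sketched below. This is illustration only; the bus driver's real validation is visor_check_channel(), declared further down in this header.

static void *example_channel_space(struct channel_header *ch)
{
        /* Every visor channel starts with the signature defined above. */
        if (ch->signature != VISOR_CHANNEL_SIGNATURE)
                return NULL;

        /* ch_space_offset is in bytes from the start of the header. */
        return (u8 *)ch + ch->ch_space_offset;
}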
+ */ +struct signal_queue_header { + /* 1st cache line */ + u32 version; + u32 chtype; + u64 size; + u64 sig_base_offset; + u64 features; + u64 num_sent; + u64 num_overflows; + u32 signal_size; + u32 max_slots; + u32 max_signals; + u32 head; + /* 2nd cache line */ + u64 num_received; + u32 tail; + u32 reserved1; + u64 reserved2; + u64 client_queue; + u64 num_irq_received; + u64 num_empty; + u32 errorflags; + u8 filler[12]; +} __packed; + +/* VISORCHANNEL Guids */ +/* {414815ed-c58c-11da-95a9-00e08161165f} */ +#define VISOR_VHBA_CHANNEL_GUID \ + GUID_INIT(0x414815ed, 0xc58c, 0x11da, \ + 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) +#define VISOR_VHBA_CHANNEL_GUID_STR \ + "414815ed-c58c-11da-95a9-00e08161165f" +struct visorchipset_state { + u32 created:1; + u32 attached:1; + u32 configured:1; + u32 running:1; + /* Remaining bits in this 32-bit word are reserved. */ +}; + +/** + * struct visor_device - A device type for things "plugged" into the visorbus + * bus + * @visorchannel: Points to the channel that the device is + * associated with. + * @channel_type_guid: Identifies the channel type to the bus driver. + * @device: Device struct meant for use by the bus driver + * only. + * @list_all: Used by the bus driver to enumerate devices. + * @timer: Timer fired periodically to do interrupt-type + * activity. + * @being_removed: Indicates that the device is being removed from + * the bus. Private bus driver use only. + * @visordriver_callback_lock: Used by the bus driver to lock when adding and + * removing devices. + * @pausing: Indicates that a change towards a paused state. + * is in progress. Only modified by the bus driver. + * @resuming: Indicates that a change towards a running state + * is in progress. Only modified by the bus driver. + * @chipset_bus_no: Private field used by the bus driver. + * @chipset_dev_no: Private field used the bus driver. + * @state: Used to indicate the current state of the + * device. + * @inst: Unique GUID for this instance of the device. + * @name: Name of the device. + * @pending_msg_hdr: For private use by bus driver to respond to + * hypervisor requests. + * @vbus_hdr_info: A pointer to header info. Private use by bus + * driver. + * @partition_guid: Indicates client partion id. This should be the + * same across all visor_devices in the current + * guest. Private use by bus driver only. + */ +struct visor_device { + struct visorchannel *visorchannel; + guid_t channel_type_guid; + /* These fields are for private use by the bus driver only. */ + struct device device; + struct list_head list_all; + struct timer_list timer; + bool timer_active; + bool being_removed; + struct mutex visordriver_callback_lock; /* synchronize probe/remove */ + bool pausing; + bool resuming; + u32 chipset_bus_no; + u32 chipset_dev_no; + struct visorchipset_state state; + guid_t inst; + u8 *name; + struct controlvm_message_header *pending_msg_hdr; + void *vbus_hdr_info; + guid_t partition_guid; + struct dentry *debugfs_dir; + struct dentry *debugfs_bus_info; +}; + +#define to_visor_device(x) container_of(x, struct visor_device, device) + +typedef void (*visorbus_state_complete_func) (struct visor_device *dev, + int status); + +/* + * This struct describes a specific visor channel, by providing its GUID, name, + * and sizes. 
+ */ +struct visor_channeltype_descriptor { + const guid_t guid; + const char *name; + u64 min_bytes; + u32 version; +}; + +/** + * struct visor_driver - Information provided by each visor driver when it + * registers with the visorbus driver + * @name: Name of the visor driver. + * @owner: The module owner. + * @channel_types: Types of channels handled by this driver, ending with + * a zero GUID. Our specialized BUS.match() method knows + * about this list, and uses it to determine whether this + * driver will in fact handle a new device that it has + * detected. + * @probe: Called when a new device comes online, by our probe() + * function specified by driver.probe() (triggered + * ultimately by some call to driver_register(), + * bus_add_driver(), or driver_attach()). + * @remove: Called when a new device is removed, by our remove() + * function specified by driver.remove() (triggered + * ultimately by some call to device_release_driver()). + * @channel_interrupt: Called periodically, whenever there is a possiblity + * that "something interesting" may have happened to the + * channel. + * @pause: Called to initiate a change of the device's state. If + * the return valu`e is < 0, there was an error and the + * state transition will NOT occur. If the return value + * is >= 0, then the state transition was INITIATED + * successfully, and complete_func() will be called (or + * was just called) with the final status when either the + * state transition fails or completes successfully. + * @resume: Behaves similar to pause. + * @driver: Private reference to the device driver. For use by bus + * driver only. + */ +struct visor_driver { + const char *name; + struct module *owner; + struct visor_channeltype_descriptor *channel_types; + int (*probe)(struct visor_device *dev); + void (*remove)(struct visor_device *dev); + void (*channel_interrupt)(struct visor_device *dev); + int (*pause)(struct visor_device *dev, + visorbus_state_complete_func complete_func); + int (*resume)(struct visor_device *dev, + visorbus_state_complete_func complete_func); + + /* These fields are for private use by the bus driver only. 
*/ + struct device_driver driver; +}; + +#define to_visor_driver(x) (container_of(x, struct visor_driver, driver)) + +int visor_check_channel(struct channel_header *ch, struct device *dev, + const guid_t *expected_uuid, char *chname, + u64 expected_min_bytes, u32 expected_version, + u64 expected_signature); + +int visorbus_register_visor_driver(struct visor_driver *drv); +void visorbus_unregister_visor_driver(struct visor_driver *drv); +int visorbus_read_channel(struct visor_device *dev, + unsigned long offset, void *dest, + unsigned long nbytes); +int visorbus_write_channel(struct visor_device *dev, + unsigned long offset, void *src, + unsigned long nbytes); +int visorbus_enable_channel_interrupts(struct visor_device *dev); +void visorbus_disable_channel_interrupts(struct visor_device *dev); + +int visorchannel_signalremove(struct visorchannel *channel, u32 queue, + void *msg); +int visorchannel_signalinsert(struct visorchannel *channel, u32 queue, + void *msg); +bool visorchannel_signalempty(struct visorchannel *channel, u32 queue); +const guid_t *visorchannel_get_guid(struct visorchannel *channel); + +#define BUS_ROOT_DEVICE UINT_MAX +struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, + struct visor_device *from); +#endif diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 1779c9817b39..a4c2317d8b9f 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -216,23 +216,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, return x; } -static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat, - enum node_stat_item item) -{ - long x = atomic_long_read(&pgdat->vm_stat[item]); - -#ifdef CONFIG_SMP - int cpu; - for_each_online_cpu(cpu) - x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item]; - - if (x < 0) - x = 0; -#endif - return x; -} - - #ifdef CONFIG_NUMA extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); extern unsigned long sum_zone_node_page_state(int node, diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index d58594a32324..78901ecd2f95 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h @@ -10,16 +10,15 @@ #ifndef _LINUX_W1_GPIO_H #define _LINUX_W1_GPIO_H +struct gpio_desc; + /** * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio - * @pin: GPIO pin to use - * @is_open_drain: GPIO pin is configured as open drain */ struct w1_gpio_platform_data { - unsigned int pin; - unsigned int is_open_drain:1; + struct gpio_desc *gpiod; + struct gpio_desc *pullup_gpiod; void (*enable_external_pullup)(int enable); - unsigned int ext_pullup_enable_pin; unsigned int pullup_duration; }; diff --git a/include/linux/wait.h b/include/linux/wait.h index 158715445ffb..55a611486bac 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); /* * Wakeup macros to be used to report events to the targets. 
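Stepping back to the visorbus declarations earlier in this hunk: a minimal, hypothetical function driver would declare a channel-type table terminated by a zero GUID and register it with visorbus_register_visor_driver(). The names below are made up, and the GUID reuses the vHBA one defined above purely as an example.

#include <linux/module.h>

static struct visor_channeltype_descriptor example_channel_types[] = {
        { VISOR_VHBA_CHANNEL_GUID, "example", sizeof(struct channel_header), 1 },
        {}                              /* zero GUID terminates the table */
};

static int example_probe(struct visor_device *dev)
{
        return 0;                       /* accept the device */
}

static void example_remove(struct visor_device *dev)
{
}

static struct visor_driver example_visor_driver = {
        .name           = "example",
        .owner          = THIS_MODULE,
        .channel_types  = example_channel_types,
        .probe          = example_probe,
        .remove         = example_remove,
};

/* module init/exit would pair these two calls:                    */
/*      visorbus_register_visor_driver(&example_visor_driver);    */
/*      visorbus_unregister_visor_driver(&example_visor_driver);  */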
*/ +#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) +#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) #define wake_up_poll(x, m) \ - __wake_up(x, TASK_NORMAL, 1, (void *) (m)) + __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) #define wake_up_locked_poll(x, m) \ - __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) + __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) #define wake_up_interruptible_poll(x, m) \ - __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) + __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define wake_up_interruptible_sync_poll(x, m) \ - __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) + __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m)) #define ___wait_cond_timeout(condition) \ ({ \ diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 004ba807df96..7238865e75b0 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); +bool zpool_evictable(struct zpool *pool); + #endif diff --git a/include/media/cec.h b/include/media/cec.h index 16341210d3ba..7cdf71d7125a 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -122,6 +122,7 @@ struct cec_adap_ops { /* Low-level callbacks */ int (*adap_enable)(struct cec_adapter *adap, bool enable); int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable); + int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable); int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg); @@ -191,11 +192,6 @@ struct cec_adapter { u32 tx_timeouts; -#ifdef CONFIG_MEDIA_CEC_RC - bool rc_repeating; - int rc_last_scancode; - u64 rc_last_keypress; -#endif #ifdef CONFIG_CEC_NOTIFIER struct cec_notifier *notifier; #endif @@ -229,6 +225,18 @@ static inline bool cec_is_sink(const struct cec_adapter *adap) return adap->phys_addr == 0; } +/** + * cec_is_registered() - is the CEC adapter registered? + * + * @adap: the CEC adapter, may be NULL. + * + * Return: true if the adapter is registered, false otherwise. + */ +static inline bool cec_is_registered(const struct cec_adapter *adap) +{ + return adap && adap->devnode.registered; +} + #define cec_phys_addr_exp(pa) \ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf diff --git a/include/media/demux.h b/include/media/demux.h new file mode 100644 index 000000000000..c4df6cee48e6 --- /dev/null +++ b/include/media/demux.h @@ -0,0 +1,589 @@ +/* + * demux.h + * + * The Kernel Digital TV Demux kABI defines a driver-internal interface for + * registering low-level, hardware specific driver to a hardware independent + * demux layer. + * + * Copyright (c) 2002 Convergence GmbH + * + * based on code: + * Copyright (c) 2000 Nokia Research Center + * Tampere, FINLAND + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
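With the poll_to_key()/key_to_poll() conversions in the wait.h hunk above, the wakeup key carried to wait-queue callbacks is now a typed __poll_t mask rather than a bare pointer cast. A small sketch of both sides, assuming the usual EPOLL* constants from the poll headers:

#include <linux/wait.h>
#include <linux/poll.h>

static void example_report_readable(struct wait_queue_head *wq)
{
        /* Producer side: report POLLIN-style readiness to pollers. */
        wake_up_interruptible_poll(wq, EPOLLIN | EPOLLRDNORM);
}

static int example_wake_fn(struct wait_queue_entry *wq_entry,
                           unsigned int mode, int sync, void *key)
{
        __poll_t events = key_to_poll(key);     /* NULL key becomes 0 */

        if (key && !(events & (EPOLLIN | EPOLLRDNORM)))
                return 0;                       /* not the event we sleep on */

        return autoremove_wake_function(wq_entry, mode, sync, key);
}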
+ * + */ + +#ifndef __DEMUX_H +#define __DEMUX_H + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/time.h> +#include <linux/dvb/dmx.h> + +/* + * Common definitions + */ + +/* + * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter. + */ + +#ifndef DMX_MAX_FILTER_SIZE +#define DMX_MAX_FILTER_SIZE 18 +#endif + +/* + * DMX_MAX_SECFEED_SIZE: Maximum length (in bytes) of a private section feed + * filter. + */ + +#ifndef DMX_MAX_SECTION_SIZE +#define DMX_MAX_SECTION_SIZE 4096 +#endif +#ifndef DMX_MAX_SECFEED_SIZE +#define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) +#endif + +/* + * TS packet reception + */ + +/** + * enum ts_filter_type - filter type bitmap for dmx_ts_feed.set\(\) + * + * @TS_PACKET: Send TS packets (188 bytes) to callback (default). + * @TS_PAYLOAD_ONLY: In case TS_PACKET is set, only send the TS payload + * (<=184 bytes per packet) to callback + * @TS_DECODER: Send stream to built-in decoder (if present). + * @TS_DEMUX: In case TS_PACKET is set, send the TS to the demux + * device, not to the dvr device + */ +enum ts_filter_type { + TS_PACKET = 1, + TS_PAYLOAD_ONLY = 2, + TS_DECODER = 4, + TS_DEMUX = 8, +}; + +/** + * struct dmx_ts_feed - Structure that contains a TS feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @set: sets the TS filter + * @start_filtering: starts TS filtering + * @stop_filtering: stops TS filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. + */ +struct dmx_ts_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + int (*set)(struct dmx_ts_feed *feed, + u16 pid, + int type, + enum dmx_ts_pes pes_type, + ktime_t timeout); + int (*start_filtering)(struct dmx_ts_feed *feed); + int (*stop_filtering)(struct dmx_ts_feed *feed); +}; + +/* + * Section reception + */ + +/** + * struct dmx_section_filter - Structure that describes a section filter + * + * @filter_value: Contains up to 16 bytes (128 bits) of the TS section header + * that will be matched by the section filter + * @filter_mask: Contains a 16 bytes (128 bits) filter mask with the bits + * specified by @filter_value that will be used on the filter + * match logic. + * @filter_mode: Contains a 16 bytes (128 bits) filter mode. + * @parent: Pointer to struct dmx_section_feed. + * @priv: Pointer to private data of the API client. + * + * + * The @filter_mask controls which bits of @filter_value are compared with + * the section headers/payload. On a binary value of 1 in filter_mask, the + * corresponding bits are compared. The filter only accepts sections that are + * equal to filter_value in all the tested bit positions. + */ +struct dmx_section_filter { + u8 filter_value[DMX_MAX_FILTER_SIZE]; + u8 filter_mask[DMX_MAX_FILTER_SIZE]; + u8 filter_mode[DMX_MAX_FILTER_SIZE]; + struct dmx_section_feed *parent; /* Back-pointer */ + void *priv; /* Pointer to private data of the API client */ +}; + +/** + * struct dmx_section_feed - Structure that contains a section feed filter + * + * @is_filtering: Set to non-zero when filtering in progress + * @parent: pointer to struct dmx_demux + * @priv: pointer to private data of the API client + * @check_crc: If non-zero, check the CRC values of filtered sections. 
+ * @set: sets the section filter + * @allocate_filter: This function is used to allocate a section filter on + * the demux. It should only be called when no filtering + * is in progress on this section feed. If a filter cannot + * be allocated, the function fails with -ENOSPC. + * @release_filter: This function releases all the resources of a + * previously allocated section filter. The function + * should not be called while filtering is in progress + * on this section feed. After calling this function, + * the caller should not try to dereference the filter + * pointer. + * @start_filtering: starts section filtering + * @stop_filtering: stops section filtering + * + * A TS feed is typically mapped to a hardware PID filter on the demux chip. + * Using this API, the client can set the filtering properties to start/stop + * filtering TS packets on a particular TS feed. + */ +struct dmx_section_feed { + int is_filtering; + struct dmx_demux *parent; + void *priv; + + int check_crc; + + /* private: Used internally at dvb_demux.c */ + u32 crc_val; + + u8 *secbuf; + u8 secbuf_base[DMX_MAX_SECFEED_SIZE]; + u16 secbufp, seclen, tsfeedp; + + /* public: */ + int (*set)(struct dmx_section_feed *feed, + u16 pid, + int check_crc); + int (*allocate_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter **filter); + int (*release_filter)(struct dmx_section_feed *feed, + struct dmx_section_filter *filter); + int (*start_filtering)(struct dmx_section_feed *feed); + int (*stop_filtering)(struct dmx_section_feed *feed); +}; + +/** + * typedef dmx_ts_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered TS packets. + * @buffer1_length: Length of the TS data in buffer1. + * @buffer2: Pointer to the tail of the filtered TS packets, or NULL. + * @buffer2_length: Length of the TS data in buffer2. + * @source: Indicates which TS feed is the source of the callback. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when filtering + * on a TS feed has been enabled using the start_filtering\(\) function at + * the &dmx_demux. + * Any TS packets that match the filter settings are copied to a circular + * buffer. The filtered TS packets are delivered to the client using this + * callback function. + * It is expected that the @buffer1 and @buffer2 callback parameters point to + * addresses within the circular buffer, but other implementations are also + * possible. Note that the called party should not try to free the memory + * the @buffer1 and @buffer2 parameters point to. + * + * When this function is called, the @buffer1 parameter typically points to + * the start of the first undelivered TS packet within a circular buffer. + * The @buffer2 buffer parameter is normally NULL, except when the received + * TS packets have crossed the last address of the circular buffer and + * "wrapped" to the beginning of the buffer. In the latter case the @buffer1 + * parameter would contain an address within the circular buffer, while the + * @buffer2 parameter would contain the first address of the circular buffer. + * The number of bytes delivered with this function (i.e. 
@buffer1_length + + * @buffer2_length) is usually equal to the value of callback_length parameter + * given in the set() function, with one exception: if a timeout occurs before + * receiving callback_length bytes of TS data, any undelivered packets are + * immediately delivered to the client by calling this function. The timeout + * duration is controlled by the set() function in the TS Feed API. + * + * If a TS packet is received with errors that could not be fixed by the + * TS-level forward error correction (FEC), the Transport_error_indicator + * flag of the TS packet header should be set. The TS packet should not be + * discarded, as the error can possibly be corrected by a higher layer + * protocol. If the called party is slow in processing the callback, it + * is possible that the circular buffer eventually fills up. If this happens, + * the demux driver should discard any TS packets received while the buffer + * is full and return -EOVERFLOW. + * + * The type of data returned to the callback can be selected by the + * &dmx_ts_feed.@set function. The type parameter decides if the raw + * TS packet (TS_PACKET) or just the payload (TS_PACKET|TS_PAYLOAD_ONLY) + * should be returned. If additionally the TS_DECODER bit is set the stream + * will also be sent to the hardware MPEG decoder. + * + * Return: + * + * - 0, on success; + * + * - -EOVERFLOW, on buffer overflow. + */ +typedef int (*dmx_ts_cb)(const u8 *buffer1, + size_t buffer1_length, + const u8 *buffer2, + size_t buffer2_length, + struct dmx_ts_feed *source); + +/** + * typedef dmx_section_cb - DVB demux TS filter callback function prototype + * + * @buffer1: Pointer to the start of the filtered section, e.g. + * within the circular buffer of the demux driver. + * @buffer1_len: Length of the filtered section data in @buffer1, + * including headers and CRC. + * @buffer2: Pointer to the tail of the filtered section data, + * or NULL. Useful to handle the wrapping of a + * circular buffer. + * @buffer2_len: Length of the filtered section data in @buffer2, + * including headers and CRC. + * @source: Indicates which section feed is the source of the + * callback. + * + * This function callback prototype, provided by the client of the demux API, + * is called from the demux code. The function is only called when + * filtering of sections has been enabled using the function + * &dmx_ts_feed.@start_filtering. When the demux driver has received a + * complete section that matches at least one section filter, the client + * is notified via this callback function. Normally this function is called + * for each received section; however, it is also possible to deliver + * multiple sections with one callback, for example when the system load + * is high. If an error occurs while receiving a section, this + * function should be called with the corresponding error type set in the + * success field, whether or not there is data to deliver. The Section Feed + * implementation should maintain a circular buffer for received sections. + * However, this is not necessary if the Section Feed API is implemented as + * a client of the TS Feed API, because the TS Feed implementation then + * buffers the received data. The size of the circular buffer can be + * configured using the &dmx_ts_feed.@set function in the Section Feed API. + * If there is no room in the circular buffer when a new section is received, + * the section must be discarded. If this happens, the value of the success + * parameter should be DMX_OVERRUN_ERROR on the next callback. 
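The dmx_ts_cb contract above (buffer2 is only used when the demux's circular buffer wrapped, and -EOVERFLOW signals that the client cannot keep up) maps onto a small client callback such as the following sketch, which simply accumulates packets into a private buffer.

#include <linux/string.h>

static u8 example_buf[64 * 188];
static size_t example_len;

static int example_ts_cb(const u8 *buffer1, size_t buffer1_length,
                         const u8 *buffer2, size_t buffer2_length,
                         struct dmx_ts_feed *source)
{
        if (example_len + buffer1_length + buffer2_length > sizeof(example_buf))
                return -EOVERFLOW;              /* full: drop, as the API allows */

        memcpy(example_buf + example_len, buffer1, buffer1_length);
        example_len += buffer1_length;

        if (buffer2) {                          /* wrap-around tail */
                memcpy(example_buf + example_len, buffer2, buffer2_length);
                example_len += buffer2_length;
        }

        return 0;
}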
+ */ +typedef int (*dmx_section_cb)(const u8 *buffer1, + size_t buffer1_len, + const u8 *buffer2, + size_t buffer2_len, + struct dmx_section_filter *source); + +/* + * DVB Front-End + */ + +/** + * enum dmx_frontend_source - Used to identify the type of frontend + * + * @DMX_MEMORY_FE: The source of the demux is memory. It means that + * the MPEG-TS to be filtered comes from userspace, + * via write() syscall. + * + * @DMX_FRONTEND_0: The source of the demux is a frontend connected + * to the demux. + */ +enum dmx_frontend_source { + DMX_MEMORY_FE, + DMX_FRONTEND_0, +}; + +/** + * struct dmx_frontend - Structure that lists the frontends associated with + * a demux + * + * @connectivity_list: List of front-ends that can be connected to a + * particular demux; + * @source: Type of the frontend. + * + * FIXME: this structure should likely be replaced soon by some + * media-controller based logic. + */ +struct dmx_frontend { + struct list_head connectivity_list; + enum dmx_frontend_source source; +}; + +/* + * MPEG-2 TS Demux + */ + +/** + * enum dmx_demux_caps - MPEG-2 TS Demux capabilities bitmap + * + * @DMX_TS_FILTERING: set if TS filtering is supported; + * @DMX_SECTION_FILTERING: set if section filtering is supported; + * @DMX_MEMORY_BASED_FILTERING: set if write() available. + * + * Those flags are OR'ed in the &dmx_demux.capabilities field + */ +enum dmx_demux_caps { + DMX_TS_FILTERING = 1, + DMX_SECTION_FILTERING = 4, + DMX_MEMORY_BASED_FILTERING = 8, +}; + +/* + * Demux resource type identifier. + */ + +/** + * DMX_FE_ENTRY - Casts elements in the list of registered + * front-ends from the generic type struct list_head + * to the type * struct dmx_frontend + * + * @list: list of struct dmx_frontend + */ +#define DMX_FE_ENTRY(list) \ + list_entry(list, struct dmx_frontend, connectivity_list) + +/** + * struct dmx_demux - Structure that contains the demux capabilities and + * callbacks. + * + * @capabilities: Bitfield of capability flags. + * + * @frontend: Front-end connected to the demux + * + * @priv: Pointer to private data of the API client + * + * @open: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EUSERS, if maximum usage count was reached; + * -EINVAL, on bad parameter. + * + * @close: This function reserves the demux for use by the caller and, if + * necessary, initializes the demux. When the demux is no longer needed, + * the function @close should be called. It should be possible for + * multiple clients to access the demux at the same time. Thus, the + * function implementation should increment the demux usage count when + * @open is called and decrement it when @close is called. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if demux was not in use (e. g. no users); + * -EINVAL, on bad parameter. + * + * @write: This function provides the demux driver with a memory buffer + * containing TS packets. 
Instead of receiving TS packets from the DVB + * front-end, the demux driver software will read packets from memory. + * Any clients of this demux with active TS, PES or Section filters will + * receive filtered data via the Demux callback API (see 0). The function + * returns when all the data in the buffer has been consumed by the demux. + * Demux hardware typically cannot read TS from memory. If this is the + * case, memory-based filtering has to be implemented entirely in software. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @buf function parameter contains a pointer to the TS data in + * kernel-space memory. + * The @count function parameter contains the length of the TS data. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EINTR, if a signal handling is pending; + * -ENODEV, if demux was removed; + * -EINVAL, on bad parameter. + * + * @allocate_ts_feed: Allocates a new TS feed, which is used to filter the TS + * packets carrying a certain PID. The TS feed normally corresponds to a + * hardware PID filter on the demux chip. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -ERESTARTSYS, if mutex lock was interrupted; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_ts_feed: Releases the resources allocated with @allocate_ts_feed. + * Any filtering in progress on the TS feed should be stopped before + * calling this function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @allocate_section_feed: Allocates a new section feed, i.e. a demux resource + * for filtering and receiving sections. On platforms with hardware + * support for section filtering, a section feed is directly mapped to + * the demux HW. On other platforms, TS packets are first PID filtered in + * hardware and a hardware section filter then emulated in software. The + * caller obtains an API pointer of type dmx_section_feed_t as an out + * parameter. Using this API the caller can set filtering parameters and + * start receiving sections. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * The @callback function parameter contains a pointer to the callback + * function for passing received TS packet. + * It returns: + * 0 on success; + * -EBUSY, if no more TS feeds is available; + * -EINVAL, on bad parameter. + * + * @release_section_feed: Releases the resources allocated with + * @allocate_section_feed, including allocated filters. Any filtering in + * progress on the section feed should be stopped before calling this + * function. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @feed function parameter contains a pointer to the TS feed API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. 
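Reading the @allocate_ts_feed/@release_ts_feed descriptions above together with the dmx_ts_feed operations earlier in this header gives the usual client sequence. A hedged sketch follows; the PID is arbitrary, and example_ts_cb is the callback sketched a little earlier.

static int example_start_ts_filter(struct dmx_demux *demux,
                                   struct dmx_ts_feed **feed)
{
        int ret;

        ret = demux->allocate_ts_feed(demux, feed, example_ts_cb);
        if (ret)
                return ret;

        /* Raw 188-byte packets, no decoder involvement, no timeout. */
        ret = (*feed)->set(*feed, 0x100, TS_PACKET, DMX_PES_OTHER, 0);
        if (ret) {
                demux->release_ts_feed(demux, *feed);
                return ret;
        }

        return (*feed)->start_filtering(*feed);
}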
+ * + * @add_frontend: Registers a connectivity between a demux and a front-end, + * i.e., indicates that the demux can be connected via a call to + * @connect_frontend to use the given front-end as a TS source. The + * client of this function has to allocate dynamic or static memory for + * the frontend structure and initialize its fields before calling this + * function. This function is normally called during the driver + * initialization. The caller must not free the memory of the frontend + * struct before successfully calling @remove_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @remove_frontend: Indicates that the given front-end, registered by a call + * to @add_frontend, can no longer be connected as a TS source by this + * demux. The function should be called when a front-end driver or a demux + * driver is removed from the system. If the front-end is in use, the + * function fails with the return value of -EBUSY. After successfully + * calling this function, the caller can free the memory of the frontend + * struct if it was dynamically allocated before the @add_frontend + * operation. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -ENODEV, if the front-end was not found, + * -EINVAL, on bad parameter. + * + * @get_frontends: Provides the APIs of the front-ends that have been + * registered for this demux. Any of the front-ends obtained with this + * call can be used as a parameter for @connect_frontend. The include + * file demux.h contains the macro DMX_FE_ENTRY() for converting an + * element of the generic type struct &list_head * to the type + * struct &dmx_frontend *. The caller must not free the memory of any of + * the elements obtained via this function call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns a struct list_head pointer to the list of front-end + * interfaces, or NULL in the case of an empty list. + * + * @connect_frontend: Connects the TS output of the front-end to the input of + * the demux. A demux can only be connected to a front-end registered to + * the demux with the function @add_frontend. It may or may not be + * possible to connect multiple demuxes to the same front-end, depending + * on the capabilities of the HW platform. When not used, the front-end + * should be released by calling @disconnect_frontend. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * The @frontend function parameter contains a pointer to the front-end + * instance data. + * It returns: + * 0 on success; + * -EINVAL, on bad parameter. + * + * @disconnect_frontend: Disconnects the demux and a front-end previously + * connected by a @connect_frontend call. + * The @demux function parameter contains a pointer to the demux API and + * instance data. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + * + * @get_pes_pids: Get the PIDs for DMX_PES_AUDIO0, DMX_PES_VIDEO0, + * DMX_PES_TELETEXT0, DMX_PES_SUBTITLE0 and DMX_PES_PCR0. + * The @demux function parameter contains a pointer to the demux API and + * instance data. 
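The @get_frontends/@connect_frontend documentation above, together with the DMX_FE_ENTRY() helper defined earlier, suggests the following sketch for hooking a demux to its memory front-end before feeding it TS data via write().

static int example_connect_memory_fe(struct dmx_demux *demux)
{
        struct list_head *head, *pos;

        head = demux->get_frontends(demux);
        if (!head)
                return -ENODEV;                 /* empty front-end list */

        list_for_each(pos, head) {
                struct dmx_frontend *fe = DMX_FE_ENTRY(pos);

                if (fe->source == DMX_MEMORY_FE)
                        return demux->connect_frontend(demux, fe);
        }

        return -ENODEV;
}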
+ * The @pids function parameter contains an array with five u16 elements + * where the PIDs will be stored. + * It returns: + * 0 on success; + * -EINVAL on bad parameter. + */ +struct dmx_demux { + enum dmx_demux_caps capabilities; + struct dmx_frontend *frontend; + void *priv; + int (*open)(struct dmx_demux *demux); + int (*close)(struct dmx_demux *demux); + int (*write)(struct dmx_demux *demux, const char __user *buf, + size_t count); + int (*allocate_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed **feed, + dmx_ts_cb callback); + int (*release_ts_feed)(struct dmx_demux *demux, + struct dmx_ts_feed *feed); + int (*allocate_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed **feed, + dmx_section_cb callback); + int (*release_section_feed)(struct dmx_demux *demux, + struct dmx_section_feed *feed); + int (*add_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*remove_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + struct list_head *(*get_frontends)(struct dmx_demux *demux); + int (*connect_frontend)(struct dmx_demux *demux, + struct dmx_frontend *frontend); + int (*disconnect_frontend)(struct dmx_demux *demux); + + int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids); + + /* private: */ + + /* + * Only used at av7110, to read some data from firmware. + * As this was never documented, we have no clue about what's + * there, and its usage on other drivers aren't encouraged. + */ + int (*get_stc)(struct dmx_demux *demux, unsigned int num, + u64 *stc, unsigned int *base); +}; + +#endif /* #ifndef __DEMUX_H */ diff --git a/include/media/dmxdev.h b/include/media/dmxdev.h new file mode 100644 index 000000000000..2f5cb2c7b6a7 --- /dev/null +++ b/include/media/dmxdev.h @@ -0,0 +1,212 @@ +/* + * dmxdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DMXDEV_H_ +#define _DMXDEV_H_ + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/time.h> +#include <linux/timer.h> +#include <linux/wait.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include <linux/dvb/dmx.h> + +#include <media/dvbdev.h> +#include <media/demux.h> +#include <media/dvb_ringbuffer.h> +#include <media/dvb_vb2.h> + +/** + * enum dmxdev_type - type of demux filter type. + * + * @DMXDEV_TYPE_NONE: no filter set. + * @DMXDEV_TYPE_SEC: section filter. + * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter. + */ +enum dmxdev_type { + DMXDEV_TYPE_NONE, + DMXDEV_TYPE_SEC, + DMXDEV_TYPE_PES, +}; + +/** + * enum dmxdev_state - state machine for the dmxdev. + * + * @DMXDEV_STATE_FREE: indicates that the filter is freed. + * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMXDEV_STATE_SET: indicates that the filter parameters are set. + * @DMXDEV_STATE_GO: indicates that the filter is running. 
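(Illustrative aside, not part of the patch: a hedged sketch of how a bridge driver might use the front-end registration operations of &struct dmx_demux shown above. The hw_frontend argument and the error handling are assumptions; the dmx_frontend fields are presumed to be initialized as the @add_frontend documentation requires.)

static int example_attach_frontend(struct dmx_demux *demux,
				   struct dmx_frontend *hw_frontend)
{
	int ret;

	/* hw_frontend must stay allocated until remove_frontend() succeeds */
	ret = demux->add_frontend(demux, hw_frontend);
	if (ret)
		return ret;

	ret = demux->connect_frontend(demux, hw_frontend);
	if (ret)
		demux->remove_frontend(demux, hw_frontend);

	return ret;
}

static void example_detach_frontend(struct dmx_demux *demux,
				    struct dmx_frontend *hw_frontend)
{
	demux->disconnect_frontend(demux);
	demux->remove_frontend(demux, hw_frontend);
}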
+ * @DMXDEV_STATE_DONE: indicates that a packet was already filtered + * and the filter is now disabled. + * Set only if %DMX_ONESHOT. See + * &dmx_sct_filter_params. + * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition. + */ +enum dmxdev_state { + DMXDEV_STATE_FREE, + DMXDEV_STATE_ALLOCATED, + DMXDEV_STATE_SET, + DMXDEV_STATE_GO, + DMXDEV_STATE_DONE, + DMXDEV_STATE_TIMEDOUT +}; + +/** + * struct dmxdev_feed - digital TV dmxdev feed + * + * @pid: Program ID to be filtered + * @ts: pointer to &struct dmx_ts_feed + * @next: &struct list_head pointing to the next feed. + */ + +struct dmxdev_feed { + u16 pid; + struct dmx_ts_feed *ts; + struct list_head next; +}; + +/** + * struct dmxdev_filter - digital TV dmxdev filter + * + * @filter: a union describing a dmxdev filter. + * Currently used only for section filters. + * @filter.sec: a &struct dmx_section_filter pointer. + * For section filter only. + * @feed: a union describing a dmxdev feed. + * Depending on the filter type, it can be either + * @feed.ts or @feed.sec. + * @feed.ts: a &struct list_head list. + * For TS and PES feeds. + * @feed.sec: a &struct dmx_section_feed pointer. + * For section feed only. + * @params: a union describing dmxdev filter parameters. + * Depending on the filter type, it can be either + * @params.sec or @params.pes. + * @params.sec: a &struct dmx_sct_filter_params embedded struct. + * For section filter only. + * @params.pes: a &struct dmx_pes_filter_params embedded struct. + * For PES filter only. + * @type: type of the dmxdev filter, as defined by &enum dmxdev_type. + * @state: state of the dmxdev filter, as defined by &enum dmxdev_state. + * @dev: pointer to &struct dmxdev. + * @buffer: an embedded &struct dvb_ringbuffer buffer. + * @vb2_ctx: control struct for VB2 handler + * @mutex: protects the access to &struct dmxdev_filter. + * @timer: &struct timer_list embedded timer, used to check for + * feed timeouts. + * Only for section filter. + * @todo: index for the @secheader. + * Only for section filter. + * @secheader: buffer cache to parse the section header. + * Only for section filter. + */ +struct dmxdev_filter { + union { + struct dmx_section_filter *sec; + } filter; + + union { + /* list of TS and PES feeds (struct dmxdev_feed) */ + struct list_head ts; + struct dmx_section_feed *sec; + } feed; + + union { + struct dmx_sct_filter_params sec; + struct dmx_pes_filter_params pes; + } params; + + enum dmxdev_type type; + enum dmxdev_state state; + struct dmxdev *dev; + struct dvb_ringbuffer buffer; + struct dvb_vb2_ctx vb2_ctx; + + struct mutex mutex; + + /* only for sections */ + struct timer_list timer; + int todo; + u8 secheader[3]; +}; + +/** + * struct dmxdev - Describes a digital TV demux device. + * + * @dvbdev: pointer to &struct dvb_device associated with + * the demux device node. + * @dvr_dvbdev: pointer to &struct dvb_device associated with + * the dvr device node. + * @filter: pointer to &struct dmxdev_filter. + * @demux: pointer to &struct dmx_demux. + * @filternum: number of filters. + * @capabilities: demux capabilities as defined by &enum dmx_demux_caps. + * @exit: flag to indicate that the demux is being released. + * @dvr_orig_fe: pointer to &struct dmx_frontend. + * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output. + * @dvr_vb2_ctx: control struct for VB2 handler + * @mutex: protects the usage of this structure. + * @lock: protects access to &dmxdev->filter->data. 
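(Illustrative aside, not part of the patch: a minimal sketch of how a bridge driver might fill the embedded &struct dmxdev before registering it with dvb_dmxdev_init(), declared further below. The struct my_dvb container and the chosen filternum value are assumptions.)

/* struct my_dvb is a hypothetical driver state holding dmxdev, demux and adapter */
static int example_register_dmxdev(struct my_dvb *dvb)
{
	dvb->dmxdev.filternum = 256;
	dvb->dmxdev.demux = &dvb->demux.dmx;	/* dmx_demux embedded in the driver's dvb_demux */
	dvb->dmxdev.capabilities = 0;

	return dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
}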
+ */ +struct dmxdev { + struct dvb_device *dvbdev; + struct dvb_device *dvr_dvbdev; + + struct dmxdev_filter *filter; + struct dmx_demux *demux; + + int filternum; + int capabilities; + + unsigned int exit:1; +#define DMXDEV_CAP_DUPLEX 1 + struct dmx_frontend *dvr_orig_fe; + + struct dvb_ringbuffer dvr_buffer; +#define DVR_BUFFER_SIZE (10*188*1024) + + struct dvb_vb2_ctx dvr_vb2_ctx; + + struct mutex mutex; + spinlock_t lock; +}; + +/** + * dvb_dmxdev_init - initializes a digital TV demux and registers both demux + * and DVR devices. + * + * @dmxdev: pointer to &struct dmxdev. + * @adap: pointer to &struct dvb_adapter. + */ +int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap); + +/** + * dvb_dmxdev_release - releases a digital TV demux and unregisters it. + * + * @dmxdev: pointer to &struct dmxdev. + */ +void dvb_dmxdev_release(struct dmxdev *dmxdev); + +#endif /* _DMXDEV_H_ */ diff --git a/include/media/drv-intf/cx2341x.h b/include/media/drv-intf/cx2341x.h index 9635eebaab09..33a97bfcea58 100644 --- a/include/media/drv-intf/cx2341x.h +++ b/include/media/drv-intf/cx2341x.h @@ -29,8 +29,8 @@ enum cx2341x_port { enum cx2341x_cap { CX2341X_CAP_HAS_SLICED_VBI = 1 << 0, - CX2341X_CAP_HAS_TS = 1 << 1, - CX2341X_CAP_HAS_AC3 = 1 << 2, + CX2341X_CAP_HAS_TS = 1 << 1, + CX2341X_CAP_HAS_AC3 = 1 << 2, }; struct cx2341x_mpeg_params { @@ -204,92 +204,92 @@ void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy); /* Firmware API commands */ /* MPEG decoder API, specific to the cx23415 */ -#define CX2341X_DEC_PING_FW 0x00 -#define CX2341X_DEC_START_PLAYBACK 0x01 -#define CX2341X_DEC_STOP_PLAYBACK 0x02 -#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03 -#define CX2341X_DEC_STEP_VIDEO 0x05 -#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08 +#define CX2341X_DEC_PING_FW 0x00 +#define CX2341X_DEC_START_PLAYBACK 0x01 +#define CX2341X_DEC_STOP_PLAYBACK 0x02 +#define CX2341X_DEC_SET_PLAYBACK_SPEED 0x03 +#define CX2341X_DEC_STEP_VIDEO 0x05 +#define CX2341X_DEC_SET_DMA_BLOCK_SIZE 0x08 #define CX2341X_DEC_GET_XFER_INFO 0x09 #define CX2341X_DEC_GET_DMA_STATUS 0x0a #define CX2341X_DEC_SCHED_DMA_FROM_HOST 0x0b -#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d -#define CX2341X_DEC_HALT_FW 0x0e -#define CX2341X_DEC_SET_STANDARD 0x10 +#define CX2341X_DEC_PAUSE_PLAYBACK 0x0d +#define CX2341X_DEC_HALT_FW 0x0e +#define CX2341X_DEC_SET_STANDARD 0x10 #define CX2341X_DEC_GET_VERSION 0x11 -#define CX2341X_DEC_SET_STREAM_INPUT 0x14 -#define CX2341X_DEC_GET_TIMING_INFO 0x15 -#define CX2341X_DEC_SET_AUDIO_MODE 0x16 +#define CX2341X_DEC_SET_STREAM_INPUT 0x14 +#define CX2341X_DEC_GET_TIMING_INFO 0x15 +#define CX2341X_DEC_SET_AUDIO_MODE 0x16 #define CX2341X_DEC_SET_EVENT_NOTIFICATION 0x17 #define CX2341X_DEC_SET_DISPLAY_BUFFERS 0x18 -#define CX2341X_DEC_EXTRACT_VBI 0x19 -#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a +#define CX2341X_DEC_EXTRACT_VBI 0x19 +#define CX2341X_DEC_SET_DECODER_SOURCE 0x1a #define CX2341X_DEC_SET_PREBUFFERING 0x1e /* MPEG encoder API */ -#define CX2341X_ENC_PING_FW 0x80 -#define CX2341X_ENC_START_CAPTURE 0x81 -#define CX2341X_ENC_STOP_CAPTURE 0x82 -#define CX2341X_ENC_SET_AUDIO_ID 0x89 -#define CX2341X_ENC_SET_VIDEO_ID 0x8b -#define CX2341X_ENC_SET_PCR_ID 0x8d -#define CX2341X_ENC_SET_FRAME_RATE 0x8f -#define CX2341X_ENC_SET_FRAME_SIZE 0x91 -#define CX2341X_ENC_SET_BIT_RATE 0x95 -#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97 -#define CX2341X_ENC_SET_ASPECT_RATIO 0x99 -#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b -#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d -#define CX2341X_ENC_SET_CORING_LEVELS 0x9f 
-#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1 -#define CX2341X_ENC_SET_VBI_LINE 0xb7 -#define CX2341X_ENC_SET_STREAM_TYPE 0xb9 -#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb -#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd -#define CX2341X_ENC_HALT_FW 0xc3 +#define CX2341X_ENC_PING_FW 0x80 +#define CX2341X_ENC_START_CAPTURE 0x81 +#define CX2341X_ENC_STOP_CAPTURE 0x82 +#define CX2341X_ENC_SET_AUDIO_ID 0x89 +#define CX2341X_ENC_SET_VIDEO_ID 0x8b +#define CX2341X_ENC_SET_PCR_ID 0x8d +#define CX2341X_ENC_SET_FRAME_RATE 0x8f +#define CX2341X_ENC_SET_FRAME_SIZE 0x91 +#define CX2341X_ENC_SET_BIT_RATE 0x95 +#define CX2341X_ENC_SET_GOP_PROPERTIES 0x97 +#define CX2341X_ENC_SET_ASPECT_RATIO 0x99 +#define CX2341X_ENC_SET_DNR_FILTER_MODE 0x9b +#define CX2341X_ENC_SET_DNR_FILTER_PROPS 0x9d +#define CX2341X_ENC_SET_CORING_LEVELS 0x9f +#define CX2341X_ENC_SET_SPATIAL_FILTER_TYPE 0xa1 +#define CX2341X_ENC_SET_VBI_LINE 0xb7 +#define CX2341X_ENC_SET_STREAM_TYPE 0xb9 +#define CX2341X_ENC_SET_OUTPUT_PORT 0xbb +#define CX2341X_ENC_SET_AUDIO_PROPERTIES 0xbd +#define CX2341X_ENC_HALT_FW 0xc3 #define CX2341X_ENC_GET_VERSION 0xc4 -#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5 -#define CX2341X_ENC_GET_SEQ_END 0xc6 -#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7 +#define CX2341X_ENC_SET_GOP_CLOSURE 0xc5 +#define CX2341X_ENC_GET_SEQ_END 0xc6 +#define CX2341X_ENC_SET_PGM_INDEX_INFO 0xc7 #define CX2341X_ENC_SET_VBI_CONFIG 0xc8 -#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9 +#define CX2341X_ENC_SET_DMA_BLOCK_SIZE 0xc9 #define CX2341X_ENC_GET_PREV_DMA_INFO_MB_10 0xca #define CX2341X_ENC_GET_PREV_DMA_INFO_MB_9 0xcb -#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc -#define CX2341X_ENC_INITIALIZE_INPUT 0xcd -#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0 -#define CX2341X_ENC_PAUSE_ENCODER 0xd2 -#define CX2341X_ENC_REFRESH_INPUT 0xd3 +#define CX2341X_ENC_SCHED_DMA_TO_HOST 0xcc +#define CX2341X_ENC_INITIALIZE_INPUT 0xcd +#define CX2341X_ENC_SET_FRAME_DROP_RATE 0xd0 +#define CX2341X_ENC_PAUSE_ENCODER 0xd2 +#define CX2341X_ENC_REFRESH_INPUT 0xd3 #define CX2341X_ENC_SET_COPYRIGHT 0xd4 -#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5 -#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6 -#define CX2341X_ENC_SET_PLACEHOLDER 0xd7 -#define CX2341X_ENC_MUTE_VIDEO 0xd9 -#define CX2341X_ENC_MUTE_AUDIO 0xda +#define CX2341X_ENC_SET_EVENT_NOTIFICATION 0xd5 +#define CX2341X_ENC_SET_NUM_VSYNC_LINES 0xd6 +#define CX2341X_ENC_SET_PLACEHOLDER 0xd7 +#define CX2341X_ENC_MUTE_VIDEO 0xd9 +#define CX2341X_ENC_MUTE_AUDIO 0xda #define CX2341X_ENC_SET_VERT_CROP_LINE 0xdb -#define CX2341X_ENC_MISC 0xdc +#define CX2341X_ENC_MISC 0xdc /* OSD API, specific to the cx23415 */ -#define CX2341X_OSD_GET_FRAMEBUFFER 0x41 -#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42 -#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43 -#define CX2341X_OSD_GET_STATE 0x44 -#define CX2341X_OSD_SET_STATE 0x45 -#define CX2341X_OSD_GET_OSD_COORDS 0x46 -#define CX2341X_OSD_SET_OSD_COORDS 0x47 -#define CX2341X_OSD_GET_SCREEN_COORDS 0x48 -#define CX2341X_OSD_SET_SCREEN_COORDS 0x49 -#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a -#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b -#define CX2341X_OSD_SET_BLEND_COORDS 0x4c -#define CX2341X_OSD_GET_FLICKER_STATE 0x4f -#define CX2341X_OSD_SET_FLICKER_STATE 0x50 -#define CX2341X_OSD_BLT_COPY 0x52 -#define CX2341X_OSD_BLT_FILL 0x53 -#define CX2341X_OSD_BLT_TEXT 0x54 -#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56 -#define CX2341X_OSD_SET_CHROMA_KEY 0x60 -#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61 -#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62 +#define CX2341X_OSD_GET_FRAMEBUFFER 0x41 
+#define CX2341X_OSD_GET_PIXEL_FORMAT 0x42 +#define CX2341X_OSD_SET_PIXEL_FORMAT 0x43 +#define CX2341X_OSD_GET_STATE 0x44 +#define CX2341X_OSD_SET_STATE 0x45 +#define CX2341X_OSD_GET_OSD_COORDS 0x46 +#define CX2341X_OSD_SET_OSD_COORDS 0x47 +#define CX2341X_OSD_GET_SCREEN_COORDS 0x48 +#define CX2341X_OSD_SET_SCREEN_COORDS 0x49 +#define CX2341X_OSD_GET_GLOBAL_ALPHA 0x4a +#define CX2341X_OSD_SET_GLOBAL_ALPHA 0x4b +#define CX2341X_OSD_SET_BLEND_COORDS 0x4c +#define CX2341X_OSD_GET_FLICKER_STATE 0x4f +#define CX2341X_OSD_SET_FLICKER_STATE 0x50 +#define CX2341X_OSD_BLT_COPY 0x52 +#define CX2341X_OSD_BLT_FILL 0x53 +#define CX2341X_OSD_BLT_TEXT 0x54 +#define CX2341X_OSD_SET_FRAMEBUFFER_WINDOW 0x56 +#define CX2341X_OSD_SET_CHROMA_KEY 0x60 +#define CX2341X_OSD_GET_ALPHA_CONTENT_INDEX 0x61 +#define CX2341X_OSD_SET_ALPHA_CONTENT_INDEX 0x62 #endif /* CX2341X_H */ diff --git a/include/media/drv-intf/exynos-fimc.h b/include/media/drv-intf/exynos-fimc.h index 69bcd2a07d5c..f9c64338841f 100644 --- a/include/media/drv-intf/exynos-fimc.h +++ b/include/media/drv-intf/exynos-fimc.h @@ -155,7 +155,8 @@ static inline struct exynos_video_entity *vdev_to_exynos_video_entity( } #define fimc_pipeline_call(ent, op, args...) \ - (!(ent) ? -ENOENT : (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ + ((!(ent) || !(ent)->pipe) ? -ENOENT : \ + (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ #endif /* S5P_FIMC_H_ */ diff --git a/include/media/drv-intf/msp3400.h b/include/media/drv-intf/msp3400.h index 1e6e80213a77..db98ce49e17b 100644 --- a/include/media/drv-intf/msp3400.h +++ b/include/media/drv-intf/msp3400.h @@ -80,17 +80,17 @@ */ /* SCART input to DSP selection */ -#define MSP_IN_SCART1 0 /* Pin SC1_IN */ -#define MSP_IN_SCART2 1 /* Pin SC2_IN */ -#define MSP_IN_SCART3 2 /* Pin SC3_IN */ -#define MSP_IN_SCART4 3 /* Pin SC4_IN */ -#define MSP_IN_MONO 6 /* Pin MONO_IN */ -#define MSP_IN_MUTE 7 /* Mute DSP input */ -#define MSP_SCART_TO_DSP(in) (in) +#define MSP_IN_SCART1 0 /* Pin SC1_IN */ +#define MSP_IN_SCART2 1 /* Pin SC2_IN */ +#define MSP_IN_SCART3 2 /* Pin SC3_IN */ +#define MSP_IN_SCART4 3 /* Pin SC4_IN */ +#define MSP_IN_MONO 6 /* Pin MONO_IN */ +#define MSP_IN_MUTE 7 /* Mute DSP input */ +#define MSP_SCART_TO_DSP(in) (in) /* Tuner input to demodulator and DSP selection */ -#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */ -#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */ -#define MSP_TUNER_TO_DSP(in) ((in) << 3) +#define MSP_IN_TUNER1 0 /* Analog Sound IF input pin ANA_IN1 */ +#define MSP_IN_TUNER2 1 /* Analog Sound IF input pin ANA_IN2 */ +#define MSP_TUNER_TO_DSP(in) ((in) << 3) /* The msp has up to 5 DSP outputs, each output can independently select a DSP input. @@ -109,30 +109,30 @@ DSP. This is currently not implemented. Also not implemented is the multi-channel capable I2S3 input of the 44x0G. If someone can demonstrate a need for one of those features then additional support can be added. 
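(Illustrative aside, not part of the patch: a one-line sketch of how the selection macros above compose into a DSP input routing word; the variable name is arbitrary.)

	/* route SCART2 to the SCART DSP input and tuner 1 to the tuner DSP input */
	u32 dsp_in = MSP_SCART_TO_DSP(MSP_IN_SCART2) | MSP_TUNER_TO_DSP(MSP_IN_TUNER1);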
*/ -#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */ -#define MSP_DSP_IN_SCART 2 /* SCART DSP input */ -#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */ -#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */ -#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */ -#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */ -#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */ -#define MSP_DSP_IN_AUX 13 /* AUX DSP input */ -#define MSP_DSP_TO_MAIN(in) ((in) << 4) -#define MSP_DSP_TO_AUX(in) ((in) << 8) -#define MSP_DSP_TO_SCART1(in) ((in) << 12) -#define MSP_DSP_TO_SCART2(in) ((in) << 16) -#define MSP_DSP_TO_I2S(in) ((in) << 20) +#define MSP_DSP_IN_TUNER 0 /* Tuner DSP input */ +#define MSP_DSP_IN_SCART 2 /* SCART DSP input */ +#define MSP_DSP_IN_I2S1 5 /* I2S1 DSP input */ +#define MSP_DSP_IN_I2S2 6 /* I2S2 DSP input */ +#define MSP_DSP_IN_I2S3 7 /* I2S3 DSP input */ +#define MSP_DSP_IN_MAIN_AVC 11 /* MAIN AVC processed DSP input */ +#define MSP_DSP_IN_MAIN 12 /* MAIN DSP input */ +#define MSP_DSP_IN_AUX 13 /* AUX DSP input */ +#define MSP_DSP_TO_MAIN(in) ((in) << 4) +#define MSP_DSP_TO_AUX(in) ((in) << 8) +#define MSP_DSP_TO_SCART1(in) ((in) << 12) +#define MSP_DSP_TO_SCART2(in) ((in) << 16) +#define MSP_DSP_TO_I2S(in) ((in) << 20) /* Output SCART select: the SCART outputs can select which input to use. */ -#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */ -#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */ -#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */ -#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */ -#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */ -#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */ -#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */ -#define MSP_SC_IN_MUTE 7 /* MUTE output */ +#define MSP_SC_IN_SCART1 0 /* SCART1 input, bypassing the DSP */ +#define MSP_SC_IN_SCART2 1 /* SCART2 input, bypassing the DSP */ +#define MSP_SC_IN_SCART3 2 /* SCART3 input, bypassing the DSP */ +#define MSP_SC_IN_SCART4 3 /* SCART4 input, bypassing the DSP */ +#define MSP_SC_IN_DSP_SCART1 4 /* DSP SCART1 input */ +#define MSP_SC_IN_DSP_SCART2 5 /* DSP SCART2 input */ +#define MSP_SC_IN_MONO 6 /* MONO input, bypassing the DSP */ +#define MSP_SC_IN_MUTE 7 /* MUTE output */ #define MSP_SC_TO_SCART1(in) (in) #define MSP_SC_TO_SCART2(in) ((in) << 4) diff --git a/include/media/drv-intf/saa7146.h b/include/media/drv-intf/saa7146.h index 769c6cf7eb4c..a7bf2c4a2e4d 100644 --- a/include/media/drv-intf/saa7146.h +++ b/include/media/drv-intf/saa7146.h @@ -118,7 +118,7 @@ struct saa7146_dev { struct module *module; - struct v4l2_device v4l2_dev; + struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler ctrl_handler; /* different device locks */ diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h new file mode 100644 index 000000000000..28e2be5c8a98 --- /dev/null +++ b/include/media/dvb-usb-ids.h @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* dvb-usb-ids.h is part of the DVB USB library. + * + * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see + * dvb-usb-init.c for copyright information. + * + * a header file containing define's for the USB device supported by the + * various drivers. 
+ */ +#ifndef _DVB_USB_IDS_H_ +#define _DVB_USB_IDS_H_ + +/* Vendor IDs */ +#define USB_VID_ADSTECH 0x06e1 +#define USB_VID_AFATECH 0x15a4 +#define USB_VID_ALCOR_MICRO 0x058f +#define USB_VID_ALINK 0x05e3 +#define USB_VID_AMT 0x1c73 +#define USB_VID_ANCHOR 0x0547 +#define USB_VID_ANSONIC 0x10b9 +#define USB_VID_ANUBIS_ELECTRONIC 0x10fd +#define USB_VID_ASUS 0x0b05 +#define USB_VID_AVERMEDIA 0x07ca +#define USB_VID_COMPRO 0x185b +#define USB_VID_COMPRO_UNK 0x145f +#define USB_VID_CONEXANT 0x0572 +#define USB_VID_CYPRESS 0x04b4 +#define USB_VID_DEXATEK 0x1d19 +#define USB_VID_DIBCOM 0x10b8 +#define USB_VID_DPOSH 0x1498 +#define USB_VID_DVICO 0x0fe9 +#define USB_VID_E3C 0x18b4 +#define USB_VID_ELGATO 0x0fd9 +#define USB_VID_EMPIA 0xeb1a +#define USB_VID_GENPIX 0x09c0 +#define USB_VID_GRANDTEC 0x5032 +#define USB_VID_GTEK 0x1f4d +#define USB_VID_HANFTEK 0x15f4 +#define USB_VID_HAUPPAUGE 0x2040 +#define USB_VID_HYPER_PALTEK 0x1025 +#define USB_VID_INTEL 0x8086 +#define USB_VID_ITETECH 0x048d +#define USB_VID_KWORLD 0xeb2a +#define USB_VID_KWORLD_2 0x1b80 +#define USB_VID_KYE 0x0458 +#define USB_VID_LEADTEK 0x0413 +#define USB_VID_LITEON 0x04ca +#define USB_VID_MEDION 0x1660 +#define USB_VID_MIGLIA 0x18f3 +#define USB_VID_MSI 0x0db0 +#define USB_VID_MSI_2 0x1462 +#define USB_VID_OPERA1 0x695c +#define USB_VID_PINNACLE 0x2304 +#define USB_VID_PCTV 0x2013 +#define USB_VID_PIXELVIEW 0x1554 +#define USB_VID_REALTEK 0x0bda +#define USB_VID_TECHNOTREND 0x0b48 +#define USB_VID_TERRATEC 0x0ccd +#define USB_VID_TELESTAR 0x10b9 +#define USB_VID_VISIONPLUS 0x13d3 +#define USB_VID_SONY 0x1415 +#define USB_PID_TEVII_S421 0xd421 +#define USB_PID_TEVII_S480_1 0xd481 +#define USB_PID_TEVII_S480_2 0xd482 +#define USB_PID_TEVII_S630 0xd630 +#define USB_PID_TEVII_S632 0xd632 +#define USB_PID_TEVII_S650 0xd650 +#define USB_PID_TEVII_S660 0xd660 +#define USB_PID_TEVII_S662 0xd662 +#define USB_VID_TWINHAN 0x1822 +#define USB_VID_ULTIMA_ELECTRONIC 0x05d8 +#define USB_VID_UNIWILL 0x1584 +#define USB_VID_WIDEVIEW 0x14aa +#define USB_VID_GIGABYTE 0x1044 +#define USB_VID_YUAN 0x1164 +#define USB_VID_XTENSIONS 0x1ae7 +#define USB_VID_ZYDAS 0x0ace +#define USB_VID_HUMAX_COEX 0x10b9 +#define USB_VID_774 0x7a69 +#define USB_VID_EVOLUTEPC 0x1e59 +#define USB_VID_AZUREWAVE 0x13d3 +#define USB_VID_TECHNISAT 0x14f7 +#define USB_VID_HAMA 0x147f +#define USB_VID_MICROSOFT 0x045e + +/* Product IDs */ +#define USB_PID_ADSTECH_USB2_COLD 0xa333 +#define USB_PID_ADSTECH_USB2_WARM 0xa334 +#define USB_PID_AFATECH_AF9005 0x9020 +#define USB_PID_AFATECH_AF9015_9015 0x9015 +#define USB_PID_AFATECH_AF9015_9016 0x9016 +#define USB_PID_AFATECH_AF9035_1000 0x1000 +#define USB_PID_AFATECH_AF9035_1001 0x1001 +#define USB_PID_AFATECH_AF9035_1002 0x1002 +#define USB_PID_AFATECH_AF9035_1003 0x1003 +#define USB_PID_AFATECH_AF9035_9035 0x9035 +#define USB_PID_TREKSTOR_DVBT 0x901b +#define USB_PID_TREKSTOR_TERRES_2_0 0xC803 +#define USB_VID_ALINK_DTU 0xf170 +#define USB_PID_ANSONIC_DVBT_USB 0x6000 +#define USB_PID_ANYSEE 0x861f +#define USB_PID_AZUREWAVE_AD_TU700 0x3237 +#define USB_PID_AZUREWAVE_6007 0x0ccd +#define USB_PID_AVERMEDIA_DVBT_USB_COLD 0x0001 +#define USB_PID_AVERMEDIA_DVBT_USB_WARM 0x0002 +#define USB_PID_AVERMEDIA_DVBT_USB2_COLD 0xa800 +#define USB_PID_AVERMEDIA_DVBT_USB2_WARM 0xa801 +#define USB_PID_COMPRO_DVBU2000_COLD 0xd000 +#define USB_PID_COMPRO_DVBU2000_WARM 0xd001 +#define USB_PID_COMPRO_DVBU2000_UNK_COLD 0x010c +#define USB_PID_COMPRO_DVBU2000_UNK_WARM 0x010d +#define USB_PID_COMPRO_VIDEOMATE_U500 0x1e78 +#define 
USB_PID_COMPRO_VIDEOMATE_U500_PC 0x1e80 +#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397 +#define USB_PID_CONEXANT_D680_DMB 0x86d6 +#define USB_PID_CREATIX_CTX1921 0x1921 +#define USB_PID_DELOCK_USB2_DVBT 0xb803 +#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064 +#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065 +#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8 +#define USB_PID_DIBCOM_MOD3000_WARM 0x0bb9 +#define USB_PID_DIBCOM_MOD3001_COLD 0x0bc6 +#define USB_PID_DIBCOM_MOD3001_WARM 0x0bc7 +#define USB_PID_DIBCOM_STK7700P 0x1e14 +#define USB_PID_DIBCOM_STK7700P_PC 0x1e78 +#define USB_PID_DIBCOM_STK7700D 0x1ef0 +#define USB_PID_DIBCOM_STK7700_U7000 0x7001 +#define USB_PID_DIBCOM_STK7070P 0x1ebc +#define USB_PID_DIBCOM_STK7070PD 0x1ebe +#define USB_PID_DIBCOM_STK807XP 0x1f90 +#define USB_PID_DIBCOM_STK807XPVR 0x1f98 +#define USB_PID_DIBCOM_STK8096GP 0x1fa0 +#define USB_PID_DIBCOM_STK8096PVR 0x1faa +#define USB_PID_DIBCOM_NIM8096MD 0x1fa8 +#define USB_PID_DIBCOM_TFE8096P 0x1f9C +#define USB_PID_DIBCOM_ANCHOR_2135_COLD 0x2131 +#define USB_PID_DIBCOM_STK7770P 0x1e80 +#define USB_PID_DIBCOM_NIM7090 0x1bb2 +#define USB_PID_DIBCOM_TFE7090PVR 0x1bb4 +#define USB_PID_DIBCOM_TFE7790P 0x1e6e +#define USB_PID_DIBCOM_NIM9090M 0x2383 +#define USB_PID_DIBCOM_NIM9090MD 0x2384 +#define USB_PID_DPOSH_M9206_COLD 0x9206 +#define USB_PID_DPOSH_M9206_WARM 0xa090 +#define USB_PID_E3C_EC168 0x1689 +#define USB_PID_E3C_EC168_2 0xfffa +#define USB_PID_E3C_EC168_3 0xfffb +#define USB_PID_E3C_EC168_4 0x1001 +#define USB_PID_E3C_EC168_5 0x1002 +#define USB_PID_FREECOM_DVBT 0x0160 +#define USB_PID_FREECOM_DVBT_2 0x0161 +#define USB_PID_UNIWILL_STK7700P 0x6003 +#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012 +#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0 +#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1 +#define USB_PID_GOTVIEW_SAT_HD 0x5456 +#define USB_PID_INTEL_CE9500 0x9500 +#define USB_PID_ITETECH_IT9135 0x9135 +#define USB_PID_ITETECH_IT9135_9005 0x9005 +#define USB_PID_ITETECH_IT9135_9006 0x9006 +#define USB_PID_ITETECH_IT9303 0x9306 +#define USB_PID_KWORLD_399U 0xe399 +#define USB_PID_KWORLD_399U_2 0xe400 +#define USB_PID_KWORLD_395U 0xe396 +#define USB_PID_KWORLD_395U_2 0xe39b +#define USB_PID_KWORLD_395U_3 0xe395 +#define USB_PID_KWORLD_395U_4 0xe39a +#define USB_PID_KWORLD_MC810 0xc810 +#define USB_PID_KWORLD_PC160_2T 0xc160 +#define USB_PID_KWORLD_PC160_T 0xc161 +#define USB_PID_KWORLD_UB383_T 0xe383 +#define USB_PID_KWORLD_UB499_2T_T09 0xe409 +#define USB_PID_KWORLD_VSTREAM_COLD 0x17de +#define USB_PID_KWORLD_VSTREAM_WARM 0x17df +#define USB_PID_PROF_1100 0xb012 +#define USB_PID_TERRATEC_CINERGY_S 0x0064 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055 +#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069 +#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093 +#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097 +#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099 +#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9 +#define USB_PID_TWINHAN_VP7041_COLD 0x3201 +#define USB_PID_TWINHAN_VP7041_WARM 0x3202 +#define USB_PID_TWINHAN_VP7020_COLD 0x3203 +#define USB_PID_TWINHAN_VP7020_WARM 0x3204 +#define USB_PID_TWINHAN_VP7045_COLD 0x3205 +#define USB_PID_TWINHAN_VP7045_WARM 0x3206 +#define USB_PID_TWINHAN_VP7021_COLD 0x3207 +#define USB_PID_TWINHAN_VP7021_WARM 0x3208 +#define USB_PID_TWINHAN_VP7049 0x3219 +#define USB_PID_TINYTWIN 0x3226 +#define USB_PID_TINYTWIN_2 0xe402 +#define USB_PID_TINYTWIN_3 0x9016 +#define USB_PID_DNTV_TINYUSB2_COLD 0x3223 +#define USB_PID_DNTV_TINYUSB2_WARM 0x3224 +#define USB_PID_ULTIMA_TVBOX_COLD 
0x8105 +#define USB_PID_ULTIMA_TVBOX_WARM 0x8106 +#define USB_PID_ULTIMA_TVBOX_AN2235_COLD 0x8107 +#define USB_PID_ULTIMA_TVBOX_AN2235_WARM 0x8108 +#define USB_PID_ULTIMA_TVBOX_ANCHOR_COLD 0x2235 +#define USB_PID_ULTIMA_TVBOX_USB2_COLD 0x8109 +#define USB_PID_ULTIMA_TVBOX_USB2_WARM 0x810a +#define USB_PID_ARTEC_T14_COLD 0x810b +#define USB_PID_ARTEC_T14_WARM 0x810c +#define USB_PID_ARTEC_T14BR 0x810f +#define USB_PID_ULTIMA_TVBOX_USB2_FX_COLD 0x8613 +#define USB_PID_ULTIMA_TVBOX_USB2_FX_WARM 0x1002 +#define USB_PID_UNK_HYPER_PALTEK_COLD 0x005e +#define USB_PID_UNK_HYPER_PALTEK_WARM 0x005f +#define USB_PID_HANFTEK_UMT_010_COLD 0x0001 +#define USB_PID_HANFTEK_UMT_010_WARM 0x0015 +#define USB_PID_DTT200U_COLD 0x0201 +#define USB_PID_DTT200U_WARM 0x0301 +#define USB_PID_WT220U_ZAP250_COLD 0x0220 +#define USB_PID_WT220U_COLD 0x0222 +#define USB_PID_WT220U_WARM 0x0221 +#define USB_PID_WT220U_FC_COLD 0x0225 +#define USB_PID_WT220U_FC_WARM 0x0226 +#define USB_PID_WT220U_ZL0353_COLD 0x022a +#define USB_PID_WT220U_ZL0353_WARM 0x022b +#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300 +#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301 +#define USB_PID_HAUPPAUGE_NOVA_T_500 0x9941 +#define USB_PID_HAUPPAUGE_NOVA_T_500_2 0x9950 +#define USB_PID_HAUPPAUGE_NOVA_T_500_3 0x8400 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK 0x7050 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_2 0x7060 +#define USB_PID_HAUPPAUGE_NOVA_T_STICK_3 0x7070 +#define USB_PID_HAUPPAUGE_MYTV_T 0x7080 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK 0x9580 +#define USB_PID_HAUPPAUGE_NOVA_TD_STICK_52009 0x5200 +#define USB_PID_HAUPPAUGE_TIGER_ATSC 0xb200 +#define USB_PID_HAUPPAUGE_TIGER_ATSC_B210 0xb210 +#define USB_PID_AVERMEDIA_EXPRESS 0xb568 +#define USB_PID_AVERMEDIA_VOLAR 0xa807 +#define USB_PID_AVERMEDIA_VOLAR_2 0xb808 +#define USB_PID_AVERMEDIA_VOLAR_A868R 0xa868 +#define USB_PID_AVERMEDIA_MCE_USB_M038 0x1228 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R 0x0039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_ATSC 0x1039 +#define USB_PID_AVERMEDIA_HYBRID_ULTRA_USB_M039R_DVBT 0x2039 +#define USB_PID_AVERMEDIA_VOLAR_X 0xa815 +#define USB_PID_AVERMEDIA_VOLAR_X_2 0x8150 +#define USB_PID_AVERMEDIA_A309 0xa309 +#define USB_PID_AVERMEDIA_A310 0xa310 +#define USB_PID_AVERMEDIA_A850 0x850a +#define USB_PID_AVERMEDIA_A850T 0x850b +#define USB_PID_AVERMEDIA_A805 0xa805 +#define USB_PID_AVERMEDIA_A815M 0x815a +#define USB_PID_AVERMEDIA_A835 0xa835 +#define USB_PID_AVERMEDIA_B835 0xb835 +#define USB_PID_AVERMEDIA_A835B_1835 0x1835 +#define USB_PID_AVERMEDIA_A835B_2835 0x2835 +#define USB_PID_AVERMEDIA_A835B_3835 0x3835 +#define USB_PID_AVERMEDIA_A835B_4835 0x4835 +#define USB_PID_AVERMEDIA_1867 0x1867 +#define USB_PID_AVERMEDIA_A867 0xa867 +#define USB_PID_AVERMEDIA_H335 0x0335 +#define USB_PID_AVERMEDIA_TD110 0xa110 +#define USB_PID_AVERMEDIA_TWINSTAR 0x0825 +#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 +#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009 +#define USB_PID_TECHNOTREND_CONNECT_CT3650 0x300d +#define USB_PID_TECHNOTREND_CONNECT_S2_4600 0x3011 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI 0x3012 +#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2 0x3015 +#define USB_PID_TECHNOTREND_TVSTICK_CT2_4400 0x3014 +#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI 0x3017 +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY 0x005a +#define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2 0x0081 +#define USB_PID_TERRATEC_CINERGY_HT_USB_XE 0x0058 +#define USB_PID_TERRATEC_CINERGY_HT_EXPRESS 0x0060 +#define USB_PID_TERRATEC_CINERGY_T_EXPRESS 0x0062 +#define 
USB_PID_TERRATEC_CINERGY_T_XXS 0x0078 +#define USB_PID_TERRATEC_CINERGY_T_XXS_2 0x00ab +#define USB_PID_TERRATEC_CINERGY_S2_R1 0x00a8 +#define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 +#define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 +#define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 +#define USB_PID_TERRATEC_H7 0x10b4 +#define USB_PID_TERRATEC_H7_2 0x10a3 +#define USB_PID_TERRATEC_H7_3 0x10a5 +#define USB_PID_TERRATEC_T1 0x10ae +#define USB_PID_TERRATEC_T3 0x10a0 +#define USB_PID_TERRATEC_T5 0x10a1 +#define USB_PID_NOXON_DAB_STICK 0x00b3 +#define USB_PID_NOXON_DAB_STICK_REV2 0x00e0 +#define USB_PID_NOXON_DAB_STICK_REV3 0x00b4 +#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e +#define USB_PID_PINNACLE_PCTV2000E 0x022c +#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228 +#define USB_PID_PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T 0x0229 +#define USB_PID_PINNACLE_PCTV71E 0x022b +#define USB_PID_PINNACLE_PCTV72E 0x0236 +#define USB_PID_PINNACLE_PCTV73E 0x0237 +#define USB_PID_PINNACLE_PCTV310E 0x3211 +#define USB_PID_PINNACLE_PCTV801E 0x023a +#define USB_PID_PINNACLE_PCTV801E_SE 0x023b +#define USB_PID_PINNACLE_PCTV340E 0x023d +#define USB_PID_PINNACLE_PCTV340E_SE 0x023e +#define USB_PID_PINNACLE_PCTV73A 0x0243 +#define USB_PID_PINNACLE_PCTV73ESE 0x0245 +#define USB_PID_PINNACLE_PCTV74E 0x0246 +#define USB_PID_PINNACLE_PCTV282E 0x0248 +#define USB_PID_PIXELVIEW_SBTVD 0x5010 +#define USB_PID_PCTV_200E 0x020e +#define USB_PID_PCTV_400E 0x020f +#define USB_PID_PCTV_450E 0x0222 +#define USB_PID_PCTV_452E 0x021f +#define USB_PID_PCTV_78E 0x025a +#define USB_PID_PCTV_79E 0x0262 +#define USB_PID_REALTEK_RTL2831U 0x2831 +#define USB_PID_REALTEK_RTL2832U 0x2832 +#define USB_PID_TECHNOTREND_CONNECT_S2_3600 0x3007 +#define USB_PID_TECHNOTREND_CONNECT_S2_3650_CI 0x300a +#define USB_PID_NEBULA_DIGITV 0x0201 +#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820 +#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500 +#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00 +#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01 +#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10 +#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_COLD 0xdb50 +#define USB_PID_DVICO_BLUEBIRD_DUAL_1_WARM 0xdb51 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_COLD 0xdb58 +#define USB_PID_DVICO_BLUEBIRD_DUAL_2_WARM 0xdb59 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4 0xdb78 +#define USB_PID_DVICO_BLUEBIRD_DUAL_4_REV_2 0xdb98 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2 0xdb70 +#define USB_PID_DVICO_BLUEBIRD_DVB_T_NANO_2_NFW_WARM 0xdb71 +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_COLD 0xdb54 +#define USB_PID_DIGITALNOW_BLUEBIRD_DUAL_1_WARM 0xdb55 +#define USB_PID_MEDION_MD95700 0x0932 +#define USB_PID_MSI_MEGASKY580 0x5580 +#define USB_PID_MSI_MEGASKY580_55801 0x5581 +#define USB_PID_KYE_DVB_T_COLD 0x701e +#define USB_PID_KYE_DVB_T_WARM 0x701f +#define USB_PID_LITEON_DVB_T_COLD 0xf000 +#define USB_PID_LITEON_DVB_T_WARM 0xf001 +#define USB_PID_DIGIVOX_MINI_SL_COLD 0xe360 +#define USB_PID_DIGIVOX_MINI_SL_WARM 0xe361 +#define USB_PID_GRANDTEC_DVBT_USB2_COLD 0x0bc6 +#define USB_PID_GRANDTEC_DVBT_USB2_WARM 0x0bc7 +#define USB_PID_WINFAST_DTV2000DS 0x6a04 +#define USB_PID_WINFAST_DTV2000DS_PLUS 0x6f12 +#define USB_PID_WINFAST_DTV_DONGLE_COLD 0x6025 +#define USB_PID_WINFAST_DTV_DONGLE_WARM 0x6026 +#define USB_PID_WINFAST_DTV_DONGLE_STK7700P 0x6f00 +#define USB_PID_WINFAST_DTV_DONGLE_H 0x60f6 +#define USB_PID_WINFAST_DTV_DONGLE_STK7700P_2 0x6f01 +#define USB_PID_WINFAST_DTV_DONGLE_GOLD 0x6029 +#define 
USB_PID_WINFAST_DTV_DONGLE_MINID 0x6f0f +#define USB_PID_GENPIX_8PSK_REV_1_COLD 0x0200 +#define USB_PID_GENPIX_8PSK_REV_1_WARM 0x0201 +#define USB_PID_GENPIX_8PSK_REV_2 0x0202 +#define USB_PID_GENPIX_SKYWALKER_1 0x0203 +#define USB_PID_GENPIX_SKYWALKER_CW3K 0x0204 +#define USB_PID_GENPIX_SKYWALKER_2 0x0206 +#define USB_PID_SIGMATEK_DVB_110 0x6610 +#define USB_PID_MSI_DIGI_VOX_MINI_II 0x1513 +#define USB_PID_MSI_DIGIVOX_DUO 0x8801 +#define USB_PID_OPERA1_COLD 0x2830 +#define USB_PID_OPERA1_WARM 0x3829 +#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_COLD 0x0514 +#define USB_PID_LIFEVIEW_TV_WALKER_TWIN_WARM 0x0513 +#define USB_PID_GIGABYTE_U7000 0x7001 +#define USB_PID_GIGABYTE_U8000 0x7002 +#define USB_PID_ASUS_U3000 0x171f +#define USB_PID_ASUS_U3000H 0x1736 +#define USB_PID_ASUS_U3100 0x173f +#define USB_PID_ASUS_U3100MINI_PLUS 0x1779 +#define USB_PID_YUAN_EC372S 0x1edc +#define USB_PID_YUAN_STK7700PH 0x1f08 +#define USB_PID_YUAN_PD378S 0x2edc +#define USB_PID_YUAN_MC770 0x0871 +#define USB_PID_YUAN_STK7700D 0x1efc +#define USB_PID_YUAN_STK7700D_2 0x1e8c +#define USB_PID_DW2102 0x2102 +#define USB_PID_DW2104 0x2104 +#define USB_PID_DW3101 0x3101 +#define USB_PID_XTENSIONS_XD_380 0x0381 +#define USB_PID_TELESTAR_STARSTICK_2 0x8000 +#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807 +#define USB_PID_SONY_PLAYTV 0x0003 +#define USB_PID_MYGICA_D689 0xd811 +#define USB_PID_MYGICA_T230 0xc688 +#define USB_PID_MYGICA_T230C 0xc689 +#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011 +#define USB_PID_ELGATO_EYETV_DTT 0x0021 +#define USB_PID_ELGATO_EYETV_DTT_2 0x003f +#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020 +#define USB_PID_ELGATO_EYETV_SAT 0x002a +#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025 +#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036 +#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000 +#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001 +#define USB_PID_FRIIO_WHITE 0x0001 +#define USB_PID_TVWAY_PLUS 0x0002 +#define USB_PID_SVEON_STV20 0xe39d +#define USB_PID_SVEON_STV20_RTL2832U 0xd39d +#define USB_PID_SVEON_STV21 0xd3b0 +#define USB_PID_SVEON_STV22 0xe401 +#define USB_PID_SVEON_STV22_IT9137 0xe411 +#define USB_PID_AZUREWAVE_AZ6027 0x3275 +#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4 +#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac +#define USB_PID_TECHNISAT_USB2_HDCI_V1 0x0001 +#define USB_PID_TECHNISAT_USB2_HDCI_V2 0x0002 +#define USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI 0x0003 +#define USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2 0x0004 +#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500 +#define USB_PID_CPYTO_REDI_PC50A 0xa803 +#define USB_PID_CTVDIGDUAL_V2 0xe410 +#define USB_PID_PCTV_2002E 0x025c +#define USB_PID_PCTV_2002E_SE 0x025d +#define USB_PID_SVEON_STV27 0xd3af +#define USB_PID_TURBOX_DTT_2000 0xd3a4 +#define USB_PID_WINTV_SOLOHD 0x0264 +#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115 +#define USB_PID_HAMA_DVBT_HYBRID 0x2758 +#define USB_PID_XBOX_ONE_TUNER 0x02d5 +#endif diff --git a/include/media/dvb_ca_en50221.h b/include/media/dvb_ca_en50221.h new file mode 100644 index 000000000000..a1c014b0a837 --- /dev/null +++ b/include/media/dvb_ca_en50221.h @@ -0,0 +1,142 @@ +/* + * dvb_ca.h: generic DVB functions for EN50221 CA interfaces + * + * Copyright (C) 2004 Andrew de Quincey + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DVB_CA_EN50221_H_
+#define _DVB_CA_EN50221_H_
+
+#include <linux/list.h>
+#include <linux/dvb/ca.h>
+
+#include <media/dvbdev.h>
+
+#define DVB_CA_EN50221_POLL_CAM_PRESENT 1
+#define DVB_CA_EN50221_POLL_CAM_CHANGED 2
+#define DVB_CA_EN50221_POLL_CAM_READY 4
+
+#define DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE 1
+#define DVB_CA_EN50221_FLAG_IRQ_FR 2
+#define DVB_CA_EN50221_FLAG_IRQ_DA 4
+
+#define DVB_CA_EN50221_CAMCHANGE_REMOVED 0
+#define DVB_CA_EN50221_CAMCHANGE_INSERTED 1
+
+/**
+ * struct dvb_ca_en50221 - Structure describing a CA interface
+ *
+ * @owner: the module owning this structure
+ * @read_attribute_mem: function for reading attribute memory on the CAM
+ * @write_attribute_mem: function for writing attribute memory on the CAM
+ * @read_cam_control: function for reading the control interface on the CAM
+ * @write_cam_control: function for writing the control interface on the CAM
+ * @read_data: function for reading data (block mode)
+ * @write_data: function for writing data (block mode)
+ * @slot_reset: function to reset the CAM slot
+ * @slot_shutdown: function to shutdown a CAM slot
+ * @slot_ts_enable: function to enable the Transport Stream on a CAM slot
+ * @poll_slot_status: function to poll slot status. Only necessary if
+ * DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE is not set.
+ * @data: private data, used by caller.
+ * @private: Opaque data used by the dvb_ca core. Do not modify!
+ *
+ * NOTE: the read_*, write_* and poll_slot_status functions will be
+ * called for different slots concurrently and need to use locks where
+ * and if appropriate. There will be no concurrent access to one slot.
+ */
+struct dvb_ca_en50221 {
+ struct module *owner;
+
+ int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address);
+ int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
+ int slot, int address, u8 value);
+
+ int (*read_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address);
+ int (*write_cam_control)(struct dvb_ca_en50221 *ca,
+ int slot, u8 address, u8 value);
+
+ int (*read_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+ int (*write_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+
+ int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
+ int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
+
+ int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open);
+
+ void *data;
+
+ void *private;
+};
+
+/*
+ * Functions for reporting IRQ events
+ */
+
+/**
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
+ *
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ * @change_type: One of the DVB_CA_EN50221_CAMCHANGE_* values
+ */
+void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot,
+ int change_type);
+
+/**
+ * dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
+ *
+ * @pubca: CA instance.
+ * @slot: Slot concerned.
+ */
+void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot);
+
+/**
+ * dvb_ca_en50221_frda_irq - An FR or a DA IRQ has occurred.
+ *
+ * @ca: CA instance.
+ * @slot: Slot concerned.
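(Illustrative aside, not part of the patch: a hedged sketch of how a CI driver might embed &struct dvb_ca_en50221 and report CAM insertion or removal from its interrupt handler; the my_ci_state type and function name are assumptions.)

struct my_ci_state {
	struct dvb_ca_en50221 ca;	/* callbacks filled in at probe time */
	/* driver-specific members */
};

static void my_ci_report_camchange(struct my_ci_state *state, int slot,
				   bool inserted)
{
	/* forward the slot event to the dvb_ca core */
	dvb_ca_en50221_camchange_irq(&state->ca, slot,
				     inserted ?
				     DVB_CA_EN50221_CAMCHANGE_INSERTED :
				     DVB_CA_EN50221_CAMCHANGE_REMOVED);
}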
+ */ +void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot); + +/* + * Initialisation/shutdown functions + */ + +/** + * dvb_ca_en50221_init - Initialise a new DVB CA device. + * + * @dvb_adapter: DVB adapter to attach the new CA device to. + * @ca: The dvb_ca instance. + * @flags: Flags describing the CA device (DVB_CA_EN50221_FLAG_*). + * @slot_count: Number of slots supported. + * + * @return 0 on success, nonzero on failure + */ +int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter, + struct dvb_ca_en50221 *ca, int flags, + int slot_count); + +/** + * dvb_ca_en50221_release - Release a DVB CA device. + * + * @ca: The associated dvb_ca instance. + */ +void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca); + +#endif diff --git a/include/media/dvb_demux.h b/include/media/dvb_demux.h new file mode 100644 index 000000000000..b07092038f4b --- /dev/null +++ b/include/media/dvb_demux.h @@ -0,0 +1,350 @@ +/* + * dvb_demux.h: DVB kernel demux API + * + * Copyright (C) 2000-2001 Marcus Metzler & Ralph Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_DEMUX_H_ +#define _DVB_DEMUX_H_ + +#include <linux/time.h> +#include <linux/timer.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> + +#include <media/demux.h> + +/** + * enum dvb_dmx_filter_type - type of demux feed. + * + * @DMX_TYPE_TS: feed is in TS mode. + * @DMX_TYPE_SEC: feed is in Section mode. + */ +enum dvb_dmx_filter_type { + DMX_TYPE_TS, + DMX_TYPE_SEC, +}; + +/** + * enum dvb_dmx_state - state machine for a demux filter. + * + * @DMX_STATE_FREE: indicates that the filter is freed. + * @DMX_STATE_ALLOCATED: indicates that the filter was allocated + * to be used. + * @DMX_STATE_READY: indicates that the filter is ready + * to be used. + * @DMX_STATE_GO: indicates that the filter is running. + */ +enum dvb_dmx_state { + DMX_STATE_FREE, + DMX_STATE_ALLOCATED, + DMX_STATE_READY, + DMX_STATE_GO, +}; + +#define DVB_DEMUX_MASK_MAX 18 + +#define MAX_PID 0x1fff + +#define SPEED_PKTS_INTERVAL 50000 + +/** + * struct dvb_demux_filter - Describes a DVB demux section filter. + * + * @filter: Section filter as defined by &struct dmx_section_filter. + * @maskandmode: logical ``and`` bit mask. + * @maskandnotmode: logical ``and not`` bit mask. + * @doneq: flag that indicates when a filter is ready. + * @next: pointer to the next section filter. + * @feed: &struct dvb_demux_feed pointer. + * @index: index of the used demux filter. + * @state: state of the filter as described by &enum dvb_dmx_state. + * @type: type of the filter as described + * by &enum dvb_dmx_filter_type. 
+ */
+
+struct dvb_demux_filter {
+ struct dmx_section_filter filter;
+ u8 maskandmode[DMX_MAX_FILTER_SIZE];
+ u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
+ bool doneq;
+
+ struct dvb_demux_filter *next;
+ struct dvb_demux_feed *feed;
+ int index;
+ enum dvb_dmx_state state;
+ enum dvb_dmx_filter_type type;
+
+ /* private: used only by av7110 */
+ u16 hw_handle;
+};
+
+/**
+ * struct dvb_demux_feed - describes a DVB demux feed
+ *
+ * @feed: a union describing a digital TV feed.
+ * Depending on the feed type, it can be either
+ * @feed.ts or @feed.sec.
+ * @feed.ts: a &struct dmx_ts_feed pointer.
+ * For TS feed only.
+ * @feed.sec: a &struct dmx_section_feed pointer.
+ * For section feed only.
+ * @cb: a union describing digital TV callbacks.
+ * Depending on the feed type, it can be either
+ * @cb.ts or @cb.sec.
+ * @cb.ts: a dmx_ts_cb() callback function pointer.
+ * For TS feed only.
+ * @cb.sec: a dmx_section_cb() callback function pointer.
+ * For section feed only.
+ * @demux: pointer to &struct dvb_demux.
+ * @priv: private data that can optionally be used by a DVB driver.
+ * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
+ * @state: state of the filter as defined by &enum dvb_dmx_state.
+ * @pid: PID to be filtered.
+ * @timeout: feed timeout.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @ts_type: type of TS, as defined by &enum ts_filter_type.
+ * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
+ * @cc: MPEG-TS packet continuity counter
+ * @pusi_seen: if true, indicates that a discontinuity was detected.
+ * It is used to prevent feeding of garbage from the previous section.
+ * @peslen: length of the PES (Packet Elementary Stream).
+ * @list_head: head for the list of digital TV demux feeds.
+ * @index: a unique index for each feed. Can be used as hardware
+ * pid filter index.
+ */
+struct dvb_demux_feed {
+ union {
+ struct dmx_ts_feed ts;
+ struct dmx_section_feed sec;
+ } feed;
+
+ union {
+ dmx_ts_cb ts;
+ dmx_section_cb sec;
+ } cb;
+
+ struct dvb_demux *demux;
+ void *priv;
+ enum dvb_dmx_filter_type type;
+ enum dvb_dmx_state state;
+ u16 pid;
+
+ ktime_t timeout;
+ struct dvb_demux_filter *filter;
+
+ enum ts_filter_type ts_type;
+ enum dmx_ts_pes pes_type;
+
+ int cc;
+ bool pusi_seen;
+
+ u16 peslen;
+
+ struct list_head list_head;
+ unsigned int index;
+};
+
+/**
+ * struct dvb_demux - represents a digital TV demux
+ * @dmx: embedded &struct dmx_demux with demux capabilities
+ * and callbacks.
+ * @priv: private data that can optionally be used by
+ * a DVB driver.
+ * @filternum: maximum number of DVB filters.
+ * @feednum: maximum number of DVB feeds.
+ * @start_feed: callback routine to be called in order to start
+ * a DVB feed.
+ * @stop_feed: callback routine to be called in order to stop
+ * a DVB feed.
+ * @write_to_decoder: callback routine to be called if the feed is TS and
+ * it is routed to an A/V decoder, when a new TS packet
+ * is received.
+ * Used only by av7110-av.c.
+ * @check_crc32: callback routine to check CRC. If not initialized,
+ * dvb_demux will use an internal one.
+ * @memcopy: callback routine to memcopy received data.
+ * If not initialized, dvb_demux will default to memcpy().
+ * @users: counter for the number of demux opened file descriptors.
+ * Currently, it is limited to 10 users.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @feed: pointer to &struct dvb_demux_feed.
+ * @frontend_list: &struct list_head with frontends used by the demux.
+ * @pesfilter: array of &struct dvb_demux_feed with the PES types
+ * that will be filtered.
+ * @pids: list of filtered program IDs.
+ * @feed_list: &struct list_head with feeds.
+ * @tsbuf: temporary buffer used internally to store TS packets.
+ * @tsbufp: temporary buffer index used internally.
+ * @mutex: embedded &struct mutex used to protect the feed set
+ * logic.
+ * @lock: embedded &spinlock_t, used to protect buffer handling.
+ * @cnt_storage: buffer used for TS/TEI continuity check.
+ * @speed_last_time: &ktime_t used for TS speed check.
+ * @speed_pkts_cnt: packets count used for TS speed check.
+ */
+struct dvb_demux {
+ struct dmx_demux dmx;
+ void *priv;
+ int filternum;
+ int feednum;
+ int (*start_feed)(struct dvb_demux_feed *feed);
+ int (*stop_feed)(struct dvb_demux_feed *feed);
+ int (*write_to_decoder)(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len);
+ u32 (*check_crc32)(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len);
+ void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst,
+ const u8 *src, size_t len);
+
+ int users;
+#define MAX_DVB_DEMUX_USERS 10
+ struct dvb_demux_filter *filter;
+ struct dvb_demux_feed *feed;
+
+ struct list_head frontend_list;
+
+ struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
+ u16 pids[DMX_PES_OTHER];
+
+#define DMX_MAX_PID 0x2000
+ struct list_head feed_list;
+ u8 tsbuf[204];
+ int tsbufp;
+
+ struct mutex mutex;
+ spinlock_t lock;
+
+ uint8_t *cnt_storage; /* for TS continuity check */
+
+ ktime_t speed_last_time; /* for TS speed check */
+ uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ /* private: used only on av7110 */
+ int playing;
+ int recording;
+};
+
+/**
+ * dvb_dmx_init - initialize a digital TV demux struct.
+ *
+ * @demux: &struct dvb_demux to be initialized.
+ *
+ * Before being able to register a digital TV demux struct, drivers
+ * should call this routine. In typical usage, some fields should
+ * be initialized by the driver before calling it.
+ *
+ * A typical use case is::
+ *
+ * dvb->demux.dmx.capabilities =
+ * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ * DMX_MEMORY_BASED_FILTERING;
+ * dvb->demux.priv = dvb;
+ * dvb->demux.filternum = 256;
+ * dvb->demux.feednum = 256;
+ * dvb->demux.start_feed = driver_start_feed;
+ * dvb->demux.stop_feed = driver_stop_feed;
+ * ret = dvb_dmx_init(&dvb->demux);
+ * if (ret < 0)
+ * return ret;
+ */
+int dvb_dmx_init(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_release - releases the internal buffers of a digital TV demux.
+ *
+ * @demux: &struct dvb_demux to be released.
+ *
+ * The DVB core internally allocates data within @demux. This routine
+ * releases that data. Please note that the struct itself is not
+ * released, as it can be embedded in other structs.
+ */
+void dvb_dmx_release(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * The routine will discard DVB packets that don't start with 0x47.
+ *
+ * Use this routine if the DVB demux fills MPEG-TS buffers that are
+ * already aligned.
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+/**
+ * dvb_dmx_swfilter - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
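(Illustrative aside, not part of the patch: a hedged sketch of the glue a bridge driver typically provides around the software demux declared above, implementing start_feed/stop_feed and feeding aligned 188-byte packets from a completion handler. struct my_dev, its embedded demux member and my_hw_pid_filter() are assumptions.)

static int example_start_feed(struct dvb_demux_feed *feed)
{
	struct my_dev *dev = feed->demux->priv;	/* driver state stored in priv */

	/* program a hardware PID filter slot with the requested PID */
	return my_hw_pid_filter(dev, feed->index, feed->pid, true);
}

static int example_stop_feed(struct dvb_demux_feed *feed)
{
	struct my_dev *dev = feed->demux->priv;

	return my_hw_pid_filter(dev, feed->index, feed->pid, false);
}

/* completion handler for a buffer of whole, aligned 188-byte packets */
static void example_dma_done(struct my_dev *dev, const u8 *buf, size_t len)
{
	/* dev->demux is the driver's embedded struct dvb_demux */
	dvb_dmx_swfilter_packets(&dev->demux, buf, len / 188);
}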
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * If a DVB packet doesn't start with 0x47, the routine will search for
+ * the first 0x47 byte.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
+
+/**
+ * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 204 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 204.
+ *
+ * If a DVB packet doesn't start with 0x47, the routine will search for
+ * the first 0x47 byte.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: @buf should have a size equal to ``count * 204``.
+ */
+void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+/**
+ * dvb_dmx_swfilter_raw - make the raw data available to userspace without
+ * filtering
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data
+ * @count: number of packets to be passed. The actual size of each packet
+ * depends on the &dvb_demux->feed->cb.ts logic.
+ *
+ * Use it if the driver needs to deliver the raw payload to userspace without
+ * passing through the kernel demux. That is meant to support some
+ * delivery systems that aren't based on MPEG-TS.
+ *
+ * This function relies on &dvb_demux->feed->cb.ts to actually handle the
+ * buffer.
+ */
+void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
+ size_t count);
+
+#endif /* _DVB_DEMUX_H_ */
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
new file mode 100644
index 000000000000..331c8269c00e
--- /dev/null
+++ b/include/media/dvb_frontend.h
@@ -0,0 +1,795 @@
+/*
+ * dvb_frontend.h
+ *
+ * The Digital TV Frontend kABI defines a driver-internal interface for
+ * registering low-level, hardware-specific drivers with a hardware-independent
+ * frontend layer.
+ *
+ * Copyright (C) 2001 convergence integrated media GmbH
+ * Copyright (C) 2004 convergence GmbH
+ *
+ * Written by Ralph Metzler
+ * Overhauled by Holger Waechtler
+ * Kernel I2C stuff by Michael Hunold <hunold@convergence.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2.1
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef _DVB_FRONTEND_H_
+#define _DVB_FRONTEND_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ioctl.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include <linux/dvb/frontend.h>
+
+#include <media/dvbdev.h>
+
+/*
+ * Maximum number of Delivery systems per frontend. It
+ * should be smaller than or equal to 32
+ */
+#define MAX_DELSYS 8
+
+/**
+ * struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
+ *
+ * @min_delay_ms: minimum delay for tuning, in ms
+ * @step_size: step size between two consecutive frequencies
+ * @max_drift: maximum drift
+ *
+ * NOTE: step_size is in Hz, for terrestrial/cable or kHz for satellite
+ */
+struct dvb_frontend_tune_settings {
+ int min_delay_ms;
+ int step_size;
+ int max_drift;
+};
+
+struct dvb_frontend;
+
+/**
+ * struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
+ *
+ * @name: name of the Frontend
+ * @frequency_min: minimal frequency supported
+ * @frequency_max: maximum frequency supported
+ * @frequency_step: frequency step
+ * @bandwidth_min: minimal frontend bandwidth supported
+ * @bandwidth_max: maximum frontend bandwidth supported
+ * @bandwidth_step: frontend bandwidth step
+ *
+ * NOTE: frequency parameters are in Hz, for terrestrial/cable or kHz for
+ * satellite.
+ */
+struct dvb_tuner_info {
+ char name[128];
+
+ u32 frequency_min;
+ u32 frequency_max;
+ u32 frequency_step;
+
+ u32 bandwidth_min;
+ u32 bandwidth_max;
+ u32 bandwidth_step;
+};
+
+/**
+ * struct analog_parameters - Parameters to tune into an analog/radio channel
+ *
+ * @frequency: Frequency used by analog TV tuner (either in 62.5 kHz steps,
+ * for TV, or 62.5 Hz steps, for radio)
+ * @mode: Tuner mode, as defined on enum v4l2_tuner_type
+ * @audmode: Audio mode as defined for the rxsubchans field at videodev2.h,
+ * e. g. V4L2_TUNER_MODE_*
+ * @std: TV standard bitmap as defined at videodev2.h, e. g. V4L2_STD_*
+ *
+ * Hybrid tuners should be supported by both V4L2 and DVB APIs. This
+ * struct contains the data that are used by the V4L2 side. To avoid
+ * dependencies from V4L2 headers, all enums here are declared as integers.
+ */
+struct analog_parameters {
+ unsigned int frequency;
+ unsigned int mode;
+ unsigned int audmode;
+ u64 std;
+};
+
+/**
+ * enum dvbfe_algo - defines the algorithm used to tune into a channel
+ *
+ * @DVBFE_ALGO_HW: Hardware Algorithm -
+ * Devices that support this algorithm do everything in hardware
+ * and no software support is needed to handle them.
+ * Requesting these devices to LOCK is the only thing required;
+ * the device is supposed to do everything in hardware.
+ *
+ * @DVBFE_ALGO_SW: Software Algorithm -
+ * These are dumb devices that require software to do everything
+ *
+ * @DVBFE_ALGO_CUSTOM: Customizable Algorithm -
+ * Devices having this algorithm can be customized to have specific
+ * algorithms in the frontend driver, rather than simply doing a
+ * software zig-zag. In this case the zigzag may be hardware assisted
+ * or it may be done completely in hardware. In all cases, usage of
+ * this algorithm, in conjunction with the search and track
+ * callbacks, utilizes the driver specific algorithm.
+ * + * @DVBFE_ALGO_RECOVERY: Recovery Algorithm - + * These devices have AUTO recovery capabilities from LOCK failure + */ +enum dvbfe_algo { + DVBFE_ALGO_HW = (1 << 0), + DVBFE_ALGO_SW = (1 << 1), + DVBFE_ALGO_CUSTOM = (1 << 2), + DVBFE_ALGO_RECOVERY = (1 << 31) +}; + +/** + * enum dvbfe_search - search callback possible return status + * + * @DVBFE_ALGO_SEARCH_SUCCESS: + * The frontend search algorithm completed and returned successfully + * + * @DVBFE_ALGO_SEARCH_ASLEEP: + * The frontend search algorithm is sleeping + * + * @DVBFE_ALGO_SEARCH_FAILED: + * The frontend search for a signal failed + * + * @DVBFE_ALGO_SEARCH_INVALID: + * The frontend search algorith was probably supplied with invalid + * parameters and the search is an invalid one + * + * @DVBFE_ALGO_SEARCH_ERROR: + * The frontend search algorithm failed due to some error + * + * @DVBFE_ALGO_SEARCH_AGAIN: + * The frontend search algorithm was requested to search again + */ +enum dvbfe_search { + DVBFE_ALGO_SEARCH_SUCCESS = (1 << 0), + DVBFE_ALGO_SEARCH_ASLEEP = (1 << 1), + DVBFE_ALGO_SEARCH_FAILED = (1 << 2), + DVBFE_ALGO_SEARCH_INVALID = (1 << 3), + DVBFE_ALGO_SEARCH_AGAIN = (1 << 4), + DVBFE_ALGO_SEARCH_ERROR = (1 << 31), +}; + +/** + * struct dvb_tuner_ops - Tuner information and callbacks + * + * @info: embedded &struct dvb_tuner_info with tuner properties + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @init: callback function used to initialize the tuner device. + * @sleep: callback function used to put the tuner to sleep. + * @suspend: callback function used to inform that the Kernel will + * suspend. + * @resume: callback function used to inform that the Kernel is + * resuming from suspend. + * @set_params: callback function used to inform the tuner to tune + * into a digital TV channel. The properties to be used + * are stored at &struct dvb_frontend.dtv_property_cache. + * The tuner demod can change the parameters to reflect + * the changes needed for the channel to be tuned, and + * update statistics. This is the recommended way to set + * the tuner parameters and should be used on newer + * drivers. + * @set_analog_params: callback function used to tune into an analog TV + * channel on hybrid tuners. It passes @analog_parameters + * to the driver. + * @set_config: callback function used to send some tuner-specific + * parameters. + * @get_frequency: get the actual tuned frequency + * @get_bandwidth: get the bandwitdh used by the low pass filters + * @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband, + * should return 0. + * @get_status: returns the frontend lock status + * @get_rf_strength: returns the RF signal strength. Used mostly to support + * analog TV and radio. Digital TV should report, instead, + * via DVBv5 API (&struct dvb_frontend.dtv_property_cache). + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @calc_regs: callback function used to pass register data settings + * for simple tuners. Shouldn't be used on newer drivers. + * @set_frequency: Set a new frequency. Shouldn't be used on newer drivers. + * @set_bandwidth: Set a new frequency. Shouldn't be used on newer drivers. + * + * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for + * terrestrial/cable or kHz for satellite. 
+ * + */ +struct dvb_tuner_ops { + + struct dvb_tuner_info info; + + void (*release)(struct dvb_frontend *fe); + int (*init)(struct dvb_frontend *fe); + int (*sleep)(struct dvb_frontend *fe); + int (*suspend)(struct dvb_frontend *fe); + int (*resume)(struct dvb_frontend *fe); + + /* This is the recomended way to set the tuner */ + int (*set_params)(struct dvb_frontend *fe); + int (*set_analog_params)(struct dvb_frontend *fe, struct analog_parameters *p); + + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); + + int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); + int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth); + int (*get_if_frequency)(struct dvb_frontend *fe, u32 *frequency); + +#define TUNER_STATUS_LOCKED 1 +#define TUNER_STATUS_STEREO 2 + int (*get_status)(struct dvb_frontend *fe, u32 *status); + int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength); + int (*get_afc)(struct dvb_frontend *fe, s32 *afc); + + /* + * This is support for demods like the mt352 - fills out the supplied + * buffer with what to write. + * + * Don't use on newer drivers. + */ + int (*calc_regs)(struct dvb_frontend *fe, u8 *buf, int buf_len); + + /* + * These are provided separately from set_params in order to + * facilitate silicon tuners which require sophisticated tuning loops, + * controlling each parameter separately. + * + * Don't use on newer drivers. + */ + int (*set_frequency)(struct dvb_frontend *fe, u32 frequency); + int (*set_bandwidth)(struct dvb_frontend *fe, u32 bandwidth); +}; + +/** + * struct analog_demod_info - Information struct for analog TV part of the demod + * + * @name: Name of the analog TV demodulator + */ +struct analog_demod_info { + char *name; +}; + +/** + * struct analog_demod_ops - Demodulation information and callbacks for + * analog TV and radio + * + * @info: pointer to struct analog_demod_info + * @set_params: callback function used to inform the demod to set the + * demodulator parameters needed to decode an analog or + * radio channel. The properties are passed via + * &struct analog_params. + * @has_signal: returns 0xffff if has signal, or 0 if it doesn't. + * @get_afc: Used only by analog TV core. Reports the frequency + * drift due to AFC. + * @tuner_status: callback function that returns tuner status bits, e. g. + * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO. + * @standby: set the tuner to standby mode. + * @release: callback function called when frontend is detached. + * drivers should free any allocated memory. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @set_config: callback function used to send some tuner-specific + * parameters. 
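To make the &struct dvb_tuner_ops callbacks above concrete, here is a hedged sketch of a silicon tuner driver. The "foo" names and the foo_tuner_program_pll() helper are invented; only the ops layout and the use of the property cache follow the documentation.

#include <media/dvb_frontend.h>

/* Invented helper standing in for the real register programming. */
int foo_tuner_program_pll(void *priv, u32 frequency_hz, u32 bw_hz);

static int foo_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* c->frequency is in Hz for terrestrial/cable, kHz for satellite. */
	return foo_tuner_program_pll(fe->tuner_priv, c->frequency,
				     c->bandwidth_hz);
}

static int foo_tuner_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	*frequency = 4570000;	/* fixed 4.57 MHz IF, purely as an example */
	return 0;
}

static const struct dvb_tuner_ops foo_tuner_ops = {
	.info = {
		.name          = "Foo silicon tuner",
		.frequency_min = 174000000,
		.frequency_max = 862000000,
	},
	.set_params       = foo_tuner_set_params,
	.get_if_frequency = foo_tuner_get_if_frequency,
};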
+ */
+struct analog_demod_ops {
+
+	struct analog_demod_info info;
+
+	void (*set_params)(struct dvb_frontend *fe,
+			   struct analog_parameters *params);
+	int (*has_signal)(struct dvb_frontend *fe, u16 *signal);
+	int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
+	void (*tuner_status)(struct dvb_frontend *fe);
+	void (*standby)(struct dvb_frontend *fe);
+	void (*release)(struct dvb_frontend *fe);
+	int (*i2c_gate_ctrl)(struct dvb_frontend *fe, int enable);
+
+	/** This is to allow setting tuner-specific configuration */
+	int (*set_config)(struct dvb_frontend *fe, void *priv_cfg);
+};
+
+struct dtv_frontend_properties;
+
+
+/**
+ * struct dvb_frontend_ops - Demodulation information and callbacks for
+ *	digital TV
+ *
+ * @info: embedded &struct dvb_frontend_info with frontend properties
+ * @delsys: Delivery systems supported by the frontend
+ * @detach: callback function called when frontend is detached.
+ *	drivers should clean up, but not yet free the &struct
+ *	dvb_frontend allocation.
+ * @release: callback function called when frontend is ready to be
+ *	freed.
+ *	drivers should free any allocated memory.
+ * @release_sec: callback function requesting that the Satellite Equipment
+ *	Control (SEC) driver release and free any memory
+ *	allocated by the driver.
+ * @init: callback function used to initialize the tuner device.
+ * @sleep: callback function used to put the tuner to sleep.
+ * @write: callback function used by some demod legacy drivers to
+ *	allow other drivers to write data into their registers.
+ *	Should not be used on new drivers.
+ * @tune: callback function used by demod drivers that use
+ *	@DVBFE_ALGO_HW to tune into a frequency.
+ * @get_frontend_algo: returns the desired hardware algorithm.
+ * @set_frontend: callback function used to inform the demod to set the
+ *	parameters for demodulating a digital TV channel.
+ *	The properties to be used are stored at &struct
+ *	dvb_frontend.dtv_property_cache. The demod can change
+ *	the parameters to reflect the changes needed for the
+ *	channel to be decoded, and update statistics.
+ * @get_tune_settings: callback function used to get the tuning settings, as
+ *	a &struct dvb_frontend_tune_settings. Used only by the
+ *	swzigzag logic.
+ * @get_frontend: callback function used to report the parameters
+ *	actually in use. The properties in use are stored at
+ *	&struct dvb_frontend.dtv_property_cache; the callback
+ *	may also update statistics. Please notice that it
+ *	should not return an error code if the statistics are
+ *	not available because the demod is not locked.
+ * @read_status: returns the locking status of the frontend.
+ * @read_ber: legacy callback function to return the bit error rate.
+ *	Newer drivers should provide such info via DVBv5 API,
+ *	e. g. @set_frontend/@get_frontend, implementing this
+ *	callback only if DVBv3 API compatibility is wanted.
+ * @read_signal_strength: legacy callback function to return the signal
+ *	strength. Newer drivers should provide such info via
+ *	DVBv5 API, e. g. @set_frontend/@get_frontend,
+ *	implementing this callback only if DVBv3 API
+ *	compatibility is wanted.
+ * @read_snr: legacy callback function to return the Signal/Noise
+ *	ratio. Newer drivers should provide such info via
+ *	DVBv5 API, e. g. @set_frontend/@get_frontend,
+ *	implementing this callback only if DVBv3 API
+ *	compatibility is wanted.
+ * @read_ucblocks: legacy callback function to return the Uncorrected Error
+ *	Blocks. Newer drivers should provide such info via
+ *	DVBv5 API, e. g. @set_frontend/@get_frontend,
+ *	implementing this callback only if DVBv3 API
+ *	compatibility is wanted.
+ * @diseqc_reset_overload: callback function to implement the + * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite) + * @diseqc_send_master_cmd: callback function to implement the + * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite). + * @diseqc_recv_slave_reply: callback function to implement the + * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite) + * @diseqc_send_burst: callback function to implement the + * FE_DISEQC_SEND_BURST() ioctl (only Satellite). + * @set_tone: callback function to implement the + * FE_SET_TONE() ioctl (only Satellite). + * @set_voltage: callback function to implement the + * FE_SET_VOLTAGE() ioctl (only Satellite). + * @enable_high_lnb_voltage: callback function to implement the + * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite). + * @dishnetwork_send_legacy_command: callback function to implement the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite). + * Drivers should not use this, except when the DVB + * core emulation fails to provide proper support (e.g. + * if @set_voltage takes more than 8ms to work), and + * when backward compatibility with this legacy API is + * required. + * @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C + * mux support instead. + * @ts_bus_ctrl: callback function used to take control of the TS bus. + * @set_lna: callback function to power on/off/auto the LNA. + * @search: callback function used on some custom algo search algos. + * @tuner_ops: pointer to &struct dvb_tuner_ops + * @analog_ops: pointer to &struct analog_demod_ops + */ +struct dvb_frontend_ops { + struct dvb_frontend_info info; + + u8 delsys[MAX_DELSYS]; + + void (*detach)(struct dvb_frontend *fe); + void (*release)(struct dvb_frontend* fe); + void (*release_sec)(struct dvb_frontend* fe); + + int (*init)(struct dvb_frontend* fe); + int (*sleep)(struct dvb_frontend* fe); + + int (*write)(struct dvb_frontend* fe, const u8 buf[], int len); + + /* if this is set, it overrides the default swzigzag */ + int (*tune)(struct dvb_frontend* fe, + bool re_tune, + unsigned int mode_flags, + unsigned int *delay, + enum fe_status *status); + + /* get frontend tuning algorithm from the module */ + enum dvbfe_algo (*get_frontend_algo)(struct dvb_frontend *fe); + + /* these two are only used for the swzigzag code */ + int (*set_frontend)(struct dvb_frontend *fe); + int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings); + + int (*get_frontend)(struct dvb_frontend *fe, + struct dtv_frontend_properties *props); + + int (*read_status)(struct dvb_frontend *fe, enum fe_status *status); + int (*read_ber)(struct dvb_frontend* fe, u32* ber); + int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength); + int (*read_snr)(struct dvb_frontend* fe, u16* snr); + int (*read_ucblocks)(struct dvb_frontend* fe, u32* ucblocks); + + int (*diseqc_reset_overload)(struct dvb_frontend* fe); + int (*diseqc_send_master_cmd)(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd); + int (*diseqc_recv_slave_reply)(struct dvb_frontend* fe, struct dvb_diseqc_slave_reply* reply); + int (*diseqc_send_burst)(struct dvb_frontend *fe, + enum fe_sec_mini_cmd minicmd); + int (*set_tone)(struct dvb_frontend *fe, enum fe_sec_tone_mode tone); + int (*set_voltage)(struct dvb_frontend *fe, + enum fe_sec_voltage voltage); + int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg); + int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd); + int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable); + 
int (*ts_bus_ctrl)(struct dvb_frontend* fe, int acquire); + int (*set_lna)(struct dvb_frontend *); + + /* + * These callbacks are for devices that implement their own + * tuning algorithms, rather than a simple swzigzag + */ + enum dvbfe_search (*search)(struct dvb_frontend *fe); + + struct dvb_tuner_ops tuner_ops; + struct analog_demod_ops analog_ops; +}; + +#ifdef __DVB_CORE__ +#define MAX_EVENT 8 + +/* Used only internally at dvb_frontend.c */ +struct dvb_fe_events { + struct dvb_frontend_event events[MAX_EVENT]; + int eventw; + int eventr; + int overflow; + wait_queue_head_t wait_queue; + struct mutex mtx; +}; +#endif + +/** + * struct dtv_frontend_properties - contains a list of properties that are + * specific to a digital TV standard. + * + * @frequency: frequency in Hz for terrestrial/cable or in kHz for + * Satellite + * @modulation: Frontend modulation type + * @voltage: SEC voltage (only Satellite) + * @sectone: SEC tone mode (only Satellite) + * @inversion: Spectral inversion + * @fec_inner: Forward error correction inner Code Rate + * @transmission_mode: Transmission Mode + * @bandwidth_hz: Bandwidth, in Hz. A zero value means that userspace + * wants to autodetect. + * @guard_interval: Guard Interval + * @hierarchy: Hierarchy + * @symbol_rate: Symbol Rate + * @code_rate_HP: high priority stream code rate + * @code_rate_LP: low priority stream code rate + * @pilot: Enable/disable/autodetect pilot tones + * @rolloff: Rolloff factor (alpha) + * @delivery_system: FE delivery system (e. g. digital TV standard) + * @interleaving: interleaving + * @isdbt_partial_reception: ISDB-T partial reception (only ISDB standard) + * @isdbt_sb_mode: ISDB-T Sound Broadcast (SB) mode (only ISDB standard) + * @isdbt_sb_subchannel: ISDB-T SB subchannel (only ISDB standard) + * @isdbt_sb_segment_idx: ISDB-T SB segment index (only ISDB standard) + * @isdbt_sb_segment_count: ISDB-T SB segment count (only ISDB standard) + * @isdbt_layer_enabled: ISDB Layer enabled (only ISDB standard) + * @layer: ISDB per-layer data (only ISDB standard) + * @layer.segment_count: Segment Count; + * @layer.fec: per layer code rate; + * @layer.modulation: per layer modulation; + * @layer.interleaving: per layer interleaving. + * @stream_id: If different than zero, enable substream filtering, if + * hardware supports (DVB-S2 and DVB-T2). + * @scrambling_sequence_index: Carries the index of the DVB-S2 physical layer + * scrambling sequence. 
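The following hedged sketch shows how a demodulator driver might fill a minimal &struct dvb_frontend_ops. The "foo" demod is imaginary; the delivery system and callbacks are just the documented fields in use.

#include <media/dvb_frontend.h>

static int foo_demod_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	/* Let the tuner follow the cached parameters first. */
	if (fe->ops.tuner_ops.set_params)
		fe->ops.tuner_ops.set_params(fe);

	/*
	 * A real driver would now program the demod from c->frequency,
	 * c->bandwidth_hz and friends; here we only sanity-check.
	 */
	return c->frequency ? 0 : -EINVAL;
}

static int foo_demod_read_status(struct dvb_frontend *fe,
				 enum fe_status *status)
{
	/* Pretend the hardware reported a full lock. */
	*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
		  FE_HAS_SYNC | FE_HAS_LOCK;
	return 0;
}

static const struct dvb_frontend_ops foo_demod_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name = "Foo DVB-T demodulator",
	},
	.set_frontend = foo_demod_set_frontend,
	.read_status  = foo_demod_read_status,
};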
+ * @atscmh_fic_ver: Version number of the FIC (Fast Information Channel)
+ *	signaling data (only ATSC-M/H)
+ * @atscmh_parade_id: Parade identification number (only ATSC-M/H)
+ * @atscmh_nog: Number of MH groups per MH subframe for a designated
+ *	parade (only ATSC-M/H)
+ * @atscmh_tnog: Total number of MH groups including all MH groups
+ *	belonging to all MH parades in one MH subframe (only ATSC-M/H)
+ * @atscmh_sgn: Start group number (only ATSC-M/H)
+ * @atscmh_prc: Parade repetition cycle (only ATSC-M/H)
+ * @atscmh_rs_frame_mode: Reed Solomon (RS) frame mode (only ATSC-M/H)
+ * @atscmh_rs_frame_ensemble: RS frame ensemble (only ATSC-M/H)
+ * @atscmh_rs_code_mode_pri: RS code mode pri (only ATSC-M/H)
+ * @atscmh_rs_code_mode_sec: RS code mode sec (only ATSC-M/H)
+ * @atscmh_sccc_block_mode: Series Concatenated Convolutional Code (SCCC)
+ *	Block Mode (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_a: SCCC code mode A (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_b: SCCC code mode B (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_c: SCCC code mode C (only ATSC-M/H)
+ * @atscmh_sccc_code_mode_d: SCCC code mode D (only ATSC-M/H)
+ * @lna: Power ON/OFF/AUTO the Low-Noise Amplifier (LNA)
+ * @strength: DVBv5 API statistics: Signal Strength
+ * @cnr: DVBv5 API statistics: Signal to Noise ratio of the
+ *	(main) carrier
+ * @pre_bit_error: DVBv5 API statistics: pre-Viterbi bit error count
+ * @pre_bit_count: DVBv5 API statistics: pre-Viterbi bit count
+ * @post_bit_error: DVBv5 API statistics: post-Viterbi bit error count
+ * @post_bit_count: DVBv5 API statistics: post-Viterbi bit count
+ * @block_error: DVBv5 API statistics: block error count
+ * @block_count: DVBv5 API statistics: block count
+ *
+ * NOTE: derived statistics like Uncorrected Error blocks (UCE) are
+ * calculated in userspace.
+ *
+ * Only a subset of the properties is needed for a given delivery system.
+ * For more info, consult the media_api.html with the documentation of the
+ * Userspace API.
+ */ +struct dtv_frontend_properties { + u32 frequency; + enum fe_modulation modulation; + + enum fe_sec_voltage voltage; + enum fe_sec_tone_mode sectone; + enum fe_spectral_inversion inversion; + enum fe_code_rate fec_inner; + enum fe_transmit_mode transmission_mode; + u32 bandwidth_hz; /* 0 = AUTO */ + enum fe_guard_interval guard_interval; + enum fe_hierarchy hierarchy; + u32 symbol_rate; + enum fe_code_rate code_rate_HP; + enum fe_code_rate code_rate_LP; + + enum fe_pilot pilot; + enum fe_rolloff rolloff; + + enum fe_delivery_system delivery_system; + + enum fe_interleaving interleaving; + + /* ISDB-T specifics */ + u8 isdbt_partial_reception; + u8 isdbt_sb_mode; + u8 isdbt_sb_subchannel; + u32 isdbt_sb_segment_idx; + u32 isdbt_sb_segment_count; + u8 isdbt_layer_enabled; + struct { + u8 segment_count; + enum fe_code_rate fec; + enum fe_modulation modulation; + u8 interleaving; + } layer[3]; + + /* Multistream specifics */ + u32 stream_id; + + /* Physical Layer Scrambling specifics */ + u32 scrambling_sequence_index; + + /* ATSC-MH specifics */ + u8 atscmh_fic_ver; + u8 atscmh_parade_id; + u8 atscmh_nog; + u8 atscmh_tnog; + u8 atscmh_sgn; + u8 atscmh_prc; + + u8 atscmh_rs_frame_mode; + u8 atscmh_rs_frame_ensemble; + u8 atscmh_rs_code_mode_pri; + u8 atscmh_rs_code_mode_sec; + u8 atscmh_sccc_block_mode; + u8 atscmh_sccc_code_mode_a; + u8 atscmh_sccc_code_mode_b; + u8 atscmh_sccc_code_mode_c; + u8 atscmh_sccc_code_mode_d; + + u32 lna; + + /* statistics data */ + struct dtv_fe_stats strength; + struct dtv_fe_stats cnr; + struct dtv_fe_stats pre_bit_error; + struct dtv_fe_stats pre_bit_count; + struct dtv_fe_stats post_bit_error; + struct dtv_fe_stats post_bit_count; + struct dtv_fe_stats block_error; + struct dtv_fe_stats block_count; +}; + +#define DVB_FE_NO_EXIT 0 +#define DVB_FE_NORMAL_EXIT 1 +#define DVB_FE_DEVICE_REMOVED 2 +#define DVB_FE_DEVICE_RESUME 3 + +/** + * struct dvb_frontend - Frontend structure to be used on drivers. + * + * @refcount: refcount to keep track of &struct dvb_frontend + * references + * @ops: embedded &struct dvb_frontend_ops + * @dvb: pointer to &struct dvb_adapter + * @demodulator_priv: demod private data + * @tuner_priv: tuner private data + * @frontend_priv: frontend private data + * @sec_priv: SEC private data + * @analog_demod_priv: Analog demod private data + * @dtv_property_cache: embedded &struct dtv_frontend_properties + * @callback: callback function used on some drivers to call + * either the tuner or the demodulator. + * @id: Frontend ID + * @exit: Used to inform the DVB core that the frontend + * thread should exit (usually, means that the hardware + * got disconnected. + */ + +struct dvb_frontend { + struct kref refcount; + struct dvb_frontend_ops ops; + struct dvb_adapter *dvb; + void *demodulator_priv; + void *tuner_priv; + void *frontend_priv; + void *sec_priv; + void *analog_demod_priv; + struct dtv_frontend_properties dtv_property_cache; +#define DVB_FRONTEND_COMPONENT_TUNER 0 +#define DVB_FRONTEND_COMPONENT_DEMOD 1 + int (*callback)(void *adapter_priv, int component, int cmd, int arg); + int id; + unsigned int exit; +}; + +/** + * dvb_register_frontend() - Registers a DVB frontend at the adapter + * + * @dvb: pointer to &struct dvb_adapter + * @fe: pointer to &struct dvb_frontend + * + * Allocate and initialize the private data needed by the frontend core to + * manage the frontend and calls dvb_register_device() to register a new + * frontend. 
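Because &struct dvb_frontend.dtv_property_cache is also where DVBv5 statistics are reported, a demod's status path typically fills the dtv_fe_stats members shown above. A hedged sketch with invented values and a "foo" name:

#include <media/dvb_frontend.h>

static void foo_demod_update_stats(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	c->strength.len = 1;
	c->strength.stat[0].scale = FE_SCALE_DECIBEL;
	c->strength.stat[0].svalue = -42000;	/* -42.000 dBm, placeholder */

	c->cnr.len = 1;
	c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
	c->cnr.stat[0].svalue = 28500;		/* 28.500 dB, placeholder */

	/* Counters this imaginary hardware cannot provide. */
	c->block_error.len = 1;
	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}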
It also cleans the property cache that stores the frontend
+ * parameters and selects the first available delivery system.
+ */
+int dvb_register_frontend(struct dvb_adapter *dvb,
+			  struct dvb_frontend *fe);
+
+/**
+ * dvb_unregister_frontend() - Unregisters a DVB frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * Stops the frontend kthread, calls dvb_unregister_device() and frees the
+ * private frontend data allocated by dvb_register_frontend().
+ *
+ * NOTE: This function doesn't free the memory allocated by the demod,
+ * by the SEC driver and by the tuner. In order to free it, an explicit call to
+ * dvb_frontend_detach() is needed, after calling this function.
+ */
+int dvb_unregister_frontend(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_detach() - Detaches and frees frontend specific data
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function should be called after dvb_unregister_frontend(). It
+ * calls the SEC, tuner and demod release functions:
+ * &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
+ * &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
+ *
+ * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
+ * the module reference count, needed to allow userspace to remove the
+ * previously used DVB frontend modules.
+ */
+void dvb_frontend_detach(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_suspend() - Suspends a Digital TV frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function prepares a Digital TV frontend to suspend.
+ *
+ * In order to prepare the tuner to suspend, if
+ * &dvb_frontend_ops.tuner_ops.suspend\(\) is available, it calls it. Otherwise,
+ * it will call &dvb_frontend_ops.tuner_ops.sleep\(\), if available.
+ *
+ * It will also call &dvb_frontend_ops.sleep\(\) to put the demod to suspend.
+ *
+ * The drivers should also call dvb_frontend_suspend\(\) as part of their
+ * handler for the &device_driver.suspend\(\).
+ */
+int dvb_frontend_suspend(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_resume() - Resumes a Digital TV frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * This function resumes the usual operation of the tuner after resume.
+ *
+ * In order to resume the frontend, it calls the demod &dvb_frontend_ops.init\(\).
+ *
+ * If &dvb_frontend_ops.tuner_ops.resume\(\) is available, it calls it.
+ * Otherwise, it will call &dvb_frontend_ops.tuner_ops.init\(\), if available.
+ *
+ * Once tuner and demods are resumed, it will enforce that the SEC voltage and
+ * tone are restored to their previous values and wake up the frontend's
+ * kthread in order to retune the frontend.
+ *
+ * The drivers should also call dvb_frontend_resume() as part of their
+ * handler for the &device_driver.resume\(\).
+ */
+int dvb_frontend_resume(struct dvb_frontend *fe);
+
+/**
+ * dvb_frontend_reinitialise() - forces a reinitialisation of the frontend
+ *
+ * @fe: pointer to &struct dvb_frontend
+ *
+ * Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
+ * and resets SEC tone and voltage (for Satellite systems).
+ *
+ * NOTE: Currently, this function is used only by one driver (budget-av).
+ * It seems to exist to address some special issue with that specific
+ * frontend.
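Putting the registration and power-management helpers together, a bridge driver would typically do something along these lines. Hedged sketch: the foo_bridge structure and hook names are invented, while the call order follows the documentation above.

#include <media/dvb_frontend.h>

struct foo_bridge {
	struct dvb_adapter adapter;
	struct dvb_frontend *fe;
};

static int foo_bridge_register_fe(struct foo_bridge *dev)
{
	return dvb_register_frontend(&dev->adapter, dev->fe);
}

static void foo_bridge_teardown(struct foo_bridge *dev)
{
	/* Unregister first, then detach to free demod/tuner/SEC data. */
	dvb_unregister_frontend(dev->fe);
	dvb_frontend_detach(dev->fe);
}

/* Assumed to be called from the driver's PM suspend/resume handlers. */
static int foo_bridge_suspend(struct foo_bridge *dev)
{
	return dvb_frontend_suspend(dev->fe);
}

static int foo_bridge_resume(struct foo_bridge *dev)
{
	return dvb_frontend_resume(dev->fe);
}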
+ */ +void dvb_frontend_reinitialise(struct dvb_frontend *fe); + +/** + * dvb_frontend_sleep_until() - Sleep for the amount of time given by + * add_usec parameter + * + * @waketime: pointer to &struct ktime_t + * @add_usec: time to sleep, in microseconds + * + * This function is used to measure the time required for the + * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise + * as possible, as it affects the detection of the dish tone command at the + * satellite subsystem. + * + * Its used internally by the DVB frontend core, in order to emulate + * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\) + * callback. + * + * NOTE: it should not be used at the drivers, as the emulation for the + * legacy callback is provided by the Kernel. The only situation where this + * should be at the drivers is when there are some bugs at the hardware that + * would prevent the core emulation to work. On such cases, the driver would + * be writing a &dvb_frontend_ops.dishnetwork_send_legacy_command\(\) and + * calling this function directly. + */ +void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec); + +#endif diff --git a/include/media/dvb_math.h b/include/media/dvb_math.h new file mode 100644 index 000000000000..8690ec42954d --- /dev/null +++ b/include/media/dvb_math.h @@ -0,0 +1,66 @@ +/* + * dvb-math provides some complex fixed-point math + * operations shared between the dvb related stuff + * + * Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com) + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as + * published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +#ifndef __DVB_MATH_H +#define __DVB_MATH_H + +#include <linux/types.h> + +/** + * intlog2 - computes log2 of a value; the result is shifted left by 24 bits + * + * @value: The value (must be != 0) + * + * to use rational values you can use the following method: + * + * intlog2(value) = intlog2(value * 2^x) - x * 2^24 + * + * Some usecase examples: + * + * intlog2(8) will give 3 << 24 = 3 * 2^24 + * + * intlog2(9) will give 3 << 24 + ... = 3.16... * 2^24 + * + * intlog2(1.5) = intlog2(3) - 2^24 = 0.584... 
* 2^24 + * + * + * return: log2(value) * 2^24 + */ +extern unsigned int intlog2(u32 value); + +/** + * intlog10 - computes log10 of a value; the result is shifted left by 24 bits + * + * @value: The value (must be != 0) + * + * to use rational values you can use the following method: + * + * intlog10(value) = intlog10(value * 10^x) - x * 2^24 + * + * An usecase example: + * + * intlog10(1000) will give 3 << 24 = 3 * 2^24 + * + * due to the implementation intlog10(1000) might be not exactly 3 * 2^24 + * + * look at intlog2 for similar examples + * + * return: log10(value) * 2^24 + */ +extern unsigned int intlog10(u32 value); + +#endif diff --git a/include/media/dvb_net.h b/include/media/dvb_net.h new file mode 100644 index 000000000000..5e31d37f25fa --- /dev/null +++ b/include/media/dvb_net.h @@ -0,0 +1,93 @@ +/* + * dvb_net.h + * + * Copyright (C) 2001 Ralph Metzler for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVB_NET_H_ +#define _DVB_NET_H_ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> + +#include <media/dvbdev.h> + +#define DVB_NET_DEVICES_MAX 10 + +#ifdef CONFIG_DVB_NET + +/** + * struct dvb_net - describes a DVB network interface + * + * @dvbdev: pointer to &struct dvb_device. + * @device: array of pointers to &struct net_device. + * @state: array of integers to each net device. A value + * different than zero means that the interface is + * in usage. + * @exit: flag to indicate when the device is being removed. + * @demux: pointer to &struct dmx_demux. + * @ioctl_mutex: protect access to this struct. + * + * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network + * devices. + */ + +struct dvb_net { + struct dvb_device *dvbdev; + struct net_device *device[DVB_NET_DEVICES_MAX]; + int state[DVB_NET_DEVICES_MAX]; + unsigned int exit:1; + struct dmx_demux *demux; + struct mutex ioctl_mutex; +}; + +/** + * dvb_net_init - nitializes a digital TV network device and registers it. + * + * @adap: pointer to &struct dvb_adapter. + * @dvbnet: pointer to &struct dvb_net. + * @dmxdemux: pointer to &struct dmx_demux. + */ +int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet, + struct dmx_demux *dmxdemux); + +/** + * dvb_net_release - releases a digital TV network device and unregisters it. + * + * @dvbnet: pointer to &struct dvb_net. 
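Wiring the dvb_net support documented above into a bridge driver is a two-call affair. Hedged sketch with an invented "foo" device; the demux pointer would come from the driver's own demux setup.

#include <media/dvb_net.h>

struct foo_dev {
	struct dvb_adapter adapter;
	struct dvb_net dvbnet;
};

static int foo_net_setup(struct foo_dev *dev, struct dmx_demux *dmx)
{
	return dvb_net_init(&dev->adapter, &dev->dvbnet, dmx);
}

static void foo_net_teardown(struct foo_dev *dev)
{
	dvb_net_release(&dev->dvbnet);
}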
+ */ +void dvb_net_release(struct dvb_net *dvbnet); + +#else + +struct dvb_net { + struct dvb_device *dvbdev; +}; + +static inline void dvb_net_release(struct dvb_net *dvbnet) +{ +} + +static inline int dvb_net_init(struct dvb_adapter *adap, + struct dvb_net *dvbnet, struct dmx_demux *dmx) +{ + return 0; +} + +#endif /* ifdef CONFIG_DVB_NET */ + +#endif diff --git a/include/media/dvb_ringbuffer.h b/include/media/dvb_ringbuffer.h new file mode 100644 index 000000000000..8ed6bcc3a56e --- /dev/null +++ b/include/media/dvb_ringbuffer.h @@ -0,0 +1,280 @@ +/* + * + * dvb_ringbuffer.h: ring buffer implementation for the dvb driver + * + * Copyright (C) 2003 Oliver Endriss + * Copyright (C) 2004 Andrew de Quincey + * + * based on code originally found in av7110.c & dvb_ci.c: + * Copyright (C) 1999-2003 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +#ifndef _DVB_RINGBUFFER_H_ +#define _DVB_RINGBUFFER_H_ + +#include <linux/spinlock.h> +#include <linux/wait.h> + +/** + * struct dvb_ringbuffer - Describes a ring buffer used at DVB framework + * + * @data: Area were the ringbuffer data is written + * @size: size of the ringbuffer + * @pread: next position to read + * @pwrite: next position to write + * @error: used by ringbuffer clients to indicate that an error happened. + * @queue: Wait queue used by ringbuffer clients to indicate when buffer + * was filled + * @lock: Spinlock used to protect the ringbuffer + */ +struct dvb_ringbuffer { + u8 *data; + ssize_t size; + ssize_t pread; + ssize_t pwrite; + int error; + + wait_queue_head_t queue; + spinlock_t lock; +}; + +#define DVB_RINGBUFFER_PKTHDRSIZE 3 + +/** + * dvb_ringbuffer_init - initialize ring buffer, lock and queue + * + * @rbuf: pointer to struct dvb_ringbuffer + * @data: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + */ +extern void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, + size_t len); + +/** + * dvb_ringbuffer_empty - test whether buffer is empty + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_free - returns the number of free bytes in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of free bytes in the buffer + */ +extern ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_avail - returns the number of bytes waiting in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Return: number of bytes waiting in the buffer + */ +extern ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_reset - resets the ringbuffer to initial state + * + * @rbuf: pointer to struct dvb_ringbuffer + * + * Resets the read and write pointers to zero and flush the buffer. 
+ * + * This counts as a read and write operation + */ +extern void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf); + +/* + * read routines & macros + */ + +/** + * dvb_ringbuffer_flush - flush buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf); + +/** + * dvb_ringbuffer_flush_spinlock_wakeup- flush buffer protected by spinlock + * and wake-up waiting task(s) + * + * @rbuf: pointer to struct dvb_ringbuffer + */ +extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf); + +/** + * DVB_RINGBUFFER_PEEK - peek at byte @offs in the buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @offs: offset inside the ringbuffer + */ +#define DVB_RINGBUFFER_PEEK(rbuf, offs) \ + ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size]) + +/** + * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes + * + * @rbuf: pointer to struct dvb_ringbuffer + * @num: number of bytes to advance + */ +#define DVB_RINGBUFFER_SKIP(rbuf, num) {\ + (rbuf)->pread = ((rbuf)->pread + (num)) % (rbuf)->size;\ +} + +/** + * dvb_ringbuffer_read_user - Reads a buffer into a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_to_user(). + * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, + u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_read - Reads a buffer into a pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be stored + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * Return: number of bytes transferred or -EFAULT + */ +extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, + u8 *buf, size_t len); + +/* + * write routines & macros + */ + +/** + * DVB_RINGBUFFER_WRITE_BYTE - write single byte to ring buffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @byte: byte to write + */ +#define DVB_RINGBUFFER_WRITE_BYTE(rbuf, byte) \ + { (rbuf)->data[(rbuf)->pwrite] = (byte); \ + (rbuf)->pwrite = ((rbuf)->pwrite + 1) % (rbuf)->size; } + +/** + * dvb_ringbuffer_write - Writes a buffer into the ringbuffer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the Kernel space + * + * return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_write_user - Writes a buffer received via a user pointer + * + * @rbuf: pointer to struct dvb_ringbuffer + * @buf: pointer to the buffer where the data will be read + * @len: bytes from ring buffer into @buf + * + * This variant assumes that the buffer is a memory at the userspace. So, + * it will internally call copy_from_user(). + * + * Return: number of bytes transferred or -EFAULT + */ +extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + const u8 __user *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_write - Write a packet into the ringbuffer. + * + * @rbuf: Ringbuffer to write to. + * @buf: Buffer to write. 
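A typical producer/consumer arrangement with the routines above: the streaming interrupt path writes, while a reader in process context blocks on @queue. Hedged sketch; the buffer size and "foo" names are arbitrary.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <media/dvb_ringbuffer.h>

#define FOO_RB_SIZE	(188 * 1024)

static int foo_rb_setup(struct dvb_ringbuffer *rb)
{
	void *mem = vmalloc(FOO_RB_SIZE);

	if (!mem)
		return -ENOMEM;
	dvb_ringbuffer_init(rb, mem, FOO_RB_SIZE);
	return 0;
}

/* Producer side, e.g. called from an URB/DMA completion handler. */
static void foo_rb_produce(struct dvb_ringbuffer *rb, const u8 *buf, size_t len)
{
	if (dvb_ringbuffer_free(rb) < (ssize_t)len)
		return;		/* overflow: drop the data */
	dvb_ringbuffer_write(rb, buf, len);
	wake_up(&rb->queue);
}

/* Consumer side, process context; returns bytes copied or -ERESTARTSYS. */
static ssize_t foo_rb_consume(struct dvb_ringbuffer *rb, u8 *dst, size_t len)
{
	ssize_t avail;

	if (wait_event_interruptible(rb->queue, !dvb_ringbuffer_empty(rb)))
		return -ERESTARTSYS;

	avail = dvb_ringbuffer_avail(rb);
	if ((size_t)avail < len)
		len = avail;
	dvb_ringbuffer_read(rb, dst, len);
	return len;
}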
+ * @len: Length of buffer (currently limited to 65535 bytes max). + * + * Return: Number of bytes written, or -EFAULT, -ENOMEM, -EVINAL. + */ +extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read_user - Read from a packet in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + * + * .. note:: + * + * unlike dvb_ringbuffer_read(), this does **NOT** update the read pointer + * in the ringbuffer. You must use dvb_ringbuffer_pkt_dispose() to mark a + * packet as no longer required. + */ +extern ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, + size_t idx, + int offset, u8 __user *buf, + size_t len); + +/** + * dvb_ringbuffer_pkt_read - Read from a packet in the ringbuffer. + * Note: unlike dvb_ringbuffer_read_user(), this DOES update the read pointer + * in the ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + * @offset: Offset into packet to read from. + * @buf: Destination buffer for data. + * @len: Size of destination buffer. + * + * Return: Number of bytes read, or -EFAULT. + */ +extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, + int offset, u8 *buf, size_t len); + +/** + * dvb_ringbuffer_pkt_dispose - Dispose of a packet in the ring buffer. + * + * @rbuf: Ring buffer concerned. + * @idx: Packet index as returned by dvb_ringbuffer_pkt_next(). + */ +extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx); + +/** + * dvb_ringbuffer_pkt_next - Get the index of the next packet in a ringbuffer. + * + * @rbuf: Ringbuffer concerned. + * @idx: Previous packet index, or -1 to return the first packet index. + * @pktlen: On success, will be updated to contain the length of the packet + * in bytes. + * returns Packet index (if >=0), or -1 if no packets available. + */ +extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, + size_t idx, size_t *pktlen); + +#endif /* _DVB_RINGBUFFER_H_ */ diff --git a/include/media/dvb_vb2.h b/include/media/dvb_vb2.h new file mode 100644 index 000000000000..dda61af7c4cd --- /dev/null +++ b/include/media/dvb_vb2.h @@ -0,0 +1,266 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * dvb-vb2.h - DVB driver helper framework for streaming I/O + * + * Copyright (C) 2015 Samsung Electronics + * + * Author: jh1009.sung@samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _DVB_VB2_H +#define _DVB_VB2_H + +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/dvb/dmx.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-dma-contig.h> +#include <media/videobuf2-vmalloc.h> + +/** + * enum dvb_buf_type - types of Digital TV memory-mapped buffers + * + * @DVB_BUF_TYPE_CAPTURE: buffer is filled by the Kernel, + * with a received Digital TV stream + */ +enum dvb_buf_type { + DVB_BUF_TYPE_CAPTURE = 1, +}; + +/** + * enum dvb_vb2_states - states to control VB2 state machine + * @DVB_VB2_STATE_NONE: + * VB2 engine not initialized yet, init failed or VB2 was released. + * @DVB_VB2_STATE_INIT: + * VB2 engine initialized. 
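The packet-oriented calls above are meant to be used as a small loop: fetch the next packet index, copy it out, then dispose of it. A hedged sketch; the scratch-buffer handling is illustrative only.

#include <linux/kernel.h>
#include <media/dvb_ringbuffer.h>

static void foo_drain_packets(struct dvb_ringbuffer *rb, u8 *scratch,
			      size_t scratch_len)
{
	ssize_t idx;
	size_t pktlen;

	/* Passing -1 asks for the first available packet index. */
	while ((idx = dvb_ringbuffer_pkt_next(rb, -1, &pktlen)) >= 0) {
		size_t len = min(pktlen, scratch_len);

		dvb_ringbuffer_pkt_read(rb, idx, 0, scratch, len);
		/* ... process 'len' bytes from scratch here ... */
		dvb_ringbuffer_pkt_dispose(rb, idx);
	}
}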
+ * @DVB_VB2_STATE_REQBUFS: + * Buffers were requested + * @DVB_VB2_STATE_STREAMON: + * VB2 is streaming. Callers should not check it directly. Instead, + * they should use dvb_vb2_is_streaming(). + * + * Note: + * + * Callers should not touch at the state machine directly. This + * is handled inside dvb_vb2.c. + */ +enum dvb_vb2_states { + DVB_VB2_STATE_NONE = 0x0, + DVB_VB2_STATE_INIT = 0x1, + DVB_VB2_STATE_REQBUFS = 0x2, + DVB_VB2_STATE_STREAMON = 0x4, +}; + +#define DVB_VB2_NAME_MAX (20) + +/** + * struct dvb_buffer - video buffer information for v4l2. + * + * @vb: embedded struct &vb2_buffer. + * @list: list of &struct dvb_buffer. + */ +struct dvb_buffer { + struct vb2_buffer vb; + struct list_head list; +}; + +/** + * struct dvb_vb2_ctx - control struct for VB2 handler + * @vb_q: pointer to &struct vb2_queue with videobuf2 queue. + * @mutex: mutex to serialize vb2 operations. Used by + * vb2 core %wait_prepare and %wait_finish operations. + * @slock: spin lock used to protect buffer filling at dvb_vb2.c. + * @dvb_q: List of buffers that are not filled yet. + * @buf: Pointer to the buffer that are currently being filled. + * @offset: index to the next position at the @buf to be filled. + * @remain: How many bytes are left to be filled at @buf. + * @state: bitmask of buffer states as defined by &enum dvb_vb2_states. + * @buf_siz: size of each VB2 buffer. + * @buf_cnt: number of VB2 buffers. + * @nonblocking: + * If different than zero, device is operating on non-blocking + * mode. + * @name: name of the device type. Currently, it can either be + * "dvr" or "demux_filter". + */ +struct dvb_vb2_ctx { + struct vb2_queue vb_q; + struct mutex mutex; + spinlock_t slock; + struct list_head dvb_q; + struct dvb_buffer *buf; + int offset; + int remain; + int state; + int buf_siz; + int buf_cnt; + int nonblocking; + char name[DVB_VB2_NAME_MAX + 1]; +}; + +#ifndef DVB_MMAP +static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx, + const char *name, int non_blocking) +{ + return 0; +}; +static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx) +{ + return 0; +}; +#define dvb_vb2_is_streaming(ctx) (0) +#define dvb_vb2_fill_buffer(ctx, file, wait) (0) + +static inline unsigned int dvb_vb2_poll(struct dvb_vb2_ctx *ctx, + struct file *file, + poll_table *wait) +{ + return 0; +} +#else +/** + * dvb_vb2_init - initializes VB2 handler + * + * @ctx: control struct for VB2 handler + * @name: name for the VB2 handler + * @non_blocking: + * if not zero, it means that the device is at non-blocking mode + */ +int dvb_vb2_init(struct dvb_vb2_ctx *ctx, const char *name, int non_blocking); + +/** + * dvb_vb2_release - Releases the VB2 handler allocated resources and + * put @ctx at DVB_VB2_STATE_NONE state. + * @ctx: control struct for VB2 handler + */ +int dvb_vb2_release(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_is_streaming - checks if the VB2 handler is streaming + * @ctx: control struct for VB2 handler + * + * Return: 0 if not streaming, 1 otherwise. + */ +int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_fill_buffer - fills a VB2 buffer + * @ctx: control struct for VB2 handler + * @src: place where the data is stored + * @len: number of bytes to be copied from @src + */ +int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, + const unsigned char *src, int len); + +/** + * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @file: &struct file argument passed to the poll + * file operation handler. 
+ * @wait: &poll_table wait argument passed to the poll + * file operation handler. + * + * Implements poll syscall() logic. + */ +unsigned int dvb_vb2_poll(struct dvb_vb2_ctx *ctx, struct file *file, + poll_table *wait); +#endif + +/** + * dvb_vb2_stream_on() - Wrapper to vb2_core_streamon() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Starts dvb streaming + */ +int dvb_vb2_stream_on(struct dvb_vb2_ctx *ctx); +/** + * dvb_vb2_stream_off() - Wrapper to vb2_core_streamoff() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * + * Stops dvb streaming + */ +int dvb_vb2_stream_off(struct dvb_vb2_ctx *ctx); + +/** + * dvb_vb2_reqbufs() - Wrapper to vb2_core_reqbufs() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @req: &struct dmx_requestbuffers passed from userspace in + * order to handle &DMX_REQBUFS. + * + * Initiate streaming by requesting a number of buffers. Also used to + * free previously requested buffers, is ``req->count`` is zero. + */ +int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req); + +/** + * dvb_vb2_querybuf() - Wrapper to vb2_core_querybuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QUERYBUF. + * + * + */ +int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_expbuf() - Wrapper to vb2_core_expbuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @exp: &struct dmx_exportbuffer passed from userspace in + * order to handle &DMX_EXPBUF. + * + * Export a buffer as a file descriptor. + */ +int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp); + +/** + * dvb_vb2_qbuf() - Wrapper to vb2_core_qbuf() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_QBUF. + * + * Queue a Digital TV buffer as requested by userspace + */ +int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_dqbuf() - Wrapper to vb2_core_dqbuf() for Digital TV + * buffer handling. + * + * @ctx: control struct for VB2 handler + * @b: &struct dmx_buffer passed from userspace in + * order to handle &DMX_DQBUF. + * + * Dequeue a Digital TV buffer to the userspace + */ +int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b); + +/** + * dvb_vb2_mmap() - Wrapper to vb2_mmap() for Digital TV buffer handling. + * + * @ctx: control struct for VB2 handler + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. + * + * map Digital TV video buffers into application address space. + */ +int dvb_vb2_mmap(struct dvb_vb2_ctx *ctx, struct vm_area_struct *vma); + +#endif /* _DVB_VB2_H */ diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h new file mode 100644 index 000000000000..554db879527f --- /dev/null +++ b/include/media/dvbdev.h @@ -0,0 +1,407 @@ +/* + * dvbdev.h + * + * Copyright (C) 2000 Ralph Metzler & Marcus Metzler + * for convergence integrated media GmbH + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Lesser Public License + * as published by the Free Software Foundation; either version 2.1 + * of the License, or (at your option) any later version. 
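When the memory-mapped API is enabled, a demux device's ioctl handler simply forwards the DMX buffer ioctls to the wrappers above. Hedged sketch: the dispatch context is invented, only the dvb_vb2_*() calls and the DMX_* ioctl names come from the headers.

#include <linux/errno.h>
#include <media/dvb_vb2.h>

static int foo_dmx_mmap_ioctl(struct dvb_vb2_ctx *ctx,
			      unsigned int cmd, void *parg)
{
	switch (cmd) {
	case DMX_REQBUFS:
		return dvb_vb2_reqbufs(ctx, parg);
	case DMX_QUERYBUF:
		return dvb_vb2_querybuf(ctx, parg);
	case DMX_EXPBUF:
		return dvb_vb2_expbuf(ctx, parg);
	case DMX_QBUF:
		return dvb_vb2_qbuf(ctx, parg);
	case DMX_DQBUF:
		return dvb_vb2_dqbuf(ctx, parg);
	default:
		return -ENOTTY;
	}
}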
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _DVBDEV_H_ +#define _DVBDEV_H_ + +#include <linux/types.h> +#include <linux/poll.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <media/media-device.h> + +#define DVB_MAJOR 212 + +#if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0 + #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS +#else + #define DVB_MAX_ADAPTERS 16 +#endif + +#define DVB_UNSET (-1) + +/* List of DVB device types */ + +/** + * enum dvb_device_type - type of the Digital TV device + * + * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI) + * @DVB_DEVICE_FRONTEND: Digital TV frontend. + * @DVB_DEVICE_DEMUX: Digital TV demux. + * @DVB_DEVICE_DVR: Digital TV digital video record (DVR). + * @DVB_DEVICE_CA: Digital TV Conditional Access (CA). + * @DVB_DEVICE_NET: Digital TV network. + * + * @DVB_DEVICE_VIDEO: Digital TV video decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_AUDIO: Digital TV audio decoder. + * Deprecated. Used only on av7110-av. + * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD). + * Deprecated. Used only on av7110. + */ +enum dvb_device_type { + DVB_DEVICE_SEC, + DVB_DEVICE_FRONTEND, + DVB_DEVICE_DEMUX, + DVB_DEVICE_DVR, + DVB_DEVICE_CA, + DVB_DEVICE_NET, + + DVB_DEVICE_VIDEO, + DVB_DEVICE_AUDIO, + DVB_DEVICE_OSD, +}; + +#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \ + static short adapter_nr[] = \ + {[0 ... (DVB_MAX_ADAPTERS - 1)] = DVB_UNSET }; \ + module_param_array(adapter_nr, short, NULL, 0444); \ + MODULE_PARM_DESC(adapter_nr, "DVB adapter numbers") + +struct dvb_frontend; + +/** + * struct dvb_adapter - represents a Digital TV adapter using Linux DVB API + * + * @num: Number of the adapter + * @list_head: List with the DVB adapters + * @device_list: List with the DVB devices + * @name: Name of the adapter + * @proposed_mac: proposed MAC address for the adapter + * @priv: private data + * @device: pointer to struct device + * @module: pointer to struct module + * @mfe_shared: mfe shared: indicates mutually exclusive frontends + * Thie usage of this flag is currently deprecated + * @mfe_dvbdev: Frontend device in use, in the case of MFE + * @mfe_lock: Lock to prevent using the other frontends when MFE is + * used. + * @mdev: pointer to struct media_device, used when the media + * controller is used. + * @conn: RF connector. Used only if the device has no separate + * tuner. + * @conn_pads: pointer to struct media_pad associated with @conn; + */ +struct dvb_adapter { + int num; + struct list_head list_head; + struct list_head device_list; + const char *name; + u8 proposed_mac [6]; + void* priv; + + struct device *device; + + struct module *module; + + int mfe_shared; /* indicates mutually exclusive frontends */ + struct dvb_device *mfe_dvbdev; /* frontend device in use */ + struct mutex mfe_lock; /* access lock for thread creation */ + +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + struct media_device *mdev; + struct media_entity *conn; + struct media_pad *conn_pads; +#endif +}; + +/** + * struct dvb_device - represents a DVB device node + * + * @list_head: List head with all DVB devices + * @fops: pointer to struct file_operations + * @adapter: pointer to the adapter that holds this device node + * @type: type of the device, as defined by &enum dvb_device_type. 
+ * @minor: devnode minor number. Major number is always DVB_MAJOR. + * @id: device ID number, inside the adapter + * @readers: Initialized by the caller. Each call to open() in Read Only mode + * decreases this counter by one. + * @writers: Initialized by the caller. Each call to open() in Read/Write + * mode decreases this counter by one. + * @users: Initialized by the caller. Each call to open() in any mode + * decreases this counter by one. + * @wait_queue: wait queue, used to wait for certain events inside one of + * the DVB API callers + * @kernel_ioctl: callback function used to handle ioctl calls from userspace. + * @name: Name to be used for the device at the Media Controller + * @entity: pointer to struct media_entity associated with the device node + * @pads: pointer to struct media_pad associated with @entity; + * @priv: private data + * @intf_devnode: Pointer to media_intf_devnode. Used by the dvbdev core to + * store the MC device node interface + * @tsout_num_entities: Number of Transport Stream output entities + * @tsout_entity: array with MC entities associated to each TS output node + * @tsout_pads: array with the source pads for each @tsout_entity + * + * This structure is used by the DVB core (frontend, CA, net, demux) in + * order to create the device nodes. Usually, driver should not initialize + * this struct diretly. + */ +struct dvb_device { + struct list_head list_head; + const struct file_operations *fops; + struct dvb_adapter *adapter; + enum dvb_device_type type; + int minor; + u32 id; + + /* in theory, 'users' can vanish now, + but I don't want to change too much now... */ + int readers; + int writers; + int users; + + wait_queue_head_t wait_queue; + /* don't really need those !? -- FIXME: use video_usercopy */ + int (*kernel_ioctl)(struct file *file, unsigned int cmd, void *arg); + + /* Needed for media controller register/unregister */ +#if defined(CONFIG_MEDIA_CONTROLLER_DVB) + const char *name; + + /* Allocated and filled inside dvbdev.c */ + struct media_intf_devnode *intf_devnode; + + unsigned tsout_num_entities; + struct media_entity *entity, *tsout_entity; + struct media_pad *pads, *tsout_pads; +#endif + + void *priv; +}; + +/** + * dvb_register_adapter - Registers a new DVB adapter + * + * @adap: pointer to struct dvb_adapter + * @name: Adapter's name + * @module: initialized with THIS_MODULE at the caller + * @device: pointer to struct device that corresponds to the device driver + * @adapter_nums: Array with a list of the numbers for @dvb_register_adapter; + * to select among them. Typically, initialized with: + * DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nums) + */ +int dvb_register_adapter(struct dvb_adapter *adap, const char *name, + struct module *module, struct device *device, + short *adapter_nums); + +/** + * dvb_unregister_adapter - Unregisters a DVB adapter + * + * @adap: pointer to struct dvb_adapter + */ +int dvb_unregister_adapter(struct dvb_adapter *adap); + +/** + * dvb_register_device - Registers a new DVB device + * + * @adap: pointer to struct dvb_adapter + * @pdvbdev: pointer to the place where the new struct dvb_device will be + * stored + * @template: Template used to create &pdvbdev; + * @priv: private data + * @type: type of the device, as defined by &enum dvb_device_type. + * @demux_sink_pads: Number of demux outputs, to be used to create the TS + * outputs via the Media Controller. 
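Adapter registration usually pairs the DVB_DEFINE_MOD_OPT_ADAPTER_NR() macro above with dvb_register_adapter(). A hedged sketch with an invented "foo" driver:

#include <linux/module.h>
#include <media/dvbdev.h>

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

struct foo_dev {
	struct dvb_adapter adapter;
};

static int foo_register_adapter(struct foo_dev *dev, struct device *parent)
{
	return dvb_register_adapter(&dev->adapter, "foo adapter",
				    THIS_MODULE, parent, adapter_nr);
}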
+ */ +int dvb_register_device(struct dvb_adapter *adap, + struct dvb_device **pdvbdev, + const struct dvb_device *template, + void *priv, + enum dvb_device_type type, + int demux_sink_pads); + +/** + * dvb_remove_device - Remove a registered DVB device + * + * This does not free memory. To do that, call dvb_free_device(). + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_remove_device(struct dvb_device *dvbdev); + +/** + * dvb_free_device - Free memory occupied by a DVB device. + * + * Call dvb_unregister_device() before calling this function. + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_free_device(struct dvb_device *dvbdev); + +/** + * dvb_unregister_device - Unregisters a DVB device + * + * This is a combination of dvb_remove_device() and dvb_free_device(). + * Using this function is usually a mistake, and is often an indicator + * for a use-after-free bug (when a userspace process keeps a file + * handle to a detached device). + * + * @dvbdev: pointer to struct dvb_device + */ +void dvb_unregister_device(struct dvb_device *dvbdev); + +#ifdef CONFIG_MEDIA_CONTROLLER_DVB +/** + * dvb_create_media_graph - Creates media graph for the Digital TV part of the + * device. + * + * @adap: pointer to &struct dvb_adapter + * @create_rf_connector: if true, it creates the RF connector too + * + * This function checks all DVB-related functions at the media controller + * entities and creates the needed links for the media graph. It is + * capable of working with multiple tuners or multiple frontends, but it + * won't create links if the device has multiple tuners and multiple frontends + * or if the device has multiple muxes. In such case, the caller driver should + * manually create the remaining links. + */ +__must_check int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector); + +/** + * dvb_register_media_controller - registers a media controller at DVB adapter + * + * @adap: pointer to &struct dvb_adapter + * @mdev: pointer to &struct media_device + */ +static inline void dvb_register_media_controller(struct dvb_adapter *adap, + struct media_device *mdev) +{ + adap->mdev = mdev; +} + +/** + * dvb_get_media_controller - gets the associated media controller + * + * @adap: pointer to &struct dvb_adapter + */ +static inline struct media_device +*dvb_get_media_controller(struct dvb_adapter *adap) +{ + return adap->mdev; +} +#else +static inline +int dvb_create_media_graph(struct dvb_adapter *adap, + bool create_rf_connector) +{ + return 0; +}; +#define dvb_register_media_controller(a, b) {} +#define dvb_get_media_controller(a) NULL +#endif + +/** + * dvb_generic_open - Digital TV open function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. + * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and increment negative use count. + */ +int dvb_generic_open(struct inode *inode, struct file *file); + +/** + * dvb_generic_close - Digital TV close function, used by DVB devices + * + * @inode: pointer to &struct inode. + * @file: pointer to &struct file. + * + * Checks if a DVB devnode is still valid, and if the permissions are + * OK and decrement negative use count. + */ +int dvb_generic_release(struct inode *inode, struct file *file); + +/** + * dvb_generic_ioctl - Digital TV close function, used by DVB devices + * + * @file: pointer to &struct file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * + * Checks if a DVB devnode and struct dvbdev.kernel_ioctl is still valid. 
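The generic open/release/ioctl helpers above are typically combined with dvb_register_device() along these lines. Hedged sketch: the "foo" names, the CA device type and the reader/writer counters are only examples.

#include <linux/fs.h>
#include <linux/module.h>
#include <media/dvbdev.h>

static int foo_do_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	/* cmd-specific handling; dvb_usercopy() already copied 'parg' in. */
	return 0;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_generic_open,
	.release	= dvb_generic_release,
};

static const struct dvb_device foo_dvbdev_template = {
	.readers	= 1,
	.writers	= 1,
	.users		= 1,
	.fops		= &foo_fops,
	.kernel_ioctl	= foo_do_ioctl,
};

/*
 * At probe time something like the following would create the node,
 * with 'dev' as private data and no TS output pads:
 *
 *	dvb_register_device(adap, &dev->dvbdev, &foo_dvbdev_template,
 *			    dev, DVB_DEVICE_CA, 0);
 */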
+ * If so, calls dvb_usercopy(). + */ +long dvb_generic_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +/** + * dvb_usercopy - copies data from/to userspace memory when an ioctl is + * issued. + * + * @file: Pointer to struct &file. + * @cmd: Ioctl name. + * @arg: Ioctl argument. + * @func: function that will actually handle the ioctl + * + * Ancillary function that uses ioctl direction and size to copy from + * userspace. Then, it calls @func, and, if needed, data is copied back + * to userspace. + */ +int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg, + int (*func)(struct file *file, unsigned int cmd, void *arg)); + +/** generic DVB attach function. */ +#ifdef CONFIG_MEDIA_ATTACH + +/** + * dvb_attach - attaches a DVB frontend into the DVB core. + * + * @FUNCTION: function on a frontend module to be called. + * @ARGS...: @FUNCTION arguments. + * + * This ancillary function loads a frontend module in runtime and runs + * the @FUNCTION function there, with @ARGS. + * As it increments symbol usage cont, at unregister, dvb_detach() + * should be called. + */ +#define dvb_attach(FUNCTION, ARGS...) ({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } else { \ + printk(KERN_ERR "DVB: Unable to find symbol "#FUNCTION"()\n"); \ + } \ + __r; \ +}) + +/** + * dvb_detach - detaches a DVB frontend loaded via dvb_attach() + * + * @FUNC: attach function + * + * Decrements usage count for a function previously called via dvb_attach(). + */ + +#define dvb_detach(FUNC) symbol_put_addr(FUNC) + +#else +#define dvb_attach(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#define dvb_detach(FUNC) {} + +#endif + +#endif /* #ifndef _DVBDEV_H_ */ diff --git a/include/media/i2c-addr.h b/include/media/i2c-addr.h deleted file mode 100644 index 1b6872f5e970..000000000000 --- a/include/media/i2c-addr.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * V4L I2C address list - * - * - * Copyright (C) 2006 Mauro Carvalho Chehab <mchehab@infradead.org> - * Based on a previous mapping by - * Ralph Metzler (rjkm@thp.uni-koeln.de) - * Gerd Knorr <kraxel@goldbach.in-berlin.de> - * - */ - -/* bttv address list */ -#define I2C_ADDR_TSA5522 0xc2 -#define I2C_ADDR_TDA7432 0x8a -#define I2C_ADDR_TDA8425 0x82 -#define I2C_ADDR_TDA9840 0x84 -#define I2C_ADDR_TDA9850 0xb6 /* also used by 9855,9873 */ -#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ -#define I2C_ADDR_TDA9875 0xb0 -#define I2C_ADDR_HAUPEE 0xa0 -#define I2C_ADDR_STBEE 0xae -#define I2C_ADDR_VHX 0xc0 -#define I2C_ADDR_MSP3400 0x80 -#define I2C_ADDR_MSP3400_ALT 0x88 -#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */ -#define I2C_ADDR_DPL3518 0x84 -#define I2C_ADDR_TDA9887 0x86 - -/* - * i2c bus addresses for the chips supported by tvaudio.c - */ - -#define I2C_ADDR_TDA8425 0x82 -#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */ -#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */ -#define I2C_ADDR_TDA985x_H 0xb6 -#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ - -#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */ -#define I2C_ADDR_TEA6420 0x98 - -#define I2C_ADDR_PIC16C54 0x96 /* PV951 */ diff --git a/include/media/i2c/as3645a.h b/include/media/i2c/as3645a.h deleted file mode 100644 index fffd4b563f5a..000000000000 --- a/include/media/i2c/as3645a.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * include/media/i2c/as3645a.h - * - * Copyright (C) 2008-2011 
Nokia Corporation - * - * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - */ - -#ifndef __AS3645A_H__ -#define __AS3645A_H__ - -#include <media/v4l2-subdev.h> - -#define AS3645A_NAME "as3645a" -#define AS3645A_I2C_ADDR (0x60 >> 1) /* W:0x60, R:0x61 */ - -#define AS3645A_FLASH_TIMEOUT_MIN 100000 /* us */ -#define AS3645A_FLASH_TIMEOUT_MAX 850000 -#define AS3645A_FLASH_TIMEOUT_STEP 50000 - -#define AS3645A_FLASH_INTENSITY_MIN 200 /* mA */ -#define AS3645A_FLASH_INTENSITY_MAX_1LED 500 -#define AS3645A_FLASH_INTENSITY_MAX_2LEDS 400 -#define AS3645A_FLASH_INTENSITY_STEP 20 - -#define AS3645A_TORCH_INTENSITY_MIN 20 /* mA */ -#define AS3645A_TORCH_INTENSITY_MAX 160 -#define AS3645A_TORCH_INTENSITY_STEP 20 - -#define AS3645A_INDICATOR_INTENSITY_MIN 0 /* uA */ -#define AS3645A_INDICATOR_INTENSITY_MAX 10000 -#define AS3645A_INDICATOR_INTENSITY_STEP 2500 - -/* - * as3645a_platform_data - Flash controller platform data - * @set_power: Set power callback - * @vref: VREF offset (0=0V, 1=+0.3V, 2=-0.3V, 3=+0.6V) - * @peak: Inductor peak current limit (0=1.25A, 1=1.5A, 2=1.75A, 3=2.0A) - * @ext_strobe: True if external flash strobe can be used - * @flash_max_current: Max flash current (mA, <= AS3645A_FLASH_INTENSITY_MAX) - * @torch_max_current: Max torch current (mA, >= AS3645A_TORCH_INTENSITY_MAX) - * @timeout_max: Max flash timeout (us, <= AS3645A_FLASH_TIMEOUT_MAX) - */ -struct as3645a_platform_data { - int (*set_power)(struct v4l2_subdev *subdev, int on); - unsigned int vref; - unsigned int peak; - bool ext_strobe; - - /* Flash and torch currents and timeout limits */ - unsigned int flash_max_current; - unsigned int torch_max_current; - unsigned int timeout_max; -}; - -#endif /* __AS3645A_H__ */ diff --git a/include/media/i2c/bt819.h b/include/media/i2c/bt819.h index 8025f4bc2bb6..1bcf0dbeb516 100644 --- a/include/media/i2c/bt819.h +++ b/include/media/i2c/bt819.h @@ -30,7 +30,7 @@ Note: these ioctls that internal to the kernel and are never called from userspace. 
*/ -#define BT819_FIFO_RESET_LOW _IO('b', 0) -#define BT819_FIFO_RESET_HIGH _IO('b', 1) +#define BT819_FIFO_RESET_LOW _IO('b', 0) +#define BT819_FIFO_RESET_HIGH _IO('b', 1) #endif diff --git a/include/media/i2c/ir-kbd-i2c.h b/include/media/i2c/ir-kbd-i2c.h index 76491c62c254..9f47d6a48cff 100644 --- a/include/media/i2c/ir-kbd-i2c.h +++ b/include/media/i2c/ir-kbd-i2c.h @@ -19,11 +19,15 @@ struct IR_i2c { u32 polling_interval; /* in ms */ struct delayed_work work; - char name[32]; char phys[32]; int (*get_key)(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle); + /* tx */ + struct i2c_client *tx_c; + struct mutex lock; /* do not poll Rx during Tx */ + unsigned int carrier; + unsigned int duty_cycle; }; enum ir_kbd_get_key_fn { diff --git a/include/media/i2c/m52790.h b/include/media/i2c/m52790.h index 7ddffae31a67..8d9db3cf6fab 100644 --- a/include/media/i2c/m52790.h +++ b/include/media/i2c/m52790.h @@ -23,57 +23,57 @@ /* Input routing switch 1 */ -#define M52790_SW1_IN_MASK 0x0003 -#define M52790_SW1_IN_TUNER 0x0000 -#define M52790_SW1_IN_V2 0x0001 -#define M52790_SW1_IN_V3 0x0002 -#define M52790_SW1_IN_V4 0x0003 +#define M52790_SW1_IN_MASK 0x0003 +#define M52790_SW1_IN_TUNER 0x0000 +#define M52790_SW1_IN_V2 0x0001 +#define M52790_SW1_IN_V3 0x0002 +#define M52790_SW1_IN_V4 0x0003 /* Selects component input instead of composite */ -#define M52790_SW1_YCMIX 0x0004 +#define M52790_SW1_YCMIX 0x0004 /* Input routing switch 2 */ -#define M52790_SW2_IN_MASK 0x0300 -#define M52790_SW2_IN_TUNER 0x0000 -#define M52790_SW2_IN_V2 0x0100 -#define M52790_SW2_IN_V3 0x0200 -#define M52790_SW2_IN_V4 0x0300 +#define M52790_SW2_IN_MASK 0x0300 +#define M52790_SW2_IN_TUNER 0x0000 +#define M52790_SW2_IN_V2 0x0100 +#define M52790_SW2_IN_V3 0x0200 +#define M52790_SW2_IN_V4 0x0300 /* Selects component input instead of composite */ -#define M52790_SW2_YCMIX 0x0400 +#define M52790_SW2_YCMIX 0x0400 /* Output routing switch 1 */ /* Enable 6dB amplifier for composite out */ -#define M52790_SW1_V_AMP 0x0008 +#define M52790_SW1_V_AMP 0x0008 /* Enable 6dB amplifier for component out */ -#define M52790_SW1_YC_AMP 0x0010 +#define M52790_SW1_YC_AMP 0x0010 /* Audio output mode */ -#define M52790_SW1_AUDIO_MASK 0x00c0 -#define M52790_SW1_AUDIO_MUTE 0x0000 -#define M52790_SW1_AUDIO_R 0x0040 -#define M52790_SW1_AUDIO_L 0x0080 +#define M52790_SW1_AUDIO_MASK 0x00c0 +#define M52790_SW1_AUDIO_MUTE 0x0000 +#define M52790_SW1_AUDIO_R 0x0040 +#define M52790_SW1_AUDIO_L 0x0080 #define M52790_SW1_AUDIO_STEREO 0x00c0 /* Output routing switch 2 */ /* Enable 6dB amplifier for composite out */ -#define M52790_SW2_V_AMP 0x0800 +#define M52790_SW2_V_AMP 0x0800 /* Enable 6dB amplifier for component out */ -#define M52790_SW2_YC_AMP 0x1000 +#define M52790_SW2_YC_AMP 0x1000 /* Audio output mode */ -#define M52790_SW2_AUDIO_MASK 0xc000 -#define M52790_SW2_AUDIO_MUTE 0x0000 -#define M52790_SW2_AUDIO_R 0x4000 -#define M52790_SW2_AUDIO_L 0x8000 +#define M52790_SW2_AUDIO_MASK 0xc000 +#define M52790_SW2_AUDIO_MUTE 0x0000 +#define M52790_SW2_AUDIO_R 0x4000 +#define M52790_SW2_AUDIO_L 0x8000 #define M52790_SW2_AUDIO_STEREO 0xc000 @@ -83,9 +83,9 @@ #define M52790_IN_V3 (M52790_SW1_IN_V3 | M52790_SW2_IN_V3) #define M52790_IN_V4 (M52790_SW1_IN_V4 | M52790_SW2_IN_V4) -#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \ +#define M52790_OUT_STEREO (M52790_SW1_AUDIO_STEREO | \ M52790_SW2_AUDIO_STEREO) -#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \ +#define M52790_OUT_AMP_STEREO (M52790_SW1_AUDIO_STEREO | \ M52790_SW1_V_AMP | \ 
M52790_SW2_AUDIO_STEREO | \ M52790_SW2_V_AMP) diff --git a/include/media/i2c/saa7115.h b/include/media/i2c/saa7115.h index 53954c90e7f6..a0cda423509d 100644 --- a/include/media/i2c/saa7115.h +++ b/include/media/i2c/saa7115.h @@ -36,15 +36,15 @@ #define SAA7115_SVIDEO3 9 /* outputs */ -#define SAA7115_IPORT_ON 1 -#define SAA7115_IPORT_OFF 0 +#define SAA7115_IPORT_ON 1 +#define SAA7115_IPORT_OFF 0 /* SAA7111 specific outputs. */ -#define SAA7111_VBI_BYPASS 2 +#define SAA7111_VBI_BYPASS 2 #define SAA7111_FMT_YUV422 0x00 -#define SAA7111_FMT_RGB 0x40 -#define SAA7111_FMT_CCIR 0x80 -#define SAA7111_FMT_YUV411 0xc0 +#define SAA7111_FMT_RGB 0x40 +#define SAA7111_FMT_CCIR 0x80 +#define SAA7111_FMT_YUV411 0xc0 /* config flags */ /* diff --git a/include/media/i2c/tvaudio.h b/include/media/i2c/tvaudio.h index 1ac8184693f8..f13e1a386364 100644 --- a/include/media/i2c/tvaudio.h +++ b/include/media/i2c/tvaudio.h @@ -21,7 +21,22 @@ #ifndef _TVAUDIO_H #define _TVAUDIO_H -#include <media/i2c-addr.h> +/* + * i2c bus addresses for the chips supported by tvaudio.c + */ + +#define I2C_ADDR_TDA8425 0x82 +#define I2C_ADDR_TDA9840 0x84 +#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ +#define I2C_ADDR_TDA9875 0xb0 +#define I2C_ADDR_TDA8425 0x82 +#define I2C_ADDR_TDA9840 0x84 /* also used by TA8874Z */ +#define I2C_ADDR_TDA985x_L 0xb4 /* also used by 9873 */ +#define I2C_ADDR_TDA985x_H 0xb6 +#define I2C_ADDR_TDA9874 0xb0 /* also used by 9875 */ +#define I2C_ADDR_TEA6300 0x80 /* also used by 6320 */ +#define I2C_ADDR_TEA6420 0x98 +#define I2C_ADDR_PIC16C54 0x96 /* PV951 */ /* The tvaudio module accepts the following inputs: */ #define TVAUDIO_INPUT_TUNER 0 diff --git a/include/media/i2c/upd64031a.h b/include/media/i2c/upd64031a.h index 48ec03c4ef23..1eba24dfee48 100644 --- a/include/media/i2c/upd64031a.h +++ b/include/media/i2c/upd64031a.h @@ -18,9 +18,9 @@ #define _UPD64031A_H_ /* Ghost reduction modes */ -#define UPD64031A_GR_ON 0 -#define UPD64031A_GR_OFF 1 -#define UPD64031A_GR_THROUGH 3 +#define UPD64031A_GR_ON 0 +#define UPD64031A_GR_OFF 1 +#define UPD64031A_GR_THROUGH 3 /* Direct 3D/YCS Connection */ #define UPD64031A_3DYCS_DISABLE (0 << 2) diff --git a/include/media/lirc.h b/include/media/lirc.h deleted file mode 100644 index 554988c860c1..000000000000 --- a/include/media/lirc.h +++ /dev/null @@ -1 +0,0 @@ -#include <uapi/linux/lirc.h> diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h deleted file mode 100644 index 857da67bd931..000000000000 --- a/include/media/lirc_dev.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - * LIRC base driver - * - * by Artur Lipowski <alipowski@interia.pl> - * This code is licensed under GNU GPL - * - */ - -#ifndef _LINUX_LIRC_DEV_H -#define _LINUX_LIRC_DEV_H - -#define BUFLEN 16 - -#include <linux/slab.h> -#include <linux/fs.h> -#include <linux/ioctl.h> -#include <linux/poll.h> -#include <linux/kfifo.h> -#include <media/lirc.h> -#include <linux/device.h> -#include <linux/cdev.h> - -struct lirc_buffer { - wait_queue_head_t wait_poll; - spinlock_t fifo_lock; - unsigned int chunk_size; - unsigned int size; /* in chunks */ - /* Using chunks instead of bytes pretends to simplify boundary checking - * And should allow for some performance fine tunning later */ - struct kfifo fifo; -}; - -static inline void lirc_buffer_clear(struct lirc_buffer *buf) -{ - unsigned long flags; - - if (kfifo_initialized(&buf->fifo)) { - spin_lock_irqsave(&buf->fifo_lock, flags); - kfifo_reset(&buf->fifo); - spin_unlock_irqrestore(&buf->fifo_lock, flags); - } else - WARN(1, "calling %s on an 
uninitialized lirc_buffer\n", - __func__); -} - -static inline int lirc_buffer_init(struct lirc_buffer *buf, - unsigned int chunk_size, - unsigned int size) -{ - int ret; - - init_waitqueue_head(&buf->wait_poll); - spin_lock_init(&buf->fifo_lock); - buf->chunk_size = chunk_size; - buf->size = size; - ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); - - return ret; -} - -static inline void lirc_buffer_free(struct lirc_buffer *buf) -{ - if (kfifo_initialized(&buf->fifo)) { - kfifo_free(&buf->fifo); - } else - WARN(1, "calling %s on an uninitialized lirc_buffer\n", - __func__); -} - -static inline int lirc_buffer_len(struct lirc_buffer *buf) -{ - int len; - unsigned long flags; - - spin_lock_irqsave(&buf->fifo_lock, flags); - len = kfifo_len(&buf->fifo); - spin_unlock_irqrestore(&buf->fifo_lock, flags); - - return len; -} - -static inline int lirc_buffer_full(struct lirc_buffer *buf) -{ - return lirc_buffer_len(buf) == buf->size * buf->chunk_size; -} - -static inline int lirc_buffer_empty(struct lirc_buffer *buf) -{ - return !lirc_buffer_len(buf); -} - -static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf, - unsigned char *dest) -{ - unsigned int ret = 0; - - if (lirc_buffer_len(buf) >= buf->chunk_size) - ret = kfifo_out_locked(&buf->fifo, dest, buf->chunk_size, - &buf->fifo_lock); - return ret; - -} - -static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf, - unsigned char *orig) -{ - unsigned int ret; - - ret = kfifo_in_locked(&buf->fifo, orig, buf->chunk_size, - &buf->fifo_lock); - - return ret; -} - -/** - * struct lirc_dev - represents a LIRC device - * - * @name: used for logging - * @minor: the minor device (/dev/lircX) number for the device - * @code_length: length of a remote control key code expressed in bits - * @features: lirc compatible hardware features, like LIRC_MODE_RAW, - * LIRC_CAN\_\*, as defined at include/media/lirc.h. - * @buffer_size: Number of FIFO buffers with @chunk_size size. - * Only used if @rbuf is NULL. - * @chunk_size: Size of each FIFO buffer. - * Only used if @rbuf is NULL. - * @data: private per-driver data - * @buf: if %NULL, lirc_dev will allocate and manage the buffer, - * otherwise allocated by the caller which will - * have to write to the buffer by other means, like irq's - * (see also lirc_serial.c). 
- * @buf_internal: whether lirc_dev has allocated the read buffer or not - * @rdev: &struct rc_dev associated with the device - * @fops: &struct file_operations for the device - * @owner: the module owning this struct - * @attached: if the device is still live - * @open: open count for the device's chardev - * @mutex: serialises file_operations calls - * @dev: &struct device assigned to the device - * @cdev: &struct cdev assigned to the device - */ -struct lirc_dev { - char name[40]; - unsigned int minor; - __u32 code_length; - __u32 features; - - unsigned int buffer_size; /* in chunks holding one code each */ - unsigned int chunk_size; - struct lirc_buffer *buf; - bool buf_internal; - - void *data; - struct rc_dev *rdev; - const struct file_operations *fops; - struct module *owner; - - bool attached; - int open; - - struct mutex mutex; /* protect from simultaneous accesses */ - - struct device dev; - struct cdev cdev; -}; - -struct lirc_dev *lirc_allocate_device(void); - -void lirc_free_device(struct lirc_dev *d); - -int lirc_register_device(struct lirc_dev *d); - -void lirc_unregister_device(struct lirc_dev *d); - -/* Must be called in the open fop before lirc_get_pdata() can be used */ -void lirc_init_pdata(struct inode *inode, struct file *file); - -/* Returns the private data stored in the lirc_dev - * associated with the given device file pointer. - */ -void *lirc_get_pdata(struct file *file); - -/* default file operations - * used by drivers if they override only some operations - */ -int lirc_dev_fop_open(struct inode *inode, struct file *file); -int lirc_dev_fop_close(struct inode *inode, struct file *file); -unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait); -long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length, - loff_t *ppos); -#endif diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h index 511615d3bf6f..dc2f64e1b08f 100644 --- a/include/media/media-devnode.h +++ b/include/media/media-devnode.h @@ -56,7 +56,7 @@ struct media_file_operations { struct module *owner; ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); - unsigned int (*poll) (struct file *, struct poll_table_struct *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*open) (struct file *); diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 222d379960b7..a732af1dbba0 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -88,6 +88,8 @@ struct media_entity_enum { * @stack: Graph traversal stack; the stack contains information * on the path the media entities to be walked and the * links through which they were reached. + * @stack.entity: pointer to &struct media_entity at the graph. + * @stack.link: pointer to &struct list_head. * @ent_enum: Visited entities * @top: The top of the stack */ @@ -247,6 +249,9 @@ enum media_entity_type { * @pipe: Pipeline this entity belongs to. * @info: Union with devnode information. Kept just for backward * compatibility. + * @info.dev: Contains device major and minor info. + * @info.dev.major: device node major, if the device is a devnode. + * @info.dev.minor: device node minor, if the device is a devnode. 
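Several poll callbacks in these headers change from unsigned int to __poll_t in this series. The change is type-only, but handlers are now expected to build their return mask from the EPOLL* constants. A hypothetical handler, where my_poll and the my_stream bookkeeping are placeholders, would look roughly like:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/wait.h>

/* Placeholder per-open state; only the wait queue and ready list matter here. */
struct my_stream {
        wait_queue_head_t waitq;
        struct list_head done_list;
};

static __poll_t my_poll(struct file *file, struct poll_table_struct *wait)
{
        struct my_stream *s = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &s->waitq, wait);

        if (!list_empty(&s->done_list))         /* a completed buffer is waiting */
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}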
* @major: Devnode major number (zero if not applicable). Kept just * for backward compatibility. * @minor: Devnode minor number (zero if not applicable). Kept just @@ -629,7 +634,11 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, * This function must be called during the cleanup phase after unregistering * the entity (currently, it does nothing). */ -static inline void media_entity_cleanup(struct media_entity *entity) {}; +#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) +static inline void media_entity_cleanup(struct media_entity *entity) {} +#else +#define media_entity_cleanup(entity) do { } while (false) +#endif /** * media_create_pad_link() - creates a link between two entities. diff --git a/include/media/rc-core.h b/include/media/rc-core.h index 314a1edb6189..aed4272d47f5 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -17,6 +17,7 @@ #define _RC_CORE #include <linux/spinlock.h> +#include <linux/cdev.h> #include <linux/kfifo.h> #include <linux/time.h> #include <linux/timer.h> @@ -30,9 +31,9 @@ do { \ } while (0) /** - * enum rc_driver_type - type of the RC output + * enum rc_driver_type - type of the RC driver. * - * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode + * @RC_DRIVER_SCANCODE: Driver or hardware generates a scancode. * @RC_DRIVER_IR_RAW: Driver or hardware generates pulse/space sequences. * It needs a Infra-Red pulse/space decoder * @RC_DRIVER_IR_RAW_TX: Device transmitter only, @@ -68,6 +69,33 @@ enum rc_filter_type { }; /** + * struct lirc_fh - represents an open lirc file + * @list: list of open file handles + * @rc: rcdev for this lirc chardev + * @carrier_low: when setting the carrier range, first the low end must be + * set with an ioctl and then the high end with another ioctl + * @send_timeout_reports: report timeouts in lirc raw IR. + * @rawir: queue for incoming raw IR + * @scancodes: queue for incoming decoded scancodes + * @wait_poll: poll struct for lirc device + * @send_mode: lirc mode for sending, either LIRC_MODE_SCANCODE or + * LIRC_MODE_PULSE + * @rec_mode: lirc mode for receiving, either LIRC_MODE_SCANCODE or + * LIRC_MODE_MODE2 + */ +struct lirc_fh { + struct list_head list; + struct rc_dev *rc; + int carrier_low; + bool send_timeout_reports; + DECLARE_KFIFO_PTR(rawir, unsigned int); + DECLARE_KFIFO_PTR(scancodes, struct lirc_scancode); + wait_queue_head_t wait_poll; + u8 send_mode; + u8 rec_mode; +}; + +/** * struct rc_dev - represents a remote control device * @dev: driver model's view of this device * @managed_alloc: devm_rc_allocate_device was used to create rc_dev @@ -106,6 +134,8 @@ enum rc_filter_type { * @keypressed: whether a key is currently pressed * @keyup_jiffies: time (in jiffies) when the current keypress should be released * @timer_keyup: timer for releasing a keypress + * @timer_repeat: timer for autorepeat events. This is needed for CEC, which + * has non-standard repeats. 
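For context, the life cycle of the structure being documented here is short: allocate it, fill in the mandatory fields, register it. A minimal raw-IR sketch, with placeholder driver, device and keymap names, might be:

#include <media/rc-core.h>

static int my_ir_probe(struct device *parent)
{
        struct rc_dev *rc;
        int ret;

        rc = rc_allocate_device(RC_DRIVER_IR_RAW);
        if (!rc)
                return -ENOMEM;

        rc->device_name = "My IR receiver";     /* placeholder strings */
        rc->driver_name = "my-ir";
        rc->map_name = RC_MAP_RC6_MCE;          /* placeholder keymap choice */
        rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
        rc->dev.parent = parent;

        ret = rc_register_device(rc);
        if (ret)
                rc_free_device(rc);

        return ret;
}

Decoded keys are then reported with rc_keydown(), whose prototype appears further down in this header, and raw drivers feed pulse/space data in through ir_raw_event_store().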
* @last_keycode: keycode of last keypress * @last_protocol: protocol of last keypress * @last_scancode: scancode of last keypress @@ -115,6 +145,15 @@ enum rc_filter_type { * @max_timeout: maximum timeout supported by device * @rx_resolution : resolution (in ns) of input sampler * @tx_resolution: resolution (in ns) of output sampler + * @lirc_dev: lirc device + * @lirc_cdev: lirc char cdev + * @gap_start: time when gap starts + * @gap_duration: duration of initial gap + * @gap: true if we're in a gap + * @lirc_fh_lock: protects lirc_fh list + * @lirc_fh: list of open files + * @registered: set to true by rc_register_device(), false by + * rc_unregister_device * @change_protocol: allow changing the protocol used on hardware decoders * @open: callback to allow drivers to enable polling/irq when IR input device * is opened. @@ -165,6 +204,7 @@ struct rc_dev { bool keypressed; unsigned long keyup_jiffies; struct timer_list timer_keyup; + struct timer_list timer_repeat; u32 last_keycode; enum rc_proto last_protocol; u32 last_scancode; @@ -174,6 +214,16 @@ struct rc_dev { u32 max_timeout; u32 rx_resolution; u32 tx_resolution; +#ifdef CONFIG_LIRC + struct device lirc_dev; + struct cdev lirc_cdev; + ktime_t gap_start; + u64 gap_duration; + bool gap; + spinlock_t lirc_fh_lock; + struct list_head lirc_fh; +#endif + bool registered; int (*change_protocol)(struct rc_dev *dev, u64 *rc_proto); int (*open)(struct rc_dev *dev); void (*close)(struct rc_dev *dev); @@ -248,20 +298,6 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev); */ void rc_unregister_device(struct rc_dev *dev); -/** - * rc_open - Opens a RC device - * - * @rdev: pointer to struct rc_dev. - */ -int rc_open(struct rc_dev *rdev); - -/** - * rc_close - Closes a RC device - * - * @rdev: pointer to struct rc_dev. 
- */ -void rc_close(struct rc_dev *rdev); - void rc_repeat(struct rc_dev *dev); void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode, u8 toggle); @@ -309,6 +345,7 @@ int ir_raw_event_store_with_filter(struct rc_dev *dev, void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max); +int ir_raw_encode_carrier(enum rc_proto protocol); static inline void ir_raw_event_reset(struct rc_dev *dev) { diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 72197cb43781..7046734b3895 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -10,59 +10,7 @@ */ #include <linux/input.h> - -/** - * enum rc_proto - the Remote Controller protocol - * - * @RC_PROTO_UNKNOWN: Protocol not known - * @RC_PROTO_OTHER: Protocol known but proprietary - * @RC_PROTO_RC5: Philips RC5 protocol - * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol - * @RC_PROTO_RC5_SZ: StreamZap variant of RC5 - * @RC_PROTO_JVC: JVC protocol - * @RC_PROTO_SONY12: Sony 12 bit protocol - * @RC_PROTO_SONY15: Sony 15 bit protocol - * @RC_PROTO_SONY20: Sony 20 bit protocol - * @RC_PROTO_NEC: NEC protocol - * @RC_PROTO_NECX: Extended NEC protocol - * @RC_PROTO_NEC32: NEC 32 bit protocol - * @RC_PROTO_SANYO: Sanyo protocol - * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard - * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse - * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol - * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol - * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol - * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol - * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol - * @RC_PROTO_SHARP: Sharp protocol - * @RC_PROTO_XMP: XMP protocol - * @RC_PROTO_CEC: CEC protocol - */ -enum rc_proto { - RC_PROTO_UNKNOWN = 0, - RC_PROTO_OTHER = 1, - RC_PROTO_RC5 = 2, - RC_PROTO_RC5X_20 = 3, - RC_PROTO_RC5_SZ = 4, - RC_PROTO_JVC = 5, - RC_PROTO_SONY12 = 6, - RC_PROTO_SONY15 = 7, - RC_PROTO_SONY20 = 8, - RC_PROTO_NEC = 9, - RC_PROTO_NECX = 10, - RC_PROTO_NEC32 = 11, - RC_PROTO_SANYO = 12, - RC_PROTO_MCIR2_KBD = 13, - RC_PROTO_MCIR2_MSE = 14, - RC_PROTO_RC6_0 = 15, - RC_PROTO_RC6_6A_20 = 16, - RC_PROTO_RC6_6A_24 = 17, - RC_PROTO_RC6_6A_32 = 18, - RC_PROTO_RC6_MCE = 19, - RC_PROTO_SHARP = 20, - RC_PROTO_XMP = 21, - RC_PROTO_CEC = 22, -}; +#include <uapi/linux/lirc.h> #define RC_PROTO_BIT_NONE 0ULL #define RC_PROTO_BIT_UNKNOWN BIT_ULL(RC_PROTO_UNKNOWN) diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 4d8cb0796bc6..b7e42a1b0910 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -117,7 +117,7 @@ struct soc_camera_host_ops { int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *); int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *); int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *); - unsigned int (*poll)(struct file *, poll_table *); + __poll_t (*poll)(struct file *, poll_table *); }; #define SOCAM_SENSOR_INVERT_PCLK (1 << 0) diff --git a/include/media/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h index 13e49d85cae3..823fadede7bf 100644 --- a/include/media/v4l2-tpg.h +++ b/include/media/tpg/v4l2-tpg.h @@ -26,8 +26,51 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/videodev2.h> -#include <media/v4l2-tpg-colors.h> +struct tpg_rbg_color8 { + unsigned char r, g, b; +}; + +struct tpg_rbg_color16 { + __u16 r, g, b; +}; + +enum tpg_color { + TPG_COLOR_CSC_WHITE, + TPG_COLOR_CSC_YELLOW, + 
TPG_COLOR_CSC_CYAN, + TPG_COLOR_CSC_GREEN, + TPG_COLOR_CSC_MAGENTA, + TPG_COLOR_CSC_RED, + TPG_COLOR_CSC_BLUE, + TPG_COLOR_CSC_BLACK, + TPG_COLOR_75_YELLOW, + TPG_COLOR_75_CYAN, + TPG_COLOR_75_GREEN, + TPG_COLOR_75_MAGENTA, + TPG_COLOR_75_RED, + TPG_COLOR_75_BLUE, + TPG_COLOR_100_WHITE, + TPG_COLOR_100_YELLOW, + TPG_COLOR_100_CYAN, + TPG_COLOR_100_GREEN, + TPG_COLOR_100_MAGENTA, + TPG_COLOR_100_RED, + TPG_COLOR_100_BLUE, + TPG_COLOR_100_BLACK, + TPG_COLOR_TEXTFG, + TPG_COLOR_TEXTBG, + TPG_COLOR_RANDOM, + TPG_COLOR_RAMP, + TPG_COLOR_MAX = TPG_COLOR_RAMP + 256 +}; + +extern const struct tpg_rbg_color8 tpg_colors[TPG_COLOR_MAX]; +extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1]; +extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1]; +extern const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1] + [V4L2_XFER_FUNC_SMPTE2084 + 1] + [TPG_COLOR_CSC_BLACK + 1]; enum tpg_pattern { TPG_PAT_75_COLORBAR, TPG_PAT_100_COLORBAR, diff --git a/include/media/tuner-types.h b/include/media/tuner-types.h index 78f0654d9c3d..df76ac8e658c 100644 --- a/include/media/tuner-types.h +++ b/include/media/tuner-types.h @@ -171,6 +171,21 @@ struct tuner_params { struct tuner_range *ranges; }; +/** + * struct tunertype - describes the known tuners. + * + * @name: string with the tuner's name. + * @count: size of &struct tuner_params array. + * @params: pointer to &struct tuner_params array. + * + * @min: minimal tuner frequency, in 62.5 kHz step. + * should be multiplied to 16 to convert to MHz. + * @max: minimal tuner frequency, in 62.5 kHz step. + * Should be multiplied to 16 to convert to MHz. + * @stepsize: frequency step, in Hz. + * @initdata: optional byte sequence to initialize the tuner. + * @sleepdata: optional byte sequence to power down the tuner. + */ struct tunertype { char *name; unsigned int count; diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 6152434cbe82..1592d323c577 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -28,7 +28,7 @@ struct v4l2_async_notifier; * in order to identify a match * * @V4L2_ASYNC_MATCH_CUSTOM: Match will use the logic provided by &struct - * v4l2_async_subdev.match ops + * v4l2_async_subdev.match ops * @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node @@ -48,6 +48,31 @@ enum v4l2_async_match_type { * * @match_type: type of match that will be used * @match: union of per-bus type matching data sets + * @match.fwnode: + * pointer to &struct fwnode_handle to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE. + * @match.device_name: + * string containing the device name to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_DEVNAME. + * @match.i2c: embedded struct with I2C parameters to be matched. + * Both @match.i2c.adapter_id and @match.i2c.address + * should be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_I2C. + * @match.i2c.adapter_id: + * I2C adapter ID to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_I2C. + * @match.i2c.address: + * I2C address to be matched. + * Used if @match_type is %V4L2_ASYNC_MATCH_I2C. + * @match.custom: + * Driver-specific match criteria. + * Used if @match_type is %V4L2_ASYNC_MATCH_CUSTOM. + * @match.custom.match: + * Driver-specific match function to be used if + * %V4L2_ASYNC_MATCH_CUSTOM. 
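Together with the flattening of the match union a few hunks below, the match fields documented here are now assigned directly. A hypothetical fwnode-matched notifier setup, where the my_* names and the single-entry subdev array are placeholders, reduces to roughly:

#include <linux/kernel.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

static struct v4l2_async_subdev my_asd;
static struct v4l2_async_subdev *my_asds[] = { &my_asd };
static struct v4l2_async_notifier my_notifier;

static int my_register_notifier(struct v4l2_device *v4l2_dev,
                                struct fwnode_handle *remote)
{
        my_asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
        my_asd.match.fwnode = remote;   /* previously my_asd.match.fwnode.fwnode */

        my_notifier.subdevs = my_asds;
        my_notifier.num_subdevs = ARRAY_SIZE(my_asds);

        return v4l2_async_notifier_register(v4l2_dev, &my_notifier);
}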
+ * @match.custom.priv: + * Driver-specific private struct with match parameters + * to be used if %V4L2_ASYNC_MATCH_CUSTOM. * @list: used to link struct v4l2_async_subdev objects, waiting to be * probed, to a notifier->waiting list * @@ -58,12 +83,8 @@ enum v4l2_async_match_type { struct v4l2_async_subdev { enum v4l2_async_match_type match_type; union { - struct { - struct fwnode_handle *fwnode; - } fwnode; - struct { - const char *name; - } device_name; + struct fwnode_handle *fwnode; + const char *device_name; struct { int adapter_id; unsigned short address; @@ -167,7 +188,7 @@ void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier); /** * v4l2_async_register_subdev - registers a sub-device to the asynchronous - * subdevice framework + * subdevice framework * * @sd: pointer to &struct v4l2_subdev */ @@ -197,7 +218,7 @@ int __must_check v4l2_async_register_subdev_sensor_common( /** * v4l2_async_unregister_subdev - unregisters a sub-device to the asynchronous - * subdevice framework + * subdevice framework * * @sd: pointer to &struct v4l2_subdev */ diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index aac8b7b6e691..e0d95a7c5d48 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -28,7 +28,7 @@ #include <media/v4l2-dev.h> -/* Common printk constucts for v4l-i2c drivers. These macros create a unique +/* Common printk constructs for v4l-i2c drivers. These macros create a unique prefix consisting of the driver name, the adapter number and the i2c address. */ #define v4l_printk(level, name, adapter, addr, fmt, arg...) \ @@ -50,7 +50,7 @@ /* These three macros assume that the debug level is set with a module parameter called 'debug'. */ #define v4l_dbg(level, debug, client, fmt, arg...) \ - do { \ + do { \ if (debug >= (level)) \ v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \ } while (0) @@ -80,9 +80,9 @@ /* These three macros assume that the debug level is set with a module parameter called 'debug'. */ #define v4l2_dbg(level, debug, dev, fmt, arg...) \ - do { \ + do { \ if (debug >= (level)) \ - v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ + v4l2_printk(KERN_DEBUG, dev, fmt , ## arg); \ } while (0) /** @@ -127,7 +127,7 @@ struct v4l2_subdev_ops; * @client_type: name of the chip that's on the adapter. * @addr: I2C address. If zero, it will use @probe_addrs * @probe_addrs: array with a list of address. The last entry at such - * array should be %I2C_CLIENT_END. + * array should be %I2C_CLIENT_END. * * returns a &struct v4l2_subdev pointer. */ @@ -146,7 +146,7 @@ struct i2c_board_info; * @info: pointer to struct i2c_board_info used to replace the irq, * platform_data and addr arguments. * @probe_addrs: array with a list of address. The last entry at such - * array should be %I2C_CLIENT_END. + * array should be %I2C_CLIENT_END. * * returns a &struct v4l2_subdev pointer. */ @@ -174,17 +174,43 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, */ unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd); +/** + * enum v4l2_i2c_tuner_type - specifies the range of tuner address that + * should be used when seeking for I2C devices. + * + * @ADDRS_RADIO: Radio tuner addresses. + * Represent the following I2C addresses: + * 0x10 (if compiled with tea5761 support) + * and 0x60. + * @ADDRS_DEMOD: Demod tuner addresses. + * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a and 0x4b. + * @ADDRS_TV: TV tuner addresses. 
+ * Represent the following I2C addresses: + * 0x42, 0x43, 0x4a, 0x4b, 0x60, 0x61, 0x62, + * 0x63 and 0x64. + * @ADDRS_TV_WITH_DEMOD: TV tuner addresses if demod is present, this + * excludes addresses used by the demodulator + * from the list of candidates. + * Represent the following I2C addresses: + * 0x60, 0x61, 0x62, 0x63 and 0x64. + * + * NOTE: All I2C addresses above use the 7-bit notation. + */ enum v4l2_i2c_tuner_type { - ADDRS_RADIO, /* Radio tuner addresses */ - ADDRS_DEMOD, /* Demod tuner addresses */ - ADDRS_TV, /* TV tuner addresses */ - /* TV tuner addresses if demod is present, this excludes - addresses used by the demodulator from the list of - candidates. */ + ADDRS_RADIO, + ADDRS_DEMOD, + ADDRS_TV, ADDRS_TV_WITH_DEMOD, }; -/* Return a list of I2C tuner addresses to probe. Use only if the tuner - addresses are unknown. */ +/** + * v4l2_i2c_tuner_addrs - Return a list of I2C tuner addresses to probe. + * + * @type: type of the tuner to seek, as defined by + * &enum v4l2_i2c_tuner_type. + * + * NOTE: Use only if the tuner addresses are unknown. + */ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type); /* ------------------------------------------------------------------------- */ @@ -224,10 +250,14 @@ void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, /* ------------------------------------------------------------------------- */ -/* Note: these remaining ioctls/structs should be removed as well, but they are - still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and - v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup - is needed in those modules. */ +/* + * FIXME: these remaining ioctls/structs should be removed as well, but they + * are still used in tuner-simple.c (TUNER_SET_CONFIG) and cx18/ivtv (RESET). + * To remove these ioctls some more cleanup is needed in those modules. + * + * It doesn't make much sense on documenting them, as what we really want is + * to get rid of them. + */ /* s_config */ struct v4l2_priv_tun_config { @@ -236,32 +266,79 @@ struct v4l2_priv_tun_config { }; #define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) -#define VIDIOC_INT_RESET _IOW ('d', 102, u32) - -struct v4l2_routing { - u32 input; - u32 output; -}; +#define VIDIOC_INT_RESET _IOW ('d', 102, u32) /* ------------------------------------------------------------------------- */ /* Miscellaneous helper functions */ -void v4l_bound_align_image(unsigned int *w, unsigned int wmin, +/** + * v4l_bound_align_image - adjust video dimensions according to + * a given constraints. + * + * @width: pointer to width that will be adjusted if needed. + * @wmin: minimum width. + * @wmax: maximum width. + * @walign: least significant bit on width. + * @height: pointer to height that will be adjusted if needed. + * @hmin: minimum height. + * @hmax: maximum height. + * @halign: least significant bit on width. + * @salign: least significant bit for the image size (e. g. + * :math:`width * height`). + * + * Clip an image to have @width between @wmin and @wmax, and @height between + * @hmin and @hmax, inclusive. + * + * Additionally, the @width will be a multiple of :math:`2^{walign}`, + * the @height will be a multiple of :math:`2^{halign}`, and the overall + * size :math:`width * height` will be a multiple of :math:`2^{salign}`. + * + * .. note:: + * + * #. The clipping rectangle may be shrunk or enlarged to fit the alignment + * constraints. + * #. @wmax must not be smaller than @wmin. + * #. 
@hmax must not be smaller than @hmin. + * #. The alignments must not be so high there are no possible image + * sizes within the allowed bounds. + * #. @wmin and @hmin must be at least 1 (don't use 0). + * #. For @walign, @halign and @salign, if you don't care about a certain + * alignment, specify ``0``, as :math:`2^0 = 1` and one byte alignment + * is equivalent to no alignment. + * #. If you only want to adjust downward, specify a maximum that's the + * same as the initial value. + */ +void v4l_bound_align_image(unsigned int *width, unsigned int wmin, unsigned int wmax, unsigned int walign, - unsigned int *h, unsigned int hmin, + unsigned int *height, unsigned int hmin, unsigned int hmax, unsigned int halign, unsigned int salign); -struct v4l2_discrete_probe { - const struct v4l2_frmsize_discrete *sizes; - int num_sizes; -}; - -const struct v4l2_frmsize_discrete *v4l2_find_nearest_format( - const struct v4l2_discrete_probe *probe, - s32 width, s32 height); +/** + * v4l2_find_nearest_format - find the nearest format size among a discrete + * set of resolutions. + * + * @sizes: array of &struct v4l2_frmsize_discrete image sizes. + * @num_sizes: length of @sizes array. + * @width: desired width. + * @height: desired height. + * + * Finds the closest resolution to minimize the width and height differences + * between what requested and the supported resolutions. + */ +const struct v4l2_frmsize_discrete * +v4l2_find_nearest_format(const struct v4l2_frmsize_discrete *sizes, + const size_t num_sizes, + s32 width, s32 height); +/** + * v4l2_get_timestamp - helper routine to get a timestamp to be used when + * filling streaming metadata. Internally, it uses ktime_get_ts(), + * which is the recommended way to get it. + * + * @tv: pointer to &struct timeval to be filled. + */ void v4l2_get_timestamp(struct timeval *tv); #endif /* V4L2_COMMON_H_ */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index dacfe54057f8..05ebb9ef9e73 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -166,8 +166,15 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * empty strings ("") correspond to non-existing menu items (this * is in addition to the menu_skip_mask above). The last entry * must be NULL. + * Used only if the @type is %V4L2_CTRL_TYPE_MENU. + * @qmenu_int: A 64-bit integer array for with integer menu items. + * The size of array must be equal to the menu size, e. g.: + * :math:`ceil(\frac{maximum - minimum}{step}) + 1`. + * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU. * @flags: The control's flags. - * @cur: The control's current value. + * @cur: Structure to store the current value. + * @cur.val: The control's current value, if the @type is represented via + * a u32 integer (see &enum v4l2_ctrl_type). * @val: The control's new s32 value. * @priv: The control's private pointer. For use by the driver. It is * untouched by the control framework. 
Note that this pointer is @@ -1037,7 +1044,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, * @file: pointer to struct file * @wait: pointer to struct poll_table_struct */ -unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); +__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait); /* Helpers for ioctl_ops */ @@ -1139,7 +1146,7 @@ int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, /** * v4l2_ctrl_subdev_subscribe_event - Helper function to implement - * as a &struct v4l2_subdev_core_ops subscribe_event function + * as a &struct v4l2_subdev_core_ops subscribe_event function * that just subscribes control events. * * @sd: pointer to &struct v4l2_subdev diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 28a686eb7d09..53f32022fabe 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -21,31 +21,63 @@ #define VIDEO_MAJOR 81 -#define VFL_TYPE_GRABBER 0 -#define VFL_TYPE_VBI 1 -#define VFL_TYPE_RADIO 2 -#define VFL_TYPE_SUBDEV 3 -#define VFL_TYPE_SDR 4 -#define VFL_TYPE_TOUCH 5 -#define VFL_TYPE_MAX 6 - -/* Is this a receiver, transmitter or mem-to-mem? */ -/* Ignored for VFL_TYPE_SUBDEV. */ -#define VFL_DIR_RX 0 -#define VFL_DIR_TX 1 -#define VFL_DIR_M2M 2 +/** + * enum vfl_devnode_type - type of V4L2 device node + * + * @VFL_TYPE_GRABBER: for video input/output devices + * @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext) + * @VFL_TYPE_RADIO: for radio tuners + * @VFL_TYPE_SUBDEV: for V4L2 subdevices + * @VFL_TYPE_SDR: for Software Defined Radio tuners + * @VFL_TYPE_TOUCH: for touch sensors + */ +enum vfl_devnode_type { + VFL_TYPE_GRABBER = 0, + VFL_TYPE_VBI = 1, + VFL_TYPE_RADIO = 2, + VFL_TYPE_SUBDEV = 3, + VFL_TYPE_SDR = 4, + VFL_TYPE_TOUCH = 5, +}; +#define VFL_TYPE_MAX VFL_TYPE_TOUCH + +/** + * enum vfl_direction - Identifies if a &struct video_device corresponds + * to a receiver, a transmitter or a mem-to-mem device. + * + * @VFL_DIR_RX: device is a receiver. + * @VFL_DIR_TX: device is a transmitter. + * @VFL_DIR_M2M: device is a memory to memory device. + * + * Note: Ignored if &enum vfl_devnode_type is %VFL_TYPE_SUBDEV. + */ +enum vfl_devnode_direction { + VFL_DIR_RX, + VFL_DIR_TX, + VFL_DIR_M2M, +}; struct v4l2_ioctl_callbacks; struct video_device; struct v4l2_device; struct v4l2_ctrl_handler; -/* Flag to mark the video_device struct as registered. - Drivers can clear this flag if they want to block all future - device access. It is cleared by video_unregister_device. */ -#define V4L2_FL_REGISTERED (0) -/* file->private_data points to struct v4l2_fh */ -#define V4L2_FL_USES_V4L2_FH (1) +/** + * enum v4l2_video_device_flags - Flags used by &struct video_device + * + * @V4L2_FL_REGISTERED: + * indicates that a &struct video_device is registered. + * Drivers can clear this flag if they want to block all future + * device access. It is cleared by video_unregister_device. + * @V4L2_FL_USES_V4L2_FH: + * indicates that file->private_data points to &struct v4l2_fh. + * This flag is set by the core when v4l2_fh_init() is called. + * All new drivers should use it. 
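In driver terms, the two enums introduced above simply replace the old integer defines, and a capture node is still registered the same way. A hypothetical sketch, with placeholder fops, ioctl ops and names:

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static const struct v4l2_file_operations my_fops = {
        .owner = THIS_MODULE,   /* real drivers wire up open/release/poll/ioctl here */
};

static const struct v4l2_ioctl_ops my_ioctl_ops = {
        /* VIDIOC_* handlers would go here */
};

static struct video_device my_vdev;     /* usually embedded in the driver's state struct */

static int my_register_video_node(struct v4l2_device *v4l2_dev)
{
        strscpy(my_vdev.name, "my-capture", sizeof(my_vdev.name));
        my_vdev.v4l2_dev = v4l2_dev;
        my_vdev.fops = &my_fops;
        my_vdev.ioctl_ops = &my_ioctl_ops;
        my_vdev.vfl_dir = VFL_DIR_RX;           /* a receiver */
        my_vdev.release = video_device_release_empty;

        /* creates /dev/videoX, picking the first free node number */
        return video_register_device(&my_vdev, VFL_TYPE_GRABBER, -1);
}

video_unregister_device() later undoes the registration and clears V4L2_FL_REGISTERED, as noted above.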
+ */ +enum v4l2_video_device_flags { + V4L2_FL_REGISTERED = 0, + V4L2_FL_USES_V4L2_FH = 1, +}; /* Priority helper functions */ @@ -152,7 +184,7 @@ struct v4l2_file_operations { struct module *owner; ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); - unsigned int (*poll) (struct file *, struct poll_table_struct *); + __poll_t (*poll) (struct file *, struct poll_table_struct *); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); #ifdef CONFIG_COMPAT long (*compat_ioctl32) (struct file *, unsigned int, unsigned long); @@ -166,7 +198,7 @@ struct v4l2_file_operations { /* * Newer version of video_device, handled by videodev2.c - * This version moves redundant code from video device code to + * This version moves redundant code from video device code to * the common handler */ @@ -189,11 +221,12 @@ struct v4l2_file_operations { * @prio: pointer to &struct v4l2_prio_state with device's Priority state. * If NULL, then v4l2_dev->prio will be used. * @name: video device name - * @vfl_type: V4L device type + * @vfl_type: V4L device type, as defined by &enum vfl_devnode_type * @vfl_dir: V4L receiver, transmitter or m2m * @minor: device node 'minor'. It is set to -1 if the registration failed * @num: number of the video device node - * @flags: video device flags. Use bitops to set/clear/test flags + * @flags: video device flags. Use bitops to set/clear/test flags. + * Contains a set of &enum v4l2_video_device_flags. * @index: attribute to differentiate multiple indices on one physical device * @fh_lock: Lock for all v4l2_fhs * @fh_list: List of &struct v4l2_fh @@ -237,8 +270,8 @@ struct video_device /* device info */ char name[32]; - int vfl_type; - int vfl_dir; + enum vfl_devnode_type vfl_type; + enum vfl_devnode_direction vfl_dir; int minor; u16 num; unsigned long flags; @@ -261,18 +294,30 @@ struct video_device struct mutex *lock; }; -#define media_entity_to_video_device(__e) \ - container_of(__e, struct video_device, entity) -/* dev to video-device */ +/** + * media_entity_to_video_device - Returns a &struct video_device from + * the &struct media_entity embedded on it. + * + * @entity: pointer to &struct media_entity + */ +#define media_entity_to_video_device(entity) \ + container_of(entity, struct video_device, entity) + +/** + * to_video_device - Returns a &struct video_device from the + * &struct device embedded on it. + * + * @cd: pointer to &struct device + */ #define to_video_device(cd) container_of(cd, struct video_device, dev) /** * __video_register_device - register video4linux devices * * @vdev: struct video_device to register - * @type: type of device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type * @nr: which device node number is desired: - * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) * @warn_if_nr_in_use: warn if the desired device node number * was already in use and another number was chosen instead. * @owner: module that owns the video device node @@ -289,43 +334,37 @@ struct video_device * * Returns 0 on success. * - * Valid values for @type are: - * - * - %VFL_TYPE_GRABBER - A frame grabber - * - %VFL_TYPE_VBI - Vertical blank data (undecoded) - * - %VFL_TYPE_RADIO - A radio card - * - %VFL_TYPE_SUBDEV - A subdevice - * - %VFL_TYPE_SDR - Software Defined Radio - * - %VFL_TYPE_TOUCH - A touch sensor - * * .. 
note:: * * This function is meant to be used only inside the V4L2 core. * Drivers should use video_register_device() or * video_register_device_no_warn(). */ -int __must_check __video_register_device(struct video_device *vdev, int type, - int nr, int warn_if_nr_in_use, struct module *owner); +int __must_check __video_register_device(struct video_device *vdev, + enum vfl_devnode_type type, + int nr, int warn_if_nr_in_use, + struct module *owner); /** * video_register_device - register video4linux devices * * @vdev: struct video_device to register - * @type: type of device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type * @nr: which device node number is desired: - * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) * * Internally, it calls __video_register_device(). Please see its * documentation for more details. * * .. note:: - * if video_register_device fails, the release() callback of + * if video_register_device fails, the release() callback of * &struct video_device structure is *not* called, so the caller * is responsible for freeing any data. Usually that means that * you video_device_release() should be called on failure. */ static inline int __must_check video_register_device(struct video_device *vdev, - int type, int nr) + enum vfl_devnode_type type, + int nr) { return __video_register_device(vdev, type, nr, 1, vdev->fops->owner); } @@ -334,9 +373,9 @@ static inline int __must_check video_register_device(struct video_device *vdev, * video_register_device_no_warn - register video4linux devices * * @vdev: struct video_device to register - * @type: type of device to register + * @type: type of device to register, as defined by &enum vfl_devnode_type * @nr: which device node number is desired: - * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) + * (0 == /dev/video0, 1 == /dev/video1, ..., -1 == first free) * * This function is identical to video_register_device() except that no * warning is issued if the desired device node number was already in use. @@ -345,13 +384,14 @@ static inline int __must_check video_register_device(struct video_device *vdev, * documentation for more details. * * .. note:: - * if video_register_device fails, the release() callback of + * if video_register_device fails, the release() callback of * &struct video_device structure is *not* called, so the caller * is responsible for freeing any data. Usually that means that * you video_device_release() should be called on failure. */ -static inline int __must_check video_register_device_no_warn( - struct video_device *vdev, int type, int nr) +static inline int __must_check +video_register_device_no_warn(struct video_device *vdev, + enum vfl_devnode_type type, int nr) { return __video_register_device(vdev, type, nr, 0, vdev->fops->owner); } @@ -383,7 +423,7 @@ void video_device_release(struct video_device *vdev); /** * video_device_release_empty - helper function to implement the - * video_device->release\(\) callback. + * video_device->release\(\) callback. * * @vdev: pointer to &struct video_device * diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 8ffa94009d1a..0c9e4da55499 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -38,7 +38,7 @@ struct v4l2_ctrl_handler; * @lock: lock this struct; can be used by the driver as well * if this struct is embedded into a larger struct. 
* @name: unique device name, by default the driver name + bus ID - * @notify: notify callback called by some sub-devices. + * @notify: notify operation called by some sub-devices. * @ctrl_handler: The control handler. May be %NULL. * @prio: Device's priority state * @ref: Keep track of the references to this struct. @@ -56,7 +56,6 @@ struct v4l2_ctrl_handler; * #) @dev->driver_data points to this struct. * #) @dev might be %NULL if there is no parent device */ - struct v4l2_device { struct device *dev; #if defined(CONFIG_MEDIA_CONTROLLER) @@ -166,7 +165,7 @@ void v4l2_device_unregister(struct v4l2_device *v4l2_dev); * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. * * @v4l2_dev: pointer to struct &v4l2_device - * @sd: pointer to struct &v4l2_subdev + * @sd: pointer to &struct v4l2_subdev * * While registered, the subdev module is marked as in-use. * @@ -179,7 +178,7 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, /** * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. * - * @sd: pointer to struct &v4l2_subdev + * @sd: pointer to &struct v4l2_subdev * * .. note :: * @@ -201,7 +200,7 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev); /** * v4l2_subdev_notify - Sends a notification to v4l2_device. * - * @sd: pointer to struct &v4l2_subdev + * @sd: pointer to &struct v4l2_subdev * @notification: type of notification. Please notice that the notification * type is driver-specific. * @arg: arguments for the notification. Those are specific to each @@ -214,13 +213,43 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, sd->v4l2_dev->notify(sd, notification, arg); } -/* Iterate over all subdevs. */ +/* Helper macros to iterate over all subdevs. */ + +/** + * v4l2_device_for_each_subdev - Helper macro that interates over all + * sub-devices of a given &v4l2_device. + * + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * + * This macro iterates over all sub-devices owned by the @v4l2_dev device. + * It acts as a for loop iterator and executes the next statement with + * the @sd variable pointing to each sub-device in turn. + */ #define v4l2_device_for_each_subdev(sd, v4l2_dev) \ list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) -/* Call the specified callback for all subdevs matching the condition. - Ignore any errors. Note that you cannot add or delete a subdev - while walking the subdevs list. */ +/** + * __v4l2_device_call_subdevs_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev pointer used as an iterator by the loop. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) 
\ do { \ list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ @@ -228,6 +257,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, (sd)->ops->o->f((sd) , ##args); \ } while (0) +/** + * __v4l2_device_call_subdevs - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ do { \ struct v4l2_subdev *__sd; \ @@ -236,10 +283,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, f , ##args); \ } while (0) -/* Call the specified callback for all subdevs matching the condition. - If the callback returns an error other than 0 or -ENOIOCTLCMD, then - return with that error code. Note that you cannot add or delete a - subdev while walking the subdevs list. */ +/** + * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @sd: pointer that will be filled by the macro with all + * &struct v4l2_subdev sub-devices associated with @v4l2_dev. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, zero + * otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ ({ \ long __err = 0; \ @@ -253,6 +320,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, (__err == -ENOIOCTLCMD) ? 0 : __err; \ }) +/** + * __v4l2_device_call_subdevs_until_err - Calls the specified operation for + * all subdevs matching the condition. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @cond: condition to be match + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) 
\ ({ \ struct v4l2_subdev *__sd; \ @@ -260,9 +349,26 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, f , ##args); \ }) -/* Call the specified callback for all subdevs matching grp_id (if 0, then - match them all). Ignore any errors. Note that you cannot add or delete - a subdev while walking the subdevs list. */ +/** + * v4l2_device_call_all - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ do { \ struct v4l2_subdev *__sd; \ @@ -272,10 +378,30 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, ##args); \ } while (0) -/* Call the specified callback for all subdevs matching grp_id (if 0, then - match them all). If the callback returns an error other than 0 or - -ENOIOCTLCMD, then return with that error code. Note that you cannot - add or delete a subdev while walking the subdevs list. */ +/** + * v4l2_device_call_until_err - Calls the specified operation for + * all subdevs matching the &v4l2_subdev.grp_id, as assigned + * by the bridge driver, until an error occurs. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. + */ #define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ ({ \ struct v4l2_subdev *__sd; \ @@ -284,10 +410,24 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, ##args); \ }) -/* - * Call the specified callback for all subdevs where grp_id & grpmsk != 0 - * (if grpmsk == `0, then match them all). Ignore any errors. Note that you - * cannot add or delete a subdev while walking the subdevs list. +/** + * v4l2_device_mask_call_all - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. 
+ * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Ignore any errors. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. */ #define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) \ do { \ @@ -298,11 +438,28 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, ##args); \ } while (0) -/* - * Call the specified callback for all subdevs where grp_id & grpmsk != 0 - * (if grpmsk == 0, then match them all). If the callback returns an error - * other than 0 or %-ENOIOCTLCMD, then return with that error code. Note that - * you cannot add or delete a subdev while walking the subdevs list. +/** + * v4l2_device_mask_call_until_err - Calls the specified operation for + * all subdevices where a group ID matches a specified bitmask. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args...: arguments for @f. + * + * Return: + * + * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` + * for any subdevice, then abort and return with that error code, + * zero otherwise. + * + * Note: subdevs cannot be added or deleted while walking + * the subdevs list. */ #define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \ ({ \ @@ -312,9 +469,19 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, ##args); \ }) -/* - * Does any subdev with matching grpid (or all if grpid == 0) has the given - * op? + +/** + * v4l2_device_has_op - checks if any subdev with matching grpid has a + * given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpid: &struct v4l2_subdev->grp_id group ID to match. + * Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. */ #define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ ({ \ @@ -331,9 +498,18 @@ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, __result; \ }) -/* - * Does any subdev with matching grpmsk (or all if grpmsk == 0) has the given - * op? +/** + * v4l2_device_mask_has_op - checks if any subdev with matching group + * mask has a given ops. + * + * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. + * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id + * group ID to be matched. Use 0 to match them all. + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of operations functions. + * @f: operation function that will be called if @cond matches. + * The operation functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. 
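
To put the call macros above in context: a bridge driver that has registered its sub-devices typically drives them as a group through these helpers. A minimal sketch follows; the skel_dev structure and skel_* function names are hypothetical and not part of this patch, only the v4l2_device_call_all()/v4l2_device_call_until_err() usage pattern is what the macros above document.

	/* Sketch: start streaming on every registered sub-device, stopping at
	 * the first real error (-ENOIOCTLCMD from subdevs that do not
	 * implement the op is already filtered out by the macro itself).
	 */
	static int skel_start_streaming(struct skel_dev *dev)	/* hypothetical */
	{
		int ret;

		ret = v4l2_device_call_until_err(&dev->v4l2_dev, 0, video, s_stream, 1);
		if (ret)
			return ret;
		return 0;
	}

	/* Teardown path: errors from individual subdevs are not interesting. */
	static void skel_stop_streaming(struct skel_dev *dev)
	{
		v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
	}
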
*/ #define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ ({ \ diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h index 61a18893e004..ebf00e07a515 100644 --- a/include/media/v4l2-dv-timings.h +++ b/include/media/v4l2-dv-timings.h @@ -203,13 +203,15 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait); */ struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t); -/* - * reduce_fps - check if conditions for reduced fps are true. - * bt - v4l2 timing structure - * For different timings reduced fps is allowed if following conditions - * are met - - * For CVT timings: if reduced blanking v2 (vsync == 8) is true. - * For CEA861 timings: if V4L2_DV_FL_CAN_REDUCE_FPS flag is true. +/** + * can_reduce_fps - check if conditions for reduced fps are true. + * @bt: v4l2 timing structure + * + * For different timings reduced fps is allowed if the following conditions + * are met: + * + * - For CVT timings: if reduced blanking v2 (vsync == 8) is true. + * - For CEA861 timings: if %V4L2_DV_FL_CAN_REDUCE_FPS flag is true. */ static inline bool can_reduce_fps(struct v4l2_bt_timings *bt) { diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h index 6741910c3a18..17833e886e11 100644 --- a/include/media/v4l2-event.h +++ b/include/media/v4l2-event.h @@ -24,40 +24,6 @@ #include <linux/videodev2.h> #include <linux/wait.h> -/* - * Overview: - * - * Events are subscribed per-filehandle. An event specification consists of a - * type and is optionally associated with an object identified through the - * 'id' field. So an event is uniquely identified by the (type, id) tuple. - * - * The v4l2-fh struct has a list of subscribed events. The v4l2_subscribed_event - * struct is added to that list, one for every subscribed event. - * - * Each v4l2_subscribed_event struct ends with an array of v4l2_kevent structs. - * This array (ringbuffer, really) is used to store any events raised by the - * driver. The v4l2_kevent struct links into the 'available' list of the - * v4l2_fh struct so VIDIOC_DQEVENT will know which event to dequeue first. - * - * Finally, if the event subscription is associated with a particular object - * such as a V4L2 control, then that object needs to know about that as well - * so that an event can be raised by that object. So the 'node' field can - * be used to link the v4l2_subscribed_event struct into a list of that - * object. - * - * So to summarize: - * - * struct v4l2_fh has two lists: one of the subscribed events, and one of the - * pending events. - * - * struct v4l2_subscribed_event has a ringbuffer of raised (pending) events of - * that particular type. - * - * If struct v4l2_subscribed_event is associated with a specific object, then - * that object will have an internal list of struct v4l2_subscribed_event so - * it knows who subscribed an event to that object. - */ - struct v4l2_fh; struct v4l2_subdev; struct v4l2_subscribed_event; @@ -218,7 +184,7 @@ int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_event_subscription *sub); /** * v4l2_src_change_event_subscribe - helper function that calls - * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. + * v4l2_event_subscribe() if the event is %V4L2_EVENT_SOURCE_CHANGE. 
* * @fh: pointer to struct v4l2_fh * @sub: pointer to &struct v4l2_event_subscription diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h index 5c1d50f78e12..0a5e4518ca11 100644 --- a/include/media/v4l2-flash-led-class.h +++ b/include/media/v4l2-flash-led-class.h @@ -91,12 +91,24 @@ struct v4l2_flash { struct v4l2_ctrl **ctrls; }; +/** + * v4l2_subdev_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_subdev embedded on it. + * + * @sd: pointer to &struct v4l2_subdev + */ static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( struct v4l2_subdev *sd) { return container_of(sd, struct v4l2_flash, sd); } +/** + * v4l2_ctrl_to_v4l2_flash - Returns a &struct v4l2_flash from the + * &struct v4l2_ctrl embedded on it. + * + * @c: pointer to &struct v4l2_ctrl + */ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) { return container_of(c->handler, struct v4l2_flash, hdl); diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index b5b465677d28..c228ec1c77cf 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -81,7 +81,17 @@ struct v4l2_fwnode_bus_mipi_csi1 { * struct v4l2_fwnode_endpoint - the endpoint data structure * @base: fwnode endpoint of the v4l2_fwnode * @bus_type: bus type - * @bus: bus configuration data structure + * @bus: union with bus configuration data structure + * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel. + * Used if the bus is parallel. + * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). * @link_frequencies: array of supported link frequencies * @nr_of_link_frequencies: number of elements in link_frequenccies array */ diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 93f8afcb7a22..4d8626c468bc 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -12,6 +12,8 @@ #define V4L2_MEDIABUS_H #include <linux/v4l2-mediabus.h> +#include <linux/bitops.h> + /* Parallel flags */ /* @@ -20,44 +22,44 @@ * horizontal and vertical synchronisation. In "Slave mode" the host is * providing these signals to the slave. 
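
As an illustration of the @bus union documented for &struct v4l2_fwnode_endpoint above, a driver usually parses an endpoint and then picks the bus-specific member according to the detected bus type. This is only a sketch: skel_parse_endpoint() is a hypothetical name, error handling is trimmed, and the V4L2_MBUS_CSI2/V4L2_MBUS_PARALLEL enumerators are assumed to be the bus_type names in use at this point in time.

	/* Sketch: parse one fwnode endpoint and read the bus-specific data
	 * out of the union depending on the bus type.
	 */
	static int skel_parse_endpoint(struct fwnode_handle *ep_fwnode)	/* hypothetical */
	{
		struct v4l2_fwnode_endpoint ep;
		int ret;

		ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &ep);
		if (ret)
			return ret;

		switch (ep.bus_type) {
		case V4L2_MBUS_CSI2:
			pr_info("CSI-2 bus, %u data lanes\n",
				ep.bus.mipi_csi2.num_data_lanes);
			break;
		case V4L2_MBUS_PARALLEL:
		case V4L2_MBUS_BT656:
			pr_info("parallel bus, flags 0x%x\n", ep.bus.parallel.flags);
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
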
*/ -#define V4L2_MBUS_MASTER (1 << 0) -#define V4L2_MBUS_SLAVE (1 << 1) +#define V4L2_MBUS_MASTER BIT(0) +#define V4L2_MBUS_SLAVE BIT(1) /* * Signal polarity flags * Note: in BT.656 mode HSYNC, FIELD, and VSYNC are unused * V4L2_MBUS_[HV]SYNC* flags should be also used for specifying * configuration of hardware that uses [HV]REF signals */ -#define V4L2_MBUS_HSYNC_ACTIVE_HIGH (1 << 2) -#define V4L2_MBUS_HSYNC_ACTIVE_LOW (1 << 3) -#define V4L2_MBUS_VSYNC_ACTIVE_HIGH (1 << 4) -#define V4L2_MBUS_VSYNC_ACTIVE_LOW (1 << 5) -#define V4L2_MBUS_PCLK_SAMPLE_RISING (1 << 6) -#define V4L2_MBUS_PCLK_SAMPLE_FALLING (1 << 7) -#define V4L2_MBUS_DATA_ACTIVE_HIGH (1 << 8) -#define V4L2_MBUS_DATA_ACTIVE_LOW (1 << 9) +#define V4L2_MBUS_HSYNC_ACTIVE_HIGH BIT(2) +#define V4L2_MBUS_HSYNC_ACTIVE_LOW BIT(3) +#define V4L2_MBUS_VSYNC_ACTIVE_HIGH BIT(4) +#define V4L2_MBUS_VSYNC_ACTIVE_LOW BIT(5) +#define V4L2_MBUS_PCLK_SAMPLE_RISING BIT(6) +#define V4L2_MBUS_PCLK_SAMPLE_FALLING BIT(7) +#define V4L2_MBUS_DATA_ACTIVE_HIGH BIT(8) +#define V4L2_MBUS_DATA_ACTIVE_LOW BIT(9) /* FIELD = 0/1 - Field1 (odd)/Field2 (even) */ -#define V4L2_MBUS_FIELD_EVEN_HIGH (1 << 10) +#define V4L2_MBUS_FIELD_EVEN_HIGH BIT(10) /* FIELD = 1/0 - Field1 (odd)/Field2 (even) */ -#define V4L2_MBUS_FIELD_EVEN_LOW (1 << 11) +#define V4L2_MBUS_FIELD_EVEN_LOW BIT(11) /* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */ -#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH (1 << 12) -#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW (1 << 13) +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12) +#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13) /* Serial flags */ /* How many lanes the client can use */ -#define V4L2_MBUS_CSI2_1_LANE (1 << 0) -#define V4L2_MBUS_CSI2_2_LANE (1 << 1) -#define V4L2_MBUS_CSI2_3_LANE (1 << 2) -#define V4L2_MBUS_CSI2_4_LANE (1 << 3) +#define V4L2_MBUS_CSI2_1_LANE BIT(0) +#define V4L2_MBUS_CSI2_2_LANE BIT(1) +#define V4L2_MBUS_CSI2_3_LANE BIT(2) +#define V4L2_MBUS_CSI2_4_LANE BIT(3) /* On which channels it can send video data */ -#define V4L2_MBUS_CSI2_CHANNEL_0 (1 << 4) -#define V4L2_MBUS_CSI2_CHANNEL_1 (1 << 5) -#define V4L2_MBUS_CSI2_CHANNEL_2 (1 << 6) -#define V4L2_MBUS_CSI2_CHANNEL_3 (1 << 7) +#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4) +#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5) +#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6) +#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7) /* Does it support only continuous or also non-continuous clock mode */ -#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK (1 << 8) -#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK (1 << 9) +#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8) +#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9) #define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | V4L2_MBUS_CSI2_2_LANE | \ V4L2_MBUS_CSI2_3_LANE | V4L2_MBUS_CSI2_4_LANE) @@ -91,6 +93,13 @@ struct v4l2_mbus_config { unsigned int flags; }; +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_pix_format fields from a &struct v4l2_mbus_framefmt. + * + * @pix_fmt: pointer to &struct v4l2_pix_format to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, const struct v4l2_mbus_framefmt *mbus_fmt) { @@ -103,6 +112,15 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, pix_fmt->xfer_func = mbus_fmt->xfer_func; } +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format and a + * data format code. 
+ * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_fmt: pointer to &struct v4l2_pix_format to be used as model + * @code: data format code (from &enum v4l2_mbus_pixelcode) + */ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format *pix_fmt, u32 code) @@ -117,6 +135,13 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, mbus_fmt->code = code; } +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_pix_format_mplane fields from a media bus structure. + * + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be filled + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be used as model + */ static inline void v4l2_fill_pix_format_mplane( struct v4l2_pix_format_mplane *pix_mp_fmt, const struct v4l2_mbus_framefmt *mbus_fmt) @@ -130,6 +155,13 @@ static inline void v4l2_fill_pix_format_mplane( pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; } +/** + * v4l2_fill_pix_format - Ancillary routine that fills a &struct + * v4l2_mbus_framefmt from a &struct v4l2_pix_format_mplane. + * + * @mbus_fmt: pointer to &struct v4l2_mbus_framefmt to be filled + * @pix_mp_fmt: pointer to &struct v4l2_pix_format_mplane to be used as model + */ static inline void v4l2_fill_mbus_format_mplane( struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format_mplane *pix_mp_fmt) diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index e157d5c9b224..3d07ba3a8262 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, * indicate that a non-blocking write can be performed, while read will be * returned in case of the destination queue. */ -unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, +__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct poll_table_struct *wait); /** @@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh, int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma); -unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait); +__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait); #endif /* _MEDIA_V4L2_MEM2MEM_H */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index ec399c770301..980a86c08fce 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -108,22 +108,31 @@ struct v4l2_decode_vbi_line { * not yet implemented) since ops provide proper type-checking. */ -/* Subdevice external IO pin configuration */ -#define V4L2_SUBDEV_IO_PIN_DISABLE (1 << 0) /* ENABLE assumed */ -#define V4L2_SUBDEV_IO_PIN_OUTPUT (1 << 1) -#define V4L2_SUBDEV_IO_PIN_INPUT (1 << 2) -#define V4L2_SUBDEV_IO_PIN_SET_VALUE (1 << 3) /* Set output value */ -#define V4L2_SUBDEV_IO_PIN_ACTIVE_LOW (1 << 4) /* ACTIVE HIGH assumed */ +/** + * enum v4l2_subdev_io_pin_bits - Subdevice external IO pin configuration + * bits + * + * @V4L2_SUBDEV_IO_PIN_DISABLE: disables a pin config. ENABLE assumed. + * @V4L2_SUBDEV_IO_PIN_OUTPUT: set it if pin is an output. + * @V4L2_SUBDEV_IO_PIN_INPUT: set it if pin is an input. + * @V4L2_SUBDEV_IO_PIN_SET_VALUE: to set the output value via + * &struct v4l2_subdev_io_pin_config->value. + * @V4L2_SUBDEV_IO_PIN_ACTIVE_LOW: pin active is bit 0. + * Otherwise, ACTIVE HIGH is assumed. 
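
The fill helpers documented in this hunk are typically used by bridge drivers to translate between the pixel format seen by userspace and the media-bus format negotiated with a sub-device. A condensed TRY_FMT-style sketch; skel_dev, dev->sensor_sd and dev->pad_cfg are hypothetical driver state, and the fixed UYVY bus code is only there to keep the example short.

	/* Sketch: push a requested pixel format to the sensor subdev and copy
	 * the (possibly adjusted) result back into the pix format.
	 */
	static int skel_try_fmt(struct skel_dev *dev, struct v4l2_pix_format *pix)
	{
		struct v4l2_subdev_format format = {
			.which = V4L2_SUBDEV_FORMAT_TRY,
			.pad = 0,
		};
		int ret;

		v4l2_fill_mbus_format(&format.format, pix, MEDIA_BUS_FMT_UYVY8_2X8);
		/* dev->pad_cfg: hypothetical, pre-allocated pad config for TRY */
		ret = v4l2_subdev_call(dev->sensor_sd, pad, set_fmt, dev->pad_cfg, &format);
		if (ret)
			return ret;
		v4l2_fill_pix_format(pix, &format.format);
		return 0;
	}
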
+ */ +enum v4l2_subdev_io_pin_bits { + V4L2_SUBDEV_IO_PIN_DISABLE = 0, + V4L2_SUBDEV_IO_PIN_OUTPUT = 1, + V4L2_SUBDEV_IO_PIN_INPUT = 2, + V4L2_SUBDEV_IO_PIN_SET_VALUE = 3, + V4L2_SUBDEV_IO_PIN_ACTIVE_LOW = 4, +}; /** * struct v4l2_subdev_io_pin_config - Subdevice external IO pin configuration * - * @flags: bitmask with flags for this pin's config: - * %V4L2_SUBDEV_IO_PIN_DISABLE - disables a pin config, - * %V4L2_SUBDEV_IO_PIN_OUTPUT - if pin is an output, - * %V4L2_SUBDEV_IO_PIN_INPUT - if pin is an input, - * %V4L2_SUBDEV_IO_PIN_SET_VALUE - to set the output value via @value - * and %V4L2_SUBDEV_IO_PIN_ACTIVE_LOW - if active is 0. + * @flags: bitmask with flags for this pin's config, whose bits are defined by + * &enum v4l2_subdev_io_pin_bits. * @pin: Chip external IO pin to configure * @function: Internal signal pad/function to route to IO pin * @value: Initial value for pin - e.g. GPIO output value @@ -140,7 +149,7 @@ struct v4l2_subdev_io_pin_config { /** * struct v4l2_subdev_core_ops - Define core ops callbacks for subdevs * - * @log_status: callback for %VIDIOC_LOG_STATUS ioctl handler code. + * @log_status: callback for VIDIOC_LOG_STATUS() ioctl handler code. * * @s_io_pin_config: configure one or more chip I/O pins for chips that * multiplex different internal signal pads out to IO pins. This function @@ -168,9 +177,9 @@ struct v4l2_subdev_io_pin_config { * @compat_ioctl32: called when a 32 bits application uses a 64 bits Kernel, * in order to fix data passed from/to userspace. * - * @g_register: callback for %VIDIOC_G_REGISTER ioctl handler code. + * @g_register: callback for VIDIOC_DBG_G_REGISTER() ioctl handler code. * - * @s_register: callback for %VIDIOC_G_REGISTER ioctl handler code. + * @s_register: callback for VIDIOC_DBG_S_REGISTER() ioctl handler code. * * @s_power: puts subdevice in power saving mode (on == 0) or normal operation * mode (on == 1). @@ -215,29 +224,48 @@ struct v4l2_subdev_core_ops { * struct v4l2_subdev_tuner_ops - Callbacks used when v4l device was opened * in radio mode. * - * @s_radio: callback for %VIDIOC_S_RADIO ioctl handler code. + * @s_radio: callback that switches the tuner to radio mode. + * drivers should explicitly call it when a tuner ops should + * operate on radio mode, before being able to handle it. + * Used on devices that have both AM/FM radio receiver and TV. * - * @s_frequency: callback for %VIDIOC_S_FREQUENCY ioctl handler code. + * @s_frequency: callback for VIDIOC_S_FREQUENCY() ioctl handler code. * - * @g_frequency: callback for %VIDIOC_G_FREQUENCY ioctl handler code. + * @g_frequency: callback for VIDIOC_G_FREQUENCY() ioctl handler code. * freq->type must be filled in. Normally done by video_ioctl2() * or the bridge driver. * - * @enum_freq_bands: callback for %VIDIOC_ENUM_FREQ_BANDS ioctl handler code. + * @enum_freq_bands: callback for VIDIOC_ENUM_FREQ_BANDS() ioctl handler code. * - * @g_tuner: callback for %VIDIOC_G_TUNER ioctl handler code. + * @g_tuner: callback for VIDIOC_G_TUNER() ioctl handler code. * - * @s_tuner: callback for %VIDIOC_S_TUNER ioctl handler code. @vt->type must be + * @s_tuner: callback for VIDIOC_S_TUNER() ioctl handler code. @vt->type must be * filled in. Normally done by video_ioctl2 or the * bridge driver. * - * @g_modulator: callback for %VIDIOC_G_MODULATOR ioctl handler code. + * @g_modulator: callback for VIDIOC_G_MODULATOR() ioctl handler code. * - * @s_modulator: callback for %VIDIOC_S_MODULATOR ioctl handler code. + * @s_modulator: callback for VIDIOC_S_MODULATOR() ioctl handler code. 
* * @s_type_addr: sets tuner type and its I2C addr. * * @s_config: sets tda9887 specific stuff, like port1, port2 and qss + * + * .. note:: + * + * On devices that have both AM/FM and TV, it is up to the driver + * to explicitly call s_radio when the tuner should be switched to + * radio mode, before handling other &struct v4l2_subdev_tuner_ops + * that would require it. An example of such usage is:: + * + * static void s_frequency(void *priv, const struct v4l2_frequency *f) + * { + * ... + * if (f.type == V4L2_TUNER_RADIO) + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_radio); + * ... + * v4l2_device_call_all(v4l2_dev, 0, tuner, s_frequency); + * } */ struct v4l2_subdev_tuner_ops { int (*s_radio)(struct v4l2_subdev *sd); @@ -285,25 +313,32 @@ struct v4l2_subdev_audio_ops { int (*s_stream)(struct v4l2_subdev *sd, int enable); }; -/* Indicates the @length field specifies maximum data length. */ -#define V4L2_MBUS_FRAME_DESC_FL_LEN_MAX (1U << 0) -/* - * Indicates that the format does not have line offsets, i.e. the - * receiver should use 1D DMA. +/** + * enum v4l2_mbus_frame_desc_entry - media bus frame description flags + * + * @V4L2_MBUS_FRAME_DESC_FL_LEN_MAX: + * Indicates that &struct v4l2_mbus_frame_desc_entry->length field + * specifies maximum data length. + * @V4L2_MBUS_FRAME_DESC_FL_BLOB: + * Indicates that the format does not have line offsets, i.e. + * the receiver should use 1D DMA. */ -#define V4L2_MBUS_FRAME_DESC_FL_BLOB (1U << 1) +enum v4l2_mbus_frame_desc_flags { + V4L2_MBUS_FRAME_DESC_FL_LEN_MAX = BIT(0), + V4L2_MBUS_FRAME_DESC_FL_BLOB = BIT(1), +}; /** * struct v4l2_mbus_frame_desc_entry - media bus frame description structure * - * @flags: bitmask flags: %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX and - * %V4L2_MBUS_FRAME_DESC_FL_BLOB. - * @pixelcode: media bus pixel code, valid if FRAME_DESC_FL_BLOB is not set - * @length: number of octets per frame, valid if V4L2_MBUS_FRAME_DESC_FL_BLOB - * is set + * @flags: bitmask flags, as defined by &enum v4l2_mbus_frame_desc_flags. + * @pixelcode: media bus pixel code, valid if @flags + * %FRAME_DESC_FL_BLOB is not set. + * @length: number of octets per frame, valid if @flags + * %V4L2_MBUS_FRAME_DESC_FL_LEN_MAX is set. */ struct v4l2_mbus_frame_desc_entry { - u16 flags; + enum v4l2_mbus_frame_desc_flags flags; u32 pixelcode; u32 length; }; @@ -332,9 +367,9 @@ struct v4l2_mbus_frame_desc { * regarding clock frequency dividers, etc. If not used, then set flags * to 0. If the frequency is not supported, then -EINVAL is returned. * - * @g_std: callback for %VIDIOC_G_STD ioctl handler code. + * @g_std: callback for VIDIOC_G_STD() ioctl handler code. * - * @s_std: callback for %VIDIOC_S_STD ioctl handler code. + * @s_std: callback for VIDIOC_S_STD() ioctl handler code. * * @s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by * video input devices. @@ -342,7 +377,7 @@ struct v4l2_mbus_frame_desc { * @g_std_output: get current standard for video OUTPUT devices. This is ignored * by video input devices. * - * @querystd: callback for %VIDIOC_QUERYSTD ioctl handler code. + * @querystd: callback for VIDIOC_QUERYSTD() ioctl handler code. * * @g_tvnorms: get &v4l2_std_id with all standards supported by the video * CAPTURE device. This is ignored by video output devices. @@ -358,13 +393,15 @@ struct v4l2_mbus_frame_desc { * * @g_pixelaspect: callback to return the pixelaspect ratio. * - * @g_parm: callback for %VIDIOC_G_PARM ioctl handler code. + * @g_parm: callback for VIDIOC_G_PARM() ioctl handler code. 
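
For the frame-descriptor flags introduced above, a transmitting sub-device would normally report its stream from the get_frame_desc pad operation. The sketch below assumes a compressed (JPEG-like) stream whose size is bounded but not fixed; the skel_ prefix and the 1 MiB bound are hypothetical.

	/* Sketch: describe one blob stream where only the maximum length is
	 * meaningful, so both LEN_MAX and BLOB are set and pixelcode is left
	 * unused.
	 */
	static int skel_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
				       struct v4l2_mbus_frame_desc *fd)
	{
		fd->num_entries = 1;
		fd->entry[0].flags = V4L2_MBUS_FRAME_DESC_FL_LEN_MAX |
				     V4L2_MBUS_FRAME_DESC_FL_BLOB;
		fd->entry[0].length = 1024 * 1024;	/* hypothetical upper bound */
		return 0;
	}
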
* - * @s_parm: callback for %VIDIOC_S_PARM ioctl handler code. + * @s_parm: callback for VIDIOC_S_PARM() ioctl handler code. * - * @g_frame_interval: callback for %VIDIOC_G_FRAMEINTERVAL ioctl handler code. + * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL() + * ioctl handler code. * - * @s_frame_interval: callback for %VIDIOC_S_FRAMEINTERVAL ioctl handler code. + * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL() + * ioctl handler code. * * @s_dv_timings: Set custom dv timings in the sub device. This is used * when sub device is capable of setting detailed timing information @@ -372,7 +409,7 @@ struct v4l2_mbus_frame_desc { * * @g_dv_timings: Get custom dv timings in the sub device. * - * @query_dv_timings: callback for %VIDIOC_QUERY_DV_TIMINGS ioctl handler code. + * @query_dv_timings: callback for VIDIOC_QUERY_DV_TIMINGS() ioctl handler code. * * @g_mbus_config: get supported mediabus configurations * @@ -443,7 +480,8 @@ struct v4l2_subdev_video_ops { * member (to determine whether CC data from the first or second field * should be obtained). * - * @g_sliced_vbi_cap: callback for %VIDIOC_SLICED_VBI_CAP ioctl handler code. + * @g_sliced_vbi_cap: callback for VIDIOC_G_SLICED_VBI_CAP() ioctl handler + * code. * * @s_raw_fmt: setup the video encoder/decoder for raw VBI. * @@ -610,30 +648,30 @@ struct v4l2_subdev_pad_config { * struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations * * @init_cfg: initialize the pad config to default values - * @enum_mbus_code: callback for %VIDIOC_SUBDEV_ENUM_MBUS_CODE ioctl handler + * @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler * code. - * @enum_frame_size: callback for %VIDIOC_SUBDEV_ENUM_FRAME_SIZE ioctl handler + * @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler * code. * - * @enum_frame_interval: callback for %VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL ioctl + * @enum_frame_interval: callback for VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL() ioctl * handler code. * - * @get_fmt: callback for %VIDIOC_SUBDEV_G_FMT ioctl handler code. + * @get_fmt: callback for VIDIOC_SUBDEV_G_FMT() ioctl handler code. * - * @set_fmt: callback for %VIDIOC_SUBDEV_S_FMT ioctl handler code. + * @set_fmt: callback for VIDIOC_SUBDEV_S_FMT() ioctl handler code. * - * @get_selection: callback for %VIDIOC_SUBDEV_G_SELECTION ioctl handler code. + * @get_selection: callback for VIDIOC_SUBDEV_G_SELECTION() ioctl handler code. * - * @set_selection: callback for %VIDIOC_SUBDEV_S_SELECTION ioctl handler code. + * @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code. * - * @get_edid: callback for %VIDIOC_SUBDEV_G_EDID ioctl handler code. + * @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code. * - * @set_edid: callback for %VIDIOC_SUBDEV_S_EDID ioctl handler code. + * @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code. * - * @dv_timings_cap: callback for %VIDIOC_SUBDEV_DV_TIMINGS_CAP ioctl handler + * @dv_timings_cap: callback for VIDIOC_SUBDEV_DV_TIMINGS_CAP() ioctl handler * code. * - * @enum_dv_timings: callback for %VIDIOC_SUBDEV_ENUM_DV_TIMINGS ioctl handler + * @enum_dv_timings: callback for VIDIOC_SUBDEV_ENUM_DV_TIMINGS() ioctl handler * code. * * @link_validate: used by the media controller code to check if the links @@ -766,7 +804,7 @@ struct v4l2_subdev_platform_data { * @list: List of sub-devices * @owner: The owner is the same as the driver's &struct device owner. 
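
The VIDIOC_SUBDEV_* ioctls listed for &struct v4l2_subdev_pad_ops above usually boil down to a small get/set pair in the sub-device driver. A bare-bones ->get_fmt sketch, assuming a single pad, a hypothetical skel_sensor structure with a cached active format, and CONFIG_VIDEO_V4L2_SUBDEV_API enabled so that v4l2_subdev_get_try_format() is available:

	/* Sketch: return either the TRY format stored in the pad config or
	 * the active format cached in the driver's private structure.
	 */
	static int skel_get_fmt(struct v4l2_subdev *sd,
				struct v4l2_subdev_pad_config *cfg,
				struct v4l2_subdev_format *fmt)
	{
		struct skel_sensor *sensor = container_of(sd, struct skel_sensor, sd);

		if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
			fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
		else
			fmt->format = sensor->cur_fmt;	/* hypothetical cached format */
		return 0;
	}
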
* @owner_v4l2_dev: true if the &sd->owner matches the owner of @v4l2_dev->dev - * ownner. Initialized by v4l2_device_register_subdev(). + * owner. Initialized by v4l2_device_register_subdev(). * @flags: subdev flags. Can be: * %V4L2_SUBDEV_FL_IS_I2C - Set this flag if this subdev is a i2c device; * %V4L2_SUBDEV_FL_IS_SPI - Set this flag if this subdev is a spi device; diff --git a/include/media/v4l2-tpg-colors.h b/include/media/v4l2-tpg-colors.h deleted file mode 100644 index 2a88d1fae0cd..000000000000 --- a/include/media/v4l2-tpg-colors.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * v4l2-tpg-colors.h - Color definitions for the test pattern generator - * - * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _V4L2_TPG_COLORS_H_ -#define _V4L2_TPG_COLORS_H_ - -struct color { - unsigned char r, g, b; -}; - -struct color16 { - int r, g, b; -}; - -enum tpg_color { - TPG_COLOR_CSC_WHITE, - TPG_COLOR_CSC_YELLOW, - TPG_COLOR_CSC_CYAN, - TPG_COLOR_CSC_GREEN, - TPG_COLOR_CSC_MAGENTA, - TPG_COLOR_CSC_RED, - TPG_COLOR_CSC_BLUE, - TPG_COLOR_CSC_BLACK, - TPG_COLOR_75_YELLOW, - TPG_COLOR_75_CYAN, - TPG_COLOR_75_GREEN, - TPG_COLOR_75_MAGENTA, - TPG_COLOR_75_RED, - TPG_COLOR_75_BLUE, - TPG_COLOR_100_WHITE, - TPG_COLOR_100_YELLOW, - TPG_COLOR_100_CYAN, - TPG_COLOR_100_GREEN, - TPG_COLOR_100_MAGENTA, - TPG_COLOR_100_RED, - TPG_COLOR_100_BLUE, - TPG_COLOR_100_BLACK, - TPG_COLOR_TEXTFG, - TPG_COLOR_TEXTBG, - TPG_COLOR_RANDOM, - TPG_COLOR_RAMP, - TPG_COLOR_MAX = TPG_COLOR_RAMP + 256 -}; - -extern const struct color tpg_colors[TPG_COLOR_MAX]; -extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1]; -extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1]; -extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1] - [V4L2_XFER_FUNC_SMPTE2084 + 1] - [TPG_COLOR_CSC_BLACK + 1]; - -#endif diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h index d760aa73ebbb..0bda0adc744f 100644 --- a/include/media/videobuf-core.h +++ b/include/media/videobuf-core.h @@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q, ssize_t videobuf_read_one(struct videobuf_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblocking); -unsigned int videobuf_poll_stream(struct file *file, +__poll_t videobuf_poll_stream(struct file *file, struct videobuf_queue *q, poll_table *wait); diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h index a14ac7711c92..c9c81990a56c 100644 --- a/include/media/videobuf-dvb.h +++ b/include/media/videobuf-dvb.h @@ -1,9 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#include <dvbdev.h> -#include <dmxdev.h> -#include <dvb_demux.h> -#include <dvb_net.h> -#include <dvb_frontend.h> +#include <media/dvbdev.h> +#include <media/dmxdev.h> +#include <media/dvb_demux.h> +#include 
<media/dvb_net.h> +#include <media/dvb_frontend.h> #ifndef _VIDEOBUF_DVB_H_ #define _VIDEOBUF_DVB_H_ diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index ef9b64398c8c..aa16c064294f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -16,6 +16,7 @@ #include <linux/mutex.h> #include <linux/poll.h> #include <linux/dma-buf.h> +#include <linux/bitops.h> #define VB2_MAX_FRAME (32) #define VB2_MAX_PLANES (8) @@ -45,7 +46,7 @@ struct vb2_fileio_data; struct vb2_threadio_data; /** - * struct vb2_mem_ops - memory handling/memory allocator operations + * struct vb2_mem_ops - memory handling/memory allocator operations. * @alloc: allocate video memory and, optionally, allocator private data, * return ERR_PTR() on failure or a pointer to allocator private, * per-buffer data on success; the returned private structure @@ -69,7 +70,7 @@ struct vb2_threadio_data; * argument to other ops in this structure. * @put_userptr: inform the allocator that a USERPTR buffer will no longer * be used. - * @attach_dmabuf: attach a shared struct dma_buf for a hardware operation; + * @attach_dmabuf: attach a shared &struct dma_buf for a hardware operation; * used for DMABUF memory types; dev is the alloc device * dbuf is the shared dma_buf; returns ERR_PTR() on failure; * allocator private per-buffer structure on success; @@ -145,28 +146,28 @@ struct vb2_mem_ops { }; /** - * struct vb2_plane - plane information - * @mem_priv: private data with this plane - * @dbuf: dma_buf - shared buffer object + * struct vb2_plane - plane information. + * @mem_priv: private data with this plane. + * @dbuf: dma_buf - shared buffer object. * @dbuf_mapped: flag to show whether dbuf is mapped or not - * @bytesused: number of bytes occupied by data in the plane (payload) - * @length: size of this plane (NOT the payload) in bytes + * @bytesused: number of bytes occupied by data in the plane (payload). + * @length: size of this plane (NOT the payload) in bytes. * @min_length: minimum required size of this plane (NOT the payload) in bytes. * @length is always greater or equal to @min_length. - * @offset: when memory in the associated struct vb2_buffer is - * VB2_MEMORY_MMAP, equals the offset from the start of + * @m: Union with memtype-specific data. + * @m.offset: when memory in the associated struct vb2_buffer is + * %VB2_MEMORY_MMAP, equals the offset from the start of * the device memory for this plane (or is a "cookie" that - * should be passed to mmap() called on the video node) - * @userptr: when memory is VB2_MEMORY_USERPTR, a userspace pointer - * pointing to this plane - * @fd: when memory is VB2_MEMORY_DMABUF, a userspace file - * descriptor associated with this plane - * @m: Union with memtype-specific data (@offset, @userptr or - * @fd). + * should be passed to mmap() called on the video node). + * @m.userptr: when memory is %VB2_MEMORY_USERPTR, a userspace pointer + * pointing to this plane. + * @m.fd: when memory is %VB2_MEMORY_DMABUF, a userspace file + * descriptor associated with this plane. * @data_offset: offset in the plane to the start of data; usually 0, - * unless there is a header in front of the data + * unless there is a header in front of the data. + * * Should contain enough information to be able to cover all the fields - * of struct v4l2_plane at videodev2.h + * of &struct v4l2_plane at videodev2.h. 
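
Since the @bytesused vs. @length distinction above is a recurring source of confusion: @length is the allocated plane size, while the payload is what is actually carried in it. A typical capture-side buf_prepare sketch; skel_dev and dev->sizeimage are hypothetical, the vb2_plane_size()/vb2_set_plane_payload() helpers are the standard videobuf2 accessors.

	/* Sketch: verify the allocated plane can hold one frame and declare
	 * the payload the hardware will write.
	 */
	static int skel_buf_prepare(struct vb2_buffer *vb)
	{
		struct skel_dev *dev = vb2_get_drv_priv(vb->vb2_queue);	/* hypothetical */
		unsigned long size = dev->sizeimage;			/* hypothetical */

		if (vb2_plane_size(vb, 0) < size)
			return -EINVAL;

		vb2_set_plane_payload(vb, 0, size);
		return 0;
	}
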
*/ struct vb2_plane { void *mem_priv; @@ -184,35 +185,35 @@ struct vb2_plane { }; /** - * enum vb2_io_modes - queue access methods - * @VB2_MMAP: driver supports MMAP with streaming API - * @VB2_USERPTR: driver supports USERPTR with streaming API - * @VB2_READ: driver supports read() style access - * @VB2_WRITE: driver supports write() style access - * @VB2_DMABUF: driver supports DMABUF with streaming API + * enum vb2_io_modes - queue access methods. + * @VB2_MMAP: driver supports MMAP with streaming API. + * @VB2_USERPTR: driver supports USERPTR with streaming API. + * @VB2_READ: driver supports read() style access. + * @VB2_WRITE: driver supports write() style access. + * @VB2_DMABUF: driver supports DMABUF with streaming API. */ enum vb2_io_modes { - VB2_MMAP = (1 << 0), - VB2_USERPTR = (1 << 1), - VB2_READ = (1 << 2), - VB2_WRITE = (1 << 3), - VB2_DMABUF = (1 << 4), + VB2_MMAP = BIT(0), + VB2_USERPTR = BIT(1), + VB2_READ = BIT(2), + VB2_WRITE = BIT(3), + VB2_DMABUF = BIT(4), }; /** - * enum vb2_buffer_state - current video buffer state - * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control - * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf - * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver - * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver - * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver + * enum vb2_buffer_state - current video buffer state. + * @VB2_BUF_STATE_DEQUEUED: buffer under userspace control. + * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf. + * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver. + * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver. + * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver. * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used - * in a hardware operation + * in a hardware operation. * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but - * not yet dequeued to userspace + * not yet dequeued to userspace. * @VB2_BUF_STATE_ERROR: same as above, but the operation on the buffer * has ended with an error, which will be reported - * to the userspace when it is dequeued + * to the userspace when it is dequeued. */ enum vb2_buffer_state { VB2_BUF_STATE_DEQUEUED, @@ -228,15 +229,15 @@ enum vb2_buffer_state { struct vb2_queue; /** - * struct vb2_buffer - represents a video buffer - * @vb2_queue: the queue to which this driver belongs - * @index: id number of the buffer - * @type: buffer type - * @memory: the method, in which the actual data is passed + * struct vb2_buffer - represents a video buffer. + * @vb2_queue: pointer to &struct vb2_queue with the queue to + * which this driver belongs. + * @index: id number of the buffer. + * @type: buffer type. + * @memory: the method, in which the actual data is passed. * @num_planes: number of planes in the buffer - * on an internal driver queue - * @planes: private per-plane information; do not change - * @timestamp: frame timestamp in ns + * on an internal driver queue. + * @timestamp: frame timestamp in ns. 
*/ struct vb2_buffer { struct vb2_queue *vb2_queue; @@ -244,7 +245,6 @@ struct vb2_buffer { unsigned int type; unsigned int memory; unsigned int num_planes; - struct vb2_plane planes[VB2_MAX_PLANES]; u64 timestamp; /* private: internal use only @@ -254,9 +254,11 @@ struct vb2_buffer { * all buffers queued from userspace * done_entry: entry on the list that stores all buffers ready * to be dequeued to userspace + * vb2_plane: per-plane information; do not change */ enum vb2_buffer_state state; + struct vb2_plane planes[VB2_MAX_PLANES]; struct list_head queued_entry; struct list_head done_entry; #ifdef CONFIG_VIDEO_ADV_DEBUG @@ -292,7 +294,7 @@ struct vb2_buffer { }; /** - * struct vb2_ops - driver-specific callbacks + * struct vb2_ops - driver-specific callbacks. * * @queue_setup: called from VIDIOC_REQBUFS() and VIDIOC_CREATE_BUFS() * handlers before memory allocation. It can be called @@ -358,8 +360,8 @@ struct vb2_buffer { * the @buf_queue callback are to be returned by the driver * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED. * If you need a minimum number of buffers before you can - * start streaming, then set @min_buffers_needed in the - * vb2_queue structure. If that is non-zero then + * start streaming, then set + * &vb2_queue->min_buffers_needed. If that is non-zero then * @start_streaming won't be called until at least that * many buffers have been queued up by userspace. * @stop_streaming: called when 'streaming' state must be disabled; driver @@ -396,18 +398,18 @@ struct vb2_ops { }; /** - * struct vb2_buf_ops - driver-specific callbacks + * struct vb2_buf_ops - driver-specific callbacks. * * @verify_planes_array: Verify that a given user space structure contains * enough planes for the buffer. This is called * for each dequeued buffer. - * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. - * For V4L2 this is a struct v4l2_buffer. - * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. + * @fill_user_buffer: given a &vb2_buffer fill in the userspace structure. + * For V4L2 this is a &struct v4l2_buffer. + * @fill_vb2_buffer: given a userspace structure, fill in the &vb2_buffer. * If the userspace structure is invalid, then this op * will return an error. * @copy_timestamp: copy the timestamp from a userspace structure to - * the vb2_buffer struct. + * the &struct vb2_buffer. */ struct vb2_buf_ops { int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); @@ -418,20 +420,21 @@ struct vb2_buf_ops { }; /** - * struct vb2_queue - a videobuf queue + * struct vb2_queue - a videobuf queue. * * @type: private buffer type whose content is defined by the vb2-core * caller. For example, for V4L2, it should match - * the types defined on enum &v4l2_buf_type - * @io_modes: supported io methods (see vb2_io_modes enum) + * the types defined on &enum v4l2_buf_type. + * @io_modes: supported io methods (see &enum vb2_io_modes). + * @alloc_devs: &struct device memory type/allocator-specific per-plane device * @dev: device to use for the default allocation context if the driver * doesn't fill in the @alloc_devs array. * @dma_attrs: DMA attributes to use for the DMA. * @bidirectional: when this flag is set the DMA direction for the buffers of - * this queue will be overridden with DMA_BIDIRECTIONAL direction. + * this queue will be overridden with %DMA_BIDIRECTIONAL direction. 
* This is useful in cases where the hardware (firmware) writes to - * a buffer which is mapped as read (DMA_TO_DEVICE), or reads from - * buffer which is mapped for write (DMA_FROM_DEVICE) in order + * a buffer which is mapped as read (%DMA_TO_DEVICE), or reads from + * buffer which is mapped for write (%DMA_FROM_DEVICE) in order * to satisfy some internal hardware restrictions or adds a padding * needed by the processing algorithm. In case the DMA mapping is * not bidirectional but the hardware (firmware) trying to access @@ -440,10 +443,10 @@ struct vb2_buf_ops { * @fileio_read_once: report EOF after reading the first buffer * @fileio_write_immediately: queue buffer after each write() call * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver - * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF + * @quirk_poll_must_check_waiting_for_buffers: Return %POLLERR at poll when QBUF * has not been called. This is a vb1 idiom that has been adopted * also by vb2. - * @lock: pointer to a mutex that protects the vb2_queue struct. The + * @lock: pointer to a mutex that protects the &struct vb2_queue. The * driver can set this to a mutex to let the v4l2 core serialize * the queuing ioctls. If the driver wants to handle locking * itself, then this should be set to NULL. This lock is not used @@ -454,17 +457,17 @@ struct vb2_buf_ops { * drivers to easily associate an owner filehandle with the queue. * @ops: driver-specific callbacks * @mem_ops: memory allocator specific callbacks - * @buf_ops: callbacks to deliver buffer information - * between user-space and kernel-space - * @drv_priv: driver private data + * @buf_ops: callbacks to deliver buffer information. + * between user-space and kernel-space. + * @drv_priv: driver private data. * @buf_struct_size: size of the driver-specific buffer structure; * "0" indicates the driver doesn't want to use a custom buffer - * structure type. for example, sizeof(struct vb2_v4l2_buffer) + * structure type. for example, ``sizeof(struct vb2_v4l2_buffer)`` * will be used for v4l2. - * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and - * V4L2_BUF_FLAG_TSTAMP_SRC_* + * @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and + * ``V4L2_BUF_FLAG_TSTAMP_SRC_*`` * @gfp_flags: additional gfp flags used when allocating the buffers. - * Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32 + * Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32 * to force the buffer allocation to a specific memory zone. * @min_buffers_needed: the minimum number of buffers needed before * @start_streaming can be called. Used when a DMA engine @@ -484,20 +487,19 @@ struct vb2_buf_ops { * @done_list: list of buffers ready to be dequeued to userspace * @done_lock: lock to protect done_list list * @done_wq: waitqueue for processes waiting for buffers ready to be dequeued - * @alloc_devs: memory type/allocator-specific per-plane device * @streaming: current streaming state * @start_streaming_called: @start_streaming was called successfully and we * started streaming. * @error: a fatal error occurred on the queue * @waiting_for_buffers: used in poll() to check if vb2 is still waiting for * buffers. Only set for capture queues if qbuf has not yet been - * called since poll() needs to return POLLERR in that situation. + * called since poll() needs to return %POLLERR in that situation. 
* @is_multiplanar: set if buffer type is multiplanar * @is_output: set if buffer type is output * @copy_timestamp: set if vb2-core should set timestamps * @last_buffer_dequeued: used in poll() and DQBUF to immediately return if the * last decoded buffer was already dequeued. Set for capture queues - * when a buffer with the V4L2_BUF_FLAG_LAST is dequeued. + * when a buffer with the %V4L2_BUF_FLAG_LAST is dequeued. * @fileio: file io emulator internal data, used only if emulator is active * @threadio: thread io internal data, used only if thread is active */ @@ -525,6 +527,8 @@ struct vb2_queue { gfp_t gfp_flags; u32 min_buffers_needed; + struct device *alloc_devs[VB2_MAX_PLANES]; + /* private: internal use only */ struct mutex mmap_lock; unsigned int memory; @@ -540,8 +544,6 @@ struct vb2_queue { spinlock_t done_lock; wait_queue_head_t done_wq; - struct device *alloc_devs[VB2_MAX_PLANES]; - unsigned int streaming:1; unsigned int start_streaming_called:1; unsigned int error:1; @@ -568,9 +570,10 @@ struct vb2_queue { }; /** - * vb2_plane_vaddr() - Return a kernel virtual address of a given plane - * @vb: vb2_buffer to which the plane in question belongs to - * @plane_no: plane number for which the address is to be returned + * vb2_plane_vaddr() - Return a kernel virtual address of a given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the address is to be returned. * * This function returns a kernel virtual address of a given plane if * such a mapping exist, NULL otherwise. @@ -578,9 +581,10 @@ struct vb2_queue { void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); /** - * vb2_plane_cookie() - Return allocator specific cookie for the given plane - * @vb: vb2_buffer to which the plane in question belongs to - * @plane_no: plane number for which the cookie is to be returned + * vb2_plane_cookie() - Return allocator specific cookie for the given plane. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which the cookie is to be returned. * * This function returns an allocator specific cookie for a given plane if * available, NULL otherwise. The allocator should provide some simple static @@ -591,9 +595,11 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no); void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); /** - * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished - * @vb: vb2_buffer returned from the driver - * @state: either %VB2_BUF_STATE_DONE if the operation finished + * vb2_buffer_done() - inform videobuf that an operation on a buffer + * is finished. + * @vb: pointer to &struct vb2_buffer to be used. + * @state: state of the buffer, as defined by &enum vb2_buffer_state. + * Either %VB2_BUF_STATE_DONE if the operation finished * successfully, %VB2_BUF_STATE_ERROR if the operation finished * with an error or %VB2_BUF_STATE_QUEUED if the driver wants to * requeue buffers. If start_streaming fails then it should return @@ -614,8 +620,8 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no); void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); /** - * vb2_discard_done() - discard all buffers marked as DONE - * @q: videobuf2 queue + * vb2_discard_done() - discard all buffers marked as DONE. + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This function is intended to be used with suspend/resume operations. 
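
Putting vb2_buffer_done() above in context: it is normally called from the driver's interrupt or DMA-completion path once the hardware is finished with a buffer. A condensed sketch with hypothetical bookkeeping (skel_dev, dev->active_buf):

	/* Sketch: complete the buffer currently owned by the hardware,
	 * stamping it and flagging whether the transfer finished cleanly.
	 */
	static void skel_buffer_finish(struct skel_dev *dev, bool error)
	{
		struct vb2_buffer *vb = dev->active_buf;	/* hypothetical */

		vb->timestamp = ktime_get_ns();
		vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	}
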
It * discards all 'done' buffers as they would be too old to be requested after @@ -628,74 +634,83 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state); void vb2_discard_done(struct vb2_queue *q); /** - * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2 - * @q: videobuf2 queue + * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2. + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This function will wait until all buffers that have been given to the driver * by &vb2_ops->buf_queue are given back to vb2 with vb2_buffer_done(). It - * doesn't call wait_prepare()/wait_finish() pair. It is intended to be called - * with all locks taken, for example from &vb2_ops->stop_streaming callback. + * doesn't call &vb2_ops->wait_prepare/&vb2_ops->wait_finish pair. + * It is intended to be called with all locks taken, for example from + * &vb2_ops->stop_streaming callback. */ int vb2_wait_for_all_buffers(struct vb2_queue *q); /** - * vb2_core_querybuf() - query video buffer information - * @q: videobuf queue - * @index: id number of the buffer - * @pb: buffer struct passed from userspace + * vb2_core_querybuf() - query video buffer information. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: id number of the buffer. + * @pb: buffer struct passed from userspace. + * + * Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. * - * Should be called from vidioc_querybuf ioctl handler in driver. * The passed buffer should have been verified. + * * This function fills the relevant information for the userspace. + * + * Return: returns zero on success; an error code otherwise. */ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); /** - * vb2_core_reqbufs() - Initiate streaming - * @q: videobuf2 queue - * @memory: memory type - * @count: requested buffer count + * vb2_core_reqbufs() - Initiate streaming. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @count: requested buffer count. * - * Should be called from vidioc_reqbufs ioctl handler of a driver. + * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. * * This function: * - * #) verifies streaming parameters passed from the userspace, - * #) sets up the queue, + * #) verifies streaming parameters passed from the userspace; + * #) sets up the queue; * #) negotiates number of buffers and planes per buffer with the driver - * to be used during streaming, - * #) allocates internal buffer structures (struct vb2_buffer), according to - * the agreed parameters, + * to be used during streaming; + * #) allocates internal buffer structures (&struct vb2_buffer), according to + * the agreed parameters; * #) for MMAP memory type, allocates actual video memory, using the - * memory handling/allocation routines provided during queue initialization + * memory handling/allocation routines provided during queue initialization. * * If req->count is 0, all the memory will be freed instead. - * If the queue has been allocated previously (by a previous vb2_reqbufs) call - * and the queue is not busy, memory will be reallocated. * - * The return values from this function are intended to be directly returned - * from vidioc_reqbufs handler in driver. 
+ * If the queue has been allocated previously by a previous vb2_core_reqbufs() + * call and the queue is not busy, memory will be reallocated. + * + * Return: returns zero on success; an error code otherwise. */ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int *count); /** * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs - * @q: videobuf2 queue - * @memory: memory type - * @count: requested buffer count - * @requested_planes: number of planes requested - * @requested_sizes: array with the size of the planes + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @memory: memory type, as defined by &enum vb2_memory. + * @count: requested buffer count. + * @requested_planes: number of planes requested. + * @requested_sizes: array with the size of the planes. + * + * Videobuf2 core helper to implement VIDIOC_CREATE_BUFS() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. * - * Should be called from VIDIOC_CREATE_BUFS() ioctl handler of a driver. * This function: * - * #) verifies parameter sanity - * #) calls the .queue_setup() queue operation - * #) performs any necessary memory allocations + * #) verifies parameter sanity; + * #) calls the &vb2_ops->queue_setup queue operation; + * #) performs any necessary memory allocations. * - * Return: the return values from this function are intended to be directly - * returned from VIDIOC_CREATE_BUFS() handler in driver. + * Return: returns zero on success; an error code otherwise. */ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, unsigned int *count, unsigned int requested_planes, @@ -703,57 +718,61 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, /** * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace - * to the kernel - * @q: videobuf2 queue - * @index: id number of the buffer - * @pb: buffer structure passed from userspace to vidioc_prepare_buf - * handler in driver + * to the kernel. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @index: id number of the buffer. + * @pb: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. + * + * Videobuf2 core helper to implement VIDIOC_PREPARE_BUF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. * - * Should be called from vidioc_prepare_buf ioctl handler of a driver. * The passed buffer should have been verified. - * This function calls buf_prepare callback in the driver (if provided), - * in which driver-specific buffer initialization can be performed, * - * The return values from this function are intended to be directly returned - * from vidioc_prepare_buf handler in driver. + * This function calls vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed. + * + * Return: returns zero on success; an error code otherwise. */ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); /** * vb2_core_qbuf() - Queue a buffer from userspace * - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * @index: id number of the buffer - * @pb: buffer structure passed from userspace to vidioc_qbuf handler - * in driver + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_qbuf handler in driver * - * Should be called from vidioc_qbuf ioctl handler of a driver. 
- * The passed buffer should have been verified. + * Videobuf2 core helper to implement VIDIOC_QBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. * * This function: * - * #) if necessary, calls buf_prepare callback in the driver (if provided), in - * which driver-specific buffer initialization can be performed, + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; * #) if streaming is on, queues the buffer in driver by the means of * &vb2_ops->buf_queue callback for processing. * - * The return values from this function are intended to be directly returned - * from vidioc_qbuf handler in driver. + * Return: returns zero on success; an error code otherwise. */ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); /** * vb2_core_dqbuf() - Dequeue a buffer to the userspace - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue * @pindex: pointer to the buffer index. May be NULL - * @pb: buffer structure passed from userspace to vidioc_dqbuf handler - * in driver + * @pb: buffer structure passed from userspace to + * v4l2_ioctl_ops->vidioc_dqbuf handler in driver. * @nonblocking: if true, this call will not sleep waiting for a buffer if no * buffers ready for dequeuing are present. Normally the driver - * would be passing (file->f_flags & O_NONBLOCK) here + * would be passing (file->f_flags & O_NONBLOCK) here. * - * Should be called from vidioc_dqbuf ioctl handler of a driver. - * The passed buffer should have been verified. + * Videobuf2 core helper to implement VIDIOC_DQBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. * * This function: * @@ -763,73 +782,108 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); * #) the buffer struct members are filled with relevant information for * the userspace. * - * The return values from this function are intended to be directly returned - * from vidioc_dqbuf handler in driver. + * Return: returns zero on success; an error code otherwise. */ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, bool nonblocking); +/** + * vb2_core_streamon() - Implements VB2 stream ON logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMON() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. + */ int vb2_core_streamon(struct vb2_queue *q, unsigned int type); + +/** + * vb2_core_streamoff() - Implements VB2 stream OFF logic + * + * @q: pointer to &struct vb2_queue with videobuf2 queue + * @type: type of the queue to be started. + * For V4L2, this is defined by &enum v4l2_buf_type type. + * + * Videobuf2 core helper to implement VIDIOC_STREAMOFF() operation. It is + * called internally by VB2 by an API-specific handler, like + * ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. 
+ */ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); /** - * vb2_core_expbuf() - Export a buffer as a file descriptor - * @q: videobuf2 queue - * @fd: file descriptor associated with DMABUF (set by driver) * - * @type: buffer type - * @index: id number of the buffer + * vb2_core_expbuf() - Export a buffer as a file descriptor. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fd: pointer to the file descriptor associated with DMABUF + * (set by driver). + * @type: buffer type. + * @index: id number of the buffer. * @plane: index of the plane to be exported, 0 for single plane queues - * @flags: flags for newly created file, currently only O_CLOEXEC is - * supported, refer to manual of open syscall for more details + * @flags: file flags for newly created file, as defined at + * include/uapi/asm-generic/fcntl.h. + * Currently, the only supported flag is %O_CLOEXEC; refer to the + * manual of the open syscall for more details. + * - * The return values from this function are intended to be directly returned - * from vidioc_expbuf handler in driver. + * + * Videobuf2 core helper to implement VIDIOC_EXPBUF() operation. It is called + * internally by VB2 by an API-specific handler, like ``videobuf2-v4l2.h``. + * + * Return: returns zero on success; an error code otherwise. */ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, unsigned int index, unsigned int plane, unsigned int flags); /** * vb2_core_queue_init() - initialize a videobuf2 queue - * @q: videobuf2 queue; this structure should be allocated in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * This structure should be allocated in driver * - * The vb2_queue structure should be allocated by the driver. The driver is + * The &vb2_queue structure should be allocated by the driver. The driver is * responsible for clearing its content and setting initial values for some * required entries before calling this function. - * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer - * to the struct vb2_queue description in include/media/videobuf2-core.h - * for more information. + * + * .. note:: + * + * The following fields at @q should be set before calling this function: + * &vb2_queue->ops, &vb2_queue->mem_ops, &vb2_queue->type. */ int vb2_core_queue_init(struct vb2_queue *q); /** * vb2_core_queue_release() - stop streaming, release the queue and free memory - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This function stops streaming and performs necessary clean ups, including * freeing video buffer memory. The driver is responsible for freeing - * the vb2_queue structure itself. + * the &struct vb2_queue itself. */ void vb2_core_queue_release(struct vb2_queue *q); /** * vb2_queue_error() - signal a fatal error on the queue - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * Flag that a fatal unrecoverable error has occurred and wake up all processes - * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing - * buffers will return -EIO. + * waiting on the queue. Polling will now set %POLLERR and queuing and dequeuing + * buffers will return %-EIO. * - * The error flag will be cleared when cancelling the queue, either from - * vb2_streamoff or vb2_queue_release. Drivers should thus not call this + * The error flag will be cleared when canceling the queue, either from + * vb2_streamoff() or vb2_queue_release(). Drivers should thus not call this * function before starting the stream, otherwise the error flag will remain set * until the queue is released when closing the device node. */ void vb2_queue_error(struct vb2_queue *q);
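As a short usage sketch for vb2_queue_error(): a driver that detects an unrecoverable fault, for instance in its interrupt handler, can mark the queue so that polling reports POLLERR and queuing/dequeuing returns -EIO; the register names and the my_dev layout are hypothetical.

static irqreturn_t my_irq_handler(int irq, void *priv)
{
	struct my_dev *dev = priv;
	u32 status = readl(dev->regs + MY_REG_STATUS);

	if (status & MY_STATUS_FATAL_DMA_ERROR) {
		/* Unrecoverable: wake all waiters and fail further I/O. */
		vb2_queue_error(&dev->queue);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}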
/** - * vb2_mmap() - map video buffers into application address space - * @q: videobuf2 queue - * @vma: vma passed to the mmap file operation handler in the driver + * vb2_mmap() - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @vma: pointer to &struct vm_area_struct with the vma passed + * to the mmap file operation handler in the driver. * * Should be called from mmap file operation handler of a driver. * This function maps one plane of one of the available video buffers to @@ -837,8 +891,10 @@ void vb2_queue_error(struct vb2_queue *q); * has to be called once per each plane per each buffer previously allocated. * * When the userspace application calls mmap, it passes to it an offset returned - * to it earlier by the means of vidioc_querybuf handler. That offset acts as - * a "cookie", which is then used to identify the plane to be mapped. + * to it earlier by the means of &v4l2_ioctl_ops->vidioc_querybuf handler. + * That offset acts as a "cookie", which is then used to identify the plane + * to be mapped. + * * This function finds a plane with a matching offset and a mapping is performed * by the means of a provided memory operation. * @@ -848,6 +904,21 @@ void vb2_queue_error(struct vb2_queue *q); int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); #ifndef CONFIG_MMU +/** + * vb2_get_unmapped_area - map video buffers into application address space. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @addr: memory address. + * @len: buffer size. + * @pgoff: page offset. + * @flags: memory flags. + * + * This function is used in noMMU platforms to propose address mapping + * for a given buffer. It's intended to be used as a handler for the + * &file_operations->get_unmapped_area operation. + * + * This is called by the mmap() syscall routines + * to get a proposed address for the mapping, when ``!CONFIG_MMU``. + */ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, unsigned long addr, unsigned long len, @@ -856,10 +927,12 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, #endif /** - * vb2_core_poll() - implements poll userspace operation - * @q: videobuf2 queue - * @file: file argument passed to the poll file operation handler - * @wait: wait argument passed to the poll file operation handler + * vb2_core_poll() - implements poll() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @file: &struct file argument passed to the poll + * file operation handler. + * @wait: &poll_table wait argument passed to the poll + * file operation handler. * * This function implements poll file operation handler for a driver. * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will @@ -871,19 +944,35 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q, * The return values from this function are intended to be directly returned * from poll handler in driver. */ -unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, +__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, poll_table *wait); +/** + * vb2_read() - implements read() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ * @data: pointer to the target userspace buffer + * @count: number of bytes to read + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking) + */ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblock); +/** + * vb2_write() - implements write() syscall logic. + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @data: pointer to the target userspace buffer + * @count: number of bytes to write + * @ppos: file handle position tracking pointer + * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking) + */ size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblock); /** - * typedef vb2_thread_fnc - callback function for use with vb2_thread + * typedef vb2_thread_fnc - callback function for use with vb2_thread. * - * @vb: pointer to struct &vb2_buffer - * @priv: pointer to a private pointer + * @vb: pointer to struct &vb2_buffer. + * @priv: pointer to private data. * * This is called whenever a buffer is dequeued in the thread. */ @@ -891,13 +980,13 @@ typedef int (*vb2_thread_fnc)(struct vb2_buffer *vb, void *priv); /** * vb2_thread_start() - start a thread for the given queue. - * @q: videobuf queue - * @fnc: callback function - * @priv: priv pointer passed to the callback function + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @fnc: &vb2_thread_fnc callback function. + * @priv: priv pointer passed to the callback function. * @thread_name: the name of the thread. This will be prefixed with "vb2-". * * This starts a thread that will queue and dequeue until an error occurs - * or @vb2_thread_stop is called. + * or vb2_thread_stop() is called. * * .. attention:: * * @@ -910,13 +999,13 @@ int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, /** * vb2_thread_stop() - stop the thread for the given queue. - * @q: videobuf queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. */ int vb2_thread_stop(struct vb2_queue *q); /** - * vb2_is_streaming() - return streaming status of the queue - * @q: videobuf queue + * vb2_is_streaming() - return streaming status of the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. */ static inline bool vb2_is_streaming(struct vb2_queue *q) { @@ -925,15 +1014,16 @@ static inline bool vb2_is_streaming(struct vb2_queue *q) /** * vb2_fileio_is_active() - return true if fileio is active. - * @q: videobuf queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This returns true if read() or write() is used to stream the data * as opposed to stream I/O. This is almost never an important distinction, * except in rare cases. One such case is that using read() or write() to - * stream a format using V4L2_FIELD_ALTERNATE is not allowed since there + * stream a format using %V4L2_FIELD_ALTERNATE is not allowed since there * is no way you can pass the field information of each buffer to/from * userspace. A driver that supports this field format should check for - * this in the queue_setup op and reject it if this function returns true. + * this in the &vb2_ops->queue_setup op and reject it if this function returns + * true. */ static inline bool vb2_fileio_is_active(struct vb2_queue *q) { @@ -941,8 +1031,8 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q) } /** - * vb2_is_busy() - return busy status of the queue - * @q: videobuf queue + * vb2_is_busy() - return busy status of the queue.
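A hedged sketch of the vb2_thread interface documented above, for a driver that consumes buffers purely in-kernel; my_process_buffer() and the dev->queue member are hypothetical:

static int my_thread_fn(struct vb2_buffer *vb, void *priv)
{
	struct my_dev *dev = priv;

	/* Invoked by the vb2 thread for every dequeued buffer. */
	return my_process_buffer(dev, vb);
}

static int my_start_capture(struct my_dev *dev)
{
	/* The thread will show up as "vb2-my-capture". */
	return vb2_thread_start(&dev->queue, my_thread_fn, dev, "my-capture");
}

static void my_stop_capture(struct my_dev *dev)
{
	vb2_thread_stop(&dev->queue);
}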
+ * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This function checks if queue has any buffers allocated. */ @@ -952,8 +1042,8 @@ static inline bool vb2_is_busy(struct vb2_queue *q) } /** - * vb2_get_drv_priv() - return driver private data associated with the queue - * @q: videobuf queue + * vb2_get_drv_priv() - return driver private data associated with the queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. */ static inline void *vb2_get_drv_priv(struct vb2_queue *q) { @@ -961,10 +1051,11 @@ static inline void *vb2_get_drv_priv(struct vb2_queue *q) } /** - * vb2_set_plane_payload() - set bytesused for the plane plane_no - * @vb: buffer for which plane payload should be set - * @plane_no: plane number for which payload should be set - * @size: payload in bytes + * vb2_set_plane_payload() - set bytesused for the plane @plane_no. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. + * @size: payload in bytes. */ static inline void vb2_set_plane_payload(struct vb2_buffer *vb, unsigned int plane_no, unsigned long size) @@ -975,8 +1066,9 @@ static inline void vb2_set_plane_payload(struct vb2_buffer *vb, /** * vb2_get_plane_payload() - get bytesused for the plane plane_no - * @vb: buffer for which plane payload should be set - * @plane_no: plane number for which payload should be set + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which payload should be set. */ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, unsigned int plane_no) @@ -987,9 +1079,10 @@ static inline unsigned long vb2_get_plane_payload(struct vb2_buffer *vb, } /** - * vb2_plane_size() - return plane size in bytes - * @vb: buffer for which plane size should be returned - * @plane_no: plane number for which size should be returned + * vb2_plane_size() - return plane size in bytes. + * @vb: pointer to &struct vb2_buffer to which the plane in + * question belongs to. + * @plane_no: plane number for which size should be returned. */ static inline unsigned long vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) @@ -1000,8 +1093,8 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no) } /** - * vb2_start_streaming_called() - return streaming status of driver - * @q: videobuf queue + * vb2_start_streaming_called() - return streaming status of driver. + * @q: pointer to &struct vb2_queue with videobuf2 queue. */ static inline bool vb2_start_streaming_called(struct vb2_queue *q) { @@ -1009,8 +1102,8 @@ static inline bool vb2_start_streaming_called(struct vb2_queue *q) } /** - * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue - * @q: videobuf queue + * vb2_clear_last_buffer_dequeued() - clear last buffer dequeued flag of queue. + * @q: pointer to &struct vb2_queue with videobuf2 queue. */ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) { @@ -1024,10 +1117,10 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) /** * vb2_buffer_in_use() - return true if the buffer is in use and - * the queue cannot be freed (by the means of REQBUFS(0)) call + * the queue cannot be freed (by the means of VIDIOC_REQBUFS(0)) call. * - * @vb: buffer for which plane size should be returned - * @q: videobuf queue + * @vb: buffer for which plane size should be returned. + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
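The plane helpers above are typically used from a &vb2_ops->buf_prepare callback, for example to validate the buffer size and to report how much data a capture buffer will carry; sizeimage below is a hypothetical per-device field:

static int my_buf_prepare(struct vb2_buffer *vb)
{
	struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = dev->sizeimage;

	/* Refuse buffers smaller than the currently configured format. */
	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;

	/* For capture devices: bytes that will be filled into plane 0. */
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}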
*/ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); @@ -1035,11 +1128,11 @@ bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); * vb2_verify_memory_type() - Check whether the memory type and buffer type * passed to a buffer operation are compatible with the queue. * - * @q: videobuf queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory model, as defined by enum &vb2_memory. * @type: private buffer type whose content is defined by the vb2-core * caller. For example, for V4L2, it should match - * the types defined on enum &v4l2_buf_type + * the types defined on enum &v4l2_buf_type. */ int vb2_verify_memory_type(struct vb2_queue *q, enum vb2_memory memory, unsigned int type); diff --git a/include/media/videobuf2-dvb.h b/include/media/videobuf2-dvb.h index 5a31faa24f1a..8605366ec87c 100644 --- a/include/media/videobuf2-dvb.h +++ b/include/media/videobuf2-dvb.h @@ -2,12 +2,11 @@ #ifndef _VIDEOBUF2_DVB_H_ #define _VIDEOBUF2_DVB_H_ -#include <dvbdev.h> -#include <dmxdev.h> -#include <dvb_demux.h> -#include <dvb_net.h> -#include <dvb_frontend.h> - +#include <media/dvbdev.h> +#include <media/dmxdev.h> +#include <media/dvb_demux.h> +#include <media/dvb_net.h> +#include <media/dvb_frontend.h> #include <media/videobuf2-v4l2.h> /* We don't actually need to include media-device.h here */ diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h index a6ed091b79ce..4b5b84f93538 100644 --- a/include/media/videobuf2-memops.h +++ b/include/media/videobuf2-memops.h @@ -19,11 +19,11 @@ #include <linux/refcount.h> /** - * struct vb2_vmarea_handler - common vma refcount tracking handler + * struct vb2_vmarea_handler - common vma refcount tracking handler. * - * @refcount: pointer to refcount entry in the buffer - * @put: callback to function that decreases buffer refcount - * @arg: argument for @put callback + * @refcount: pointer to &refcount_t entry in the buffer. + * @put: callback to function that decreases buffer refcount. + * @arg: argument for @put callback. */ struct vb2_vmarea_handler { refcount_t *refcount; diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index 036127c54bbf..3d5e2d739f05 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -24,16 +24,17 @@ #endif /** - * struct vb2_v4l2_buffer - video buffer information for v4l2 + * struct vb2_v4l2_buffer - video buffer information for v4l2. * - * @vb2_buf: video buffer 2 - * @flags: buffer informational flags - * @field: enum v4l2_field; field order of the image in the buffer - * @timecode: frame timecode - * @sequence: sequence count of this frame + * @vb2_buf: embedded struct &vb2_buffer. + * @flags: buffer informational flags. + * @field: field order of the image in the buffer, as defined by + * &enum v4l2_field. + * @timecode: frame timecode. + * @sequence: sequence count of this frame. * * Should contain enough information to be able to cover all the fields - * of struct v4l2_buffer at videodev2.h + * of &struct v4l2_buffer at ``videodev2.h``. */ struct vb2_v4l2_buffer { struct vb2_buffer vb2_buf; @@ -56,9 +57,9 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); * vb2_reqbufs() - Wrapper for vb2_core_reqbufs() that also verifies * the memory and type values. * - * @q: videobuf2 queue - * @req: struct passed from userspace to vidioc_reqbufs handler - * in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
+ * @req: &struct v4l2_requestbuffers passed from userspace to + * &v4l2_ioctl_ops->vidioc_reqbufs handler in driver. */ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); @@ -66,94 +67,99 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req); * vb2_create_bufs() - Wrapper for vb2_core_create_bufs() that also verifies * the memory and type values. * - * @q: videobuf2 queue - * @create: creation parameters, passed from userspace to vidioc_create_bufs - * handler in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @create: creation parameters, passed from userspace to + * &v4l2_ioctl_ops->vidioc_create_bufs handler in driver */ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); /** * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel * - * @q: videobuf2 queue - * @b: buffer structure passed from userspace to vidioc_prepare_buf - * handler in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver + * + * Should be called from &v4l2_ioctl_ops->vidioc_prepare_buf ioctl handler + * of a driver. * - * Should be called from vidioc_prepare_buf ioctl handler of a driver. * This function: * * #) verifies the passed buffer, - * #) calls buf_prepare callback in the driver (if provided), in which - * driver-specific buffer initialization can be performed. + * #) calls &vb2_ops->buf_prepare callback in the driver (if provided), + * in which driver-specific buffer initialization can be performed. * * The return values from this function are intended to be directly returned - * from vidioc_prepare_buf handler in driver. + * from &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver. */ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b); /** * vb2_qbuf() - Queue a buffer from userspace - * @q: videobuf2 queue - * @b: buffer structure passed from userspace to VIDIOC_QBUF() handler - * in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_qbuf handler in driver * - * Should be called from VIDIOC_QBUF() ioctl handler of a driver. + * Should be called from &v4l2_ioctl_ops->vidioc_qbuf handler of a driver. * * This function: * - * #) verifies the passed buffer, - * #) if necessary, calls buf_prepare callback in the driver (if provided), in - * which driver-specific buffer initialization can be performed, - * #) if streaming is on, queues the buffer in driver by the means of buf_queue - * callback for processing. + * #) verifies the passed buffer; + * #) if necessary, calls &vb2_ops->buf_prepare callback in the driver + * (if provided), in which driver-specific buffer initialization can + * be performed; + * #) if streaming is on, queues the buffer in driver by the means of + * &vb2_ops->buf_queue callback for processing. * * The return values from this function are intended to be directly returned - * from VIDIOC_QBUF() handler in driver. + * from &v4l2_ioctl_ops->vidioc_qbuf handler in driver. */ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b); /** * vb2_expbuf() - Export a buffer as a file descriptor - * @q: videobuf2 queue - * @eb: export buffer structure passed from userspace to VIDIOC_EXPBUF() - * handler in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. 
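As a sketch of how these wrappers are meant to be wired into a driver's &v4l2_ioctl_ops (the my_dev lookup through video_drvdata() is one common pattern, used here as an assumption rather than a requirement):

static int my_vidioc_reqbufs(struct file *file, void *fh,
			     struct v4l2_requestbuffers *req)
{
	struct my_dev *dev = video_drvdata(file);

	/* vb2 checks the memory/type fields and (re)allocates buffers. */
	return vb2_reqbufs(&dev->queue, req);
}

static int my_vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_qbuf(&dev->queue, b);
}

static int my_vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_dqbuf(&dev->queue, b, file->f_flags & O_NONBLOCK);
}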
+ * @eb: export buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_expbuf handler in driver * * The return values from this function are intended to be directly returned - * from VIDIOC_EXPBUF() handler in driver. + * from &v4l2_ioctl_ops->vidioc_expbuf handler in driver. */ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); /** * vb2_dqbuf() - Dequeue a buffer to the userspace - * @q: videobuf2 queue - * @b: buffer structure passed from userspace to VIDIOC_DQBUF() handler - * in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @b: buffer structure passed from userspace to + * &v4l2_ioctl_ops->vidioc_dqbuf handler in driver * @nonblocking: if true, this call will not sleep waiting for a buffer if no * buffers ready for dequeuing are present. Normally the driver - * would be passing (file->f_flags & O_NONBLOCK) here + * would be passing (&file->f_flags & %O_NONBLOCK) here * - * Should be called from VIDIOC_DQBUF() ioctl handler of a driver. + * Should be called from &v4l2_ioctl_ops->vidioc_dqbuf ioctl handler + * of a driver. * * This function: * - * #) verifies the passed buffer, - * #) calls buf_finish callback in the driver (if provided), in which + * #) verifies the passed buffer; + * #) calls &vb2_ops->buf_finish callback in the driver (if provided), in which * driver can perform any additional operations that may be required before - * returning the buffer to userspace, such as cache sync, + * returning the buffer to userspace, such as cache sync; * #) the buffer struct members are filled with relevant information for * the userspace. * * The return values from this function are intended to be directly returned - * from VIDIOC_DQBUF() handler in driver. + * from &v4l2_ioctl_ops->vidioc_dqbuf handler in driver. */ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); /** * vb2_streamon - start streaming - * @q: videobuf2 queue - * @type: type argument passed from userspace to vidioc_streamon handler + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @type: type argument passed from userspace to vidioc_streamon handler, + * as defined by &enum v4l2_buf_type. * - * Should be called from vidioc_streamon handler of a driver. + * Should be called from &v4l2_ioctl_ops->vidioc_streamon handler of a driver. * * This function: * @@ -161,13 +167,13 @@ int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); * 2) passes any previously queued buffers to the driver and starts streaming * * The return values from this function are intended to be directly returned - * from vidioc_streamon handler in the driver. + * from &v4l2_ioctl_ops->vidioc_streamon handler in the driver. */ int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); /** * vb2_streamoff - stop streaming - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * @type: type argument passed from userspace to vidioc_streamoff handler * * Should be called from vidioc_streamoff handler of a driver. @@ -186,7 +192,7 @@ int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); /** * vb2_queue_init() - initialize a videobuf2 queue - * @q: videobuf2 queue; this structure should be allocated in driver + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * The vb2_queue structure should be allocated by the driver. 
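Tying the earlier sketches together, queue initialization in a driver's probe path might look as follows; the field names come from &struct vb2_queue, while my_dev, my_vb2_ops and struct my_buffer are the hypothetical pieces introduced above, and vb2_vmalloc_memops is just one possible choice of memory ops:

static int my_init_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	/* The driver owns the structure; clear it before filling it in. */
	memset(q, 0, sizeof(*q));
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
	q->drv_priv = dev;
	q->ops = &my_vb2_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &dev->lock;	/* needed by vb2_ops_wait_prepare/finish */

	return vb2_queue_init(q);
}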
The driver is * responsible of clearing it's content and setting initial values for some @@ -199,7 +205,7 @@ int __must_check vb2_queue_init(struct vb2_queue *q); /** * vb2_queue_release() - stop streaming, release the queue and free memory - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * * This function stops streaming and performs necessary clean ups, including * freeing video buffer memory. The driver is responsible for freeing @@ -209,7 +215,7 @@ void vb2_queue_release(struct vb2_queue *q); /** * vb2_poll() - implements poll userspace operation - * @q: videobuf2 queue + * @q: pointer to &struct vb2_queue with videobuf2 queue. * @file: file argument passed to the poll file operation handler * @wait: wait argument passed to the poll file operation handler * @@ -226,8 +232,7 @@ void vb2_queue_release(struct vb2_queue *q); * The return values from this function are intended to be directly returned * from poll handler in driver. */ -unsigned int vb2_poll(struct vb2_queue *q, struct file *file, - poll_table *wait); +__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); /* * The following functions are not part of the vb2 core API, but are simple @@ -262,7 +267,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos); ssize_t vb2_fop_read(struct file *file, char __user *buf, size_t count, loff_t *ppos); -unsigned int vb2_fop_poll(struct file *file, poll_table *wait); +__poll_t vb2_fop_poll(struct file *file, poll_table *wait); #ifndef CONFIG_MMU unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); @@ -271,7 +276,7 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr, /** * vb2_ops_wait_prepare - helper function to lock a struct &vb2_queue * - * @vq: pointer to struct vb2_queue + * @vq: pointer to &struct vb2_queue * * ..note:: only use if vq->lock is non-NULL. */ @@ -280,7 +285,7 @@ void vb2_ops_wait_prepare(struct vb2_queue *vq); /** * vb2_ops_wait_finish - helper function to unlock a struct &vb2_queue * - * @vq: pointer to struct vb2_queue + * @vq: pointer to &struct vb2_queue * * ..note:: only use if vq->lock is non-NULL. */ diff --git a/include/misc/cxl.h b/include/misc/cxl.h index 480d50a0b8ba..b712be544f8c 100644 --- a/include/misc/cxl.h +++ b/include/misc/cxl.h @@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file); int cxl_fd_release(struct inode *inode, struct file *file); long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm); -unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll); +__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll); ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count, loff_t *off); diff --git a/include/misc/ocxl-config.h b/include/misc/ocxl-config.h new file mode 100644 index 000000000000..3526fa996a22 --- /dev/null +++ b/include/misc/ocxl-config.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#ifndef _OCXL_CONFIG_H_ +#define _OCXL_CONFIG_H_ + +/* + * This file lists the various constants used to read the + * configuration space of an opencapi adapter. 
+ * + * It follows the specification for opencapi 3.0 + */ + +#define OCXL_EXT_CAP_ID_DVSEC 0x23 + +#define OCXL_DVSEC_VENDOR_OFFSET 0x4 +#define OCXL_DVSEC_ID_OFFSET 0x8 +#define OCXL_DVSEC_TL_ID 0xF000 +#define OCXL_DVSEC_TL_BACKOFF_TIMERS 0x10 +#define OCXL_DVSEC_TL_RECV_CAP 0x18 +#define OCXL_DVSEC_TL_SEND_CAP 0x20 +#define OCXL_DVSEC_TL_RECV_RATE 0x30 +#define OCXL_DVSEC_TL_SEND_RATE 0x50 +#define OCXL_DVSEC_FUNC_ID 0xF001 +#define OCXL_DVSEC_FUNC_OFF_INDEX 0x08 +#define OCXL_DVSEC_FUNC_OFF_ACTAG 0x0C +#define OCXL_DVSEC_AFU_INFO_ID 0xF003 +#define OCXL_DVSEC_AFU_INFO_AFU_IDX 0x0A +#define OCXL_DVSEC_AFU_INFO_OFF 0x0C +#define OCXL_DVSEC_AFU_INFO_DATA 0x10 +#define OCXL_DVSEC_AFU_CTRL_ID 0xF004 +#define OCXL_DVSEC_AFU_CTRL_AFU_IDX 0x0A +#define OCXL_DVSEC_AFU_CTRL_TERM_PASID 0x0C +#define OCXL_DVSEC_AFU_CTRL_ENABLE 0x0F +#define OCXL_DVSEC_AFU_CTRL_PASID_SUP 0x10 +#define OCXL_DVSEC_AFU_CTRL_PASID_EN 0x11 +#define OCXL_DVSEC_AFU_CTRL_PASID_BASE 0x14 +#define OCXL_DVSEC_AFU_CTRL_ACTAG_SUP 0x18 +#define OCXL_DVSEC_AFU_CTRL_ACTAG_EN 0x1A +#define OCXL_DVSEC_AFU_CTRL_ACTAG_BASE 0x1C +#define OCXL_DVSEC_VENDOR_ID 0xF0F0 +#define OCXL_DVSEC_VENDOR_CFG_VERS 0x0C +#define OCXL_DVSEC_VENDOR_TLX_VERS 0x10 +#define OCXL_DVSEC_VENDOR_DLX_VERS 0x20 + +#endif /* _OCXL_CONFIG_H_ */ diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h new file mode 100644 index 000000000000..51ccf76db293 --- /dev/null +++ b/include/misc/ocxl.h @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#ifndef _MISC_OCXL_H_ +#define _MISC_OCXL_H_ + +#include <linux/pci.h> + +/* + * Opencapi drivers all need some common facilities, like parsing the + * device configuration space, adding a Process Element to the Shared + * Process Area, etc... + * + * The ocxl module provides a kernel API, to allow other drivers to + * reuse common code. A bit like a in-kernel library. + */ + +#define OCXL_AFU_NAME_SZ (24+1) /* add 1 for NULL termination */ + +/* + * The following 2 structures are a fairly generic way of representing + * the configuration data for a function and AFU, as read from the + * configuration space. + */ +struct ocxl_afu_config { + u8 idx; + int dvsec_afu_control_pos; /* offset of AFU control DVSEC */ + char name[OCXL_AFU_NAME_SZ]; + u8 version_major; + u8 version_minor; + u8 afuc_type; + u8 afum_type; + u8 profile; + u8 global_mmio_bar; /* global MMIO area */ + u64 global_mmio_offset; + u32 global_mmio_size; + u8 pp_mmio_bar; /* per-process MMIO area */ + u64 pp_mmio_offset; + u32 pp_mmio_stride; + u8 log_mem_size; + u8 pasid_supported_log; + u16 actag_supported; +}; + +struct ocxl_fn_config { + int dvsec_tl_pos; /* offset of the Transaction Layer DVSEC */ + int dvsec_function_pos; /* offset of the Function DVSEC */ + int dvsec_afu_info_pos; /* offset of the AFU information DVSEC */ + s8 max_pasid_log; + s8 max_afu_index; +}; + +/* + * Read the configuration space of a function and fill in a + * ocxl_fn_config structure with all the function details + */ +extern int ocxl_config_read_function(struct pci_dev *dev, + struct ocxl_fn_config *fn); + +/* + * Check if an AFU index is valid for the given function. + * + * AFU indexes can be sparse, so a driver should check all indexes up + * to the maximum found in the function description + */ +extern int ocxl_config_check_afu_index(struct pci_dev *dev, + struct ocxl_fn_config *fn, int afu_idx); + +/* + * Read the configuration space of a function for the AFU specified by + * the index 'afu_idx'. 
Fills in a ocxl_afu_config structure + */ +extern int ocxl_config_read_afu(struct pci_dev *dev, + struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu, + u8 afu_idx); + +/* + * Get the max PASID value that can be used by the function + */ +extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count); + +/* + * Tell an AFU, by writing in the configuration space, the PASIDs that + * it can use. Range starts at 'pasid_base' and its size is a multiple + * of 2 + * + * 'afu_control_offset' is the offset of the AFU control DVSEC which + * can be found in the function configuration + */ +extern void ocxl_config_set_afu_pasid(struct pci_dev *dev, + int afu_control_offset, + int pasid_base, u32 pasid_count_log); + +/* + * Get the actag configuration for the function: + * 'base' is the first actag value that can be used. + * 'enabled' it the number of actags available, starting from base. + * 'supported' is the total number of actags desired by all the AFUs + * of the function. + */ +extern int ocxl_config_get_actag_info(struct pci_dev *dev, + u16 *base, u16 *enabled, u16 *supported); + +/* + * Tell a function, by writing in the configuration space, the actags + * it can use. + * + * 'func_offset' is the offset of the Function DVSEC that can found in + * the function configuration + */ +extern void ocxl_config_set_actag(struct pci_dev *dev, int func_offset, + u32 actag_base, u32 actag_count); + +/* + * Tell an AFU, by writing in the configuration space, the actags it + * can use. + * + * 'afu_control_offset' is the offset of the AFU control DVSEC for the + * desired AFU. It can be found in the AFU configuration + */ +extern void ocxl_config_set_afu_actag(struct pci_dev *dev, + int afu_control_offset, + int actag_base, int actag_count); + +/* + * Enable/disable an AFU, by writing in the configuration space. + * + * 'afu_control_offset' is the offset of the AFU control DVSEC for the + * desired AFU. It can be found in the AFU configuration + */ +extern void ocxl_config_set_afu_state(struct pci_dev *dev, + int afu_control_offset, int enable); + +/* + * Set the Transaction Layer configuration in the configuration space. + * Only needed for function 0. + * + * It queries the host TL capabilities, find some common ground + * between the host and device, and set the Transaction Layer on both + * accordingly. + */ +extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec); + +/* + * Request an AFU to terminate a PASID. + * Will return once the AFU has acked the request, or an error in case + * of timeout. + * + * The hardware can only terminate one PASID at a time, so caller must + * guarantee some kind of serialization. + * + * 'afu_control_offset' is the offset of the AFU control DVSEC for the + * desired AFU. It can be found in the AFU configuration + */ +extern int ocxl_config_terminate_pasid(struct pci_dev *dev, + int afu_control_offset, int pasid); + +/* + * Set up the opencapi link for the function. + * + * When called for the first time for a link, it sets up the Shared + * Process Area for the link and the interrupt handler to process + * translation faults. + * + * Returns a 'link handle' that should be used for further calls for + * the link + */ +extern int ocxl_link_setup(struct pci_dev *dev, int PE_mask, + void **link_handle); + +/* + * Remove the association between the function and its link. + */ +extern void ocxl_link_release(struct pci_dev *dev, void *link_handle); + +/* + * Add a Process Element to the Shared Process Area for a link. 
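To show how the configuration helpers above combine, here is a hedged sketch of an opencapi driver's probe-time discovery loop; only functions and fields declared in this header are used, and the assumption that ocxl_config_check_afu_index() returns a positive value for a usable index is marked in the code:

static int my_ocxl_discover(struct pci_dev *dev)
{
	struct ocxl_fn_config fn;
	struct ocxl_afu_config afu;
	int rc, i;

	rc = ocxl_config_read_function(dev, &fn);
	if (rc)
		return rc;

	/* AFU indexes can be sparse: probe every index up to the maximum. */
	for (i = 0; i <= fn.max_afu_index; i++) {
		rc = ocxl_config_check_afu_index(dev, &fn, i);
		if (rc <= 0)	/* assumption: non-positive means not usable */
			continue;

		rc = ocxl_config_read_afu(dev, &fn, &afu, i);
		if (rc)
			return rc;

		pr_info("AFU %d: '%s' version %d.%d\n", afu.idx, afu.name,
			afu.version_major, afu.version_minor);
	}

	return 0;
}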
+ * The process is defined by its PASID, pid, tid and its mm_struct. + * + * 'xsl_err_cb' is an optional callback if the driver wants to be + * notified when the translation fault interrupt handler detects an + * address error. + * 'xsl_err_data' is an argument passed to the above callback, if + * defined + */ +extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, + u64 amr, struct mm_struct *mm, + void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), + void *xsl_err_data); + +/* + * Remove a Process Element from the Shared Process Area for a link + */ +extern int ocxl_link_remove_pe(void *link_handle, int pasid); + +/* + * Allocate an AFU interrupt associated to the link. + * + * 'hw_irq' is the hardware interrupt number + * 'obj_handle' is the 64-bit object handle to be passed to the AFU to + * trigger the interrupt. + * On P9, 'obj_handle' is an address, which, if written, triggers the + * interrupt. It is an MMIO address which needs to be remapped (one + * page). + */ +extern int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, + u64 *obj_handle); + +/* + * Free a previously allocated AFU interrupt + */ +extern void ocxl_link_free_irq(void *link_handle, int hw_irq); + +#endif /* _MISC_OCXL_H_ */ diff --git a/include/net/act_api.h b/include/net/act_api.h index fd08df74c466..6ed9692f20bd 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -86,7 +86,7 @@ struct tc_action_ops { int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); - void (*cleanup)(struct tc_action *, int bind); + void (*cleanup)(struct tc_action *); int (*lookup)(struct net *, struct tc_action **, u32); int (*init)(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **act, int ovr, @@ -120,12 +120,19 @@ int tc_action_net_init(struct tc_action_net *tn, void tcf_idrinfo_destroy(const struct tc_action_ops *ops, struct tcf_idrinfo *idrinfo); -static inline void tc_action_net_exit(struct tc_action_net *tn) +static inline void tc_action_net_exit(struct list_head *net_list, + unsigned int id) { + struct net *net; + rtnl_lock(); - tcf_idrinfo_destroy(tn->ops, tn->idrinfo); + list_for_each_entry(net, net_list, exit_list) { + struct tc_action_net *tn = net_generic(net, id); + + tcf_idrinfo_destroy(tn->ops, tn->idrinfo); + kfree(tn->idrinfo); + } rtnl_unlock(); - kfree(tn->idrinfo); } int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, diff --git a/include/net/addrconf.h b/include/net/addrconf.h index b623b65a79d1..c4185a7b0e90 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout) */ int ipv6_addr_label_init(void); void ipv6_addr_label_cleanup(void); -void ipv6_addr_label_rtnl_register(void); +int ipv6_addr_label_rtnl_register(void); u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr, int type, int ifindex); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e89cff0c4c23..ec9d6bc65855 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags); -uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); +__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int 
bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); int bt_sock_wait_ready(struct sock *sk, unsigned long flags); diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index fe328c52c46b..801489bb14c3 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h @@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt); */ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len); +static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt) +{ + u8 tmp; + + cfpkt_extr_head(pkt, &tmp, 1); + + return tmp; +} + +static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt) +{ + __le16 tmp; + + cfpkt_extr_head(pkt, &tmp, 2); + + return le16_to_cpu(tmp); +} + +static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt) +{ + __le32 tmp; + + cfpkt_extr_head(pkt, &tmp, 4); + + return le32_to_cpu(tmp); +} + /* * Peek header from packet. * Reads data from packet without changing packet. diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fb94a8bd8ab5..81174f9b8d14 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1775,6 +1775,8 @@ enum cfg80211_signal_type { * by %parent_bssid. * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to * the BSS that requested the scan in which the beacon/probe was received. + * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; @@ -1783,6 +1785,8 @@ struct cfg80211_inform_bss { u64 boottime_ns; u64 parent_tsf; u8 parent_bssid[ETH_ALEN] __aligned(2); + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; }; /** @@ -1826,6 +1830,8 @@ struct cfg80211_bss_ies { * that holds the beacon data. @beacon_ies is still valid, of course, and * points to the same data as hidden_beacon_bss->beacon_ies in that case. * @signal: signal strength value (type depends on the wiphy's signal_type) + * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { @@ -1844,6 +1850,8 @@ struct cfg80211_bss { u16 capability; u8 bssid[ETH_ALEN]; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 priv[0] __aligned(sizeof(void *)); }; @@ -2023,6 +2031,9 @@ struct cfg80211_disassoc_request { * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. 
+ * @wep_keys: static WEP keys, if not NULL points to an array of + * CFG80211_MAX_WEP_KEYS WEP keys + * @wep_tx_key: key index (0..3) of the default TX static WEP key */ struct cfg80211_ibss_params { const u8 *ssid; @@ -2039,6 +2050,8 @@ struct cfg80211_ibss_params { int mcast_rate[NUM_NL80211_BANDS]; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; }; /** @@ -5577,7 +5590,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in MHz - * @sig_dbm: signal strength in mBm, or 0 if unknown + * @sig_dbm: signal strength in dBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags @@ -5756,7 +5769,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, * @frame: the frame * @len: length of the frame * @freq: frequency the frame was received on - * @sig_dbm: signal strength in mBm, or 0 if unknown + * @sig_dbm: signal strength in dBm, or 0 if unknown * * Use this function to report to userspace when a beacon was * received. It is not useful to call this when there is no diff --git a/include/net/devlink.h b/include/net/devlink.h index b9654e133599..6545b03e97f7 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -26,10 +26,12 @@ struct devlink { struct list_head port_list; struct list_head sb_list; struct list_head dpipe_table_list; + struct list_head resource_list; struct devlink_dpipe_headers *dpipe_headers; const struct devlink_ops *ops; struct device *dev; possible_net_t _net; + struct mutex lock; char priv[0] __aligned(NETDEV_ALIGN); }; @@ -181,6 +183,9 @@ struct devlink_dpipe_table_ops; * @counters_enabled: indicates if counters are active * @counter_control_extern: indicates if counter control is in dpipe or * external tool + * @resource_valid: Indicate that the resource id is valid + * @resource_id: relative resource this table is related to + * @resource_units: number of resource's unit consumed per table's entry * @table_ops: table operations * @rcu: rcu */ @@ -190,6 +195,9 @@ struct devlink_dpipe_table { const char *name; bool counters_enabled; bool counter_control_extern; + bool resource_valid; + u64 resource_id; + u64 resource_units; struct devlink_dpipe_table_ops *table_ops; struct rcu_head rcu; }; @@ -223,7 +231,63 @@ struct devlink_dpipe_headers { unsigned int headers_count; }; +/** + * struct devlink_resource_ops - resource ops + * @occ_get: get the occupied size + * @size_validate: validate the size of the resource before update, reload + * is needed for changes to take place + */ +struct devlink_resource_ops { + u64 (*occ_get)(struct devlink *devlink); + int (*size_validate)(struct devlink *devlink, u64 size, + struct netlink_ext_ack *extack); +}; + +/** + * struct devlink_resource_size_params - resource's size parameters + * @size_min: minimum size which can be set + * @size_max: maximum size which can be set + * @size_granularity: size granularity + * @size_unit: resource's basic unit + */ +struct devlink_resource_size_params { + u64 size_min; + u64 size_max; + u64 size_granularity; + enum devlink_resource_unit unit; +}; + +/** + * struct devlink_resource - devlink resource + * @name: name of the resource + * @id: id, per devlink instance + * @size: size of the resource + * @size_new: updated 
size of the resource, reload is needed + * @size_valid: valid in case the total size of the resource is valid + * including its children + * @parent: parent resource + * @size_params: size parameters + * @list: parent list + * @resource_list: list of child resources + * @resource_ops: resource ops + */ +struct devlink_resource { + const char *name; + u64 id; + u64 size; + u64 size_new; + bool size_valid; + struct devlink_resource *parent; + struct devlink_resource_size_params *size_params; + struct list_head list; + struct list_head resource_list; + const struct devlink_resource_ops *resource_ops; +}; + +#define DEVLINK_RESOURCE_ID_PARENT_TOP 0 + struct devlink_ops { + int (*reload)(struct devlink *devlink); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); int (*port_split)(struct devlink *devlink, unsigned int port_index, @@ -332,6 +396,23 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet; extern struct devlink_dpipe_header devlink_dpipe_header_ipv4; extern struct devlink_dpipe_header devlink_dpipe_header_ipv6; +int devlink_resource_register(struct devlink *devlink, + const char *resource_name, + bool top_hierarchy, + u64 resource_size, + u64 resource_id, + u64 parent_resource_id, + struct devlink_resource_size_params *size_params, + const struct devlink_resource_ops *resource_ops); +void devlink_resources_unregister(struct devlink *devlink, + struct devlink_resource *resource); +int devlink_resource_size_get(struct devlink *devlink, + u64 resource_id, + u64 *p_resource_size); +int devlink_dpipe_table_resource_set(struct devlink *devlink, + const char *table_name, u64 resource_id, + u64 resource_units); + #else static inline struct devlink *devlink_alloc(const struct devlink_ops *ops, @@ -468,6 +549,40 @@ devlink_dpipe_match_put(struct sk_buff *skb, return 0; } +static inline int +devlink_resource_register(struct devlink *devlink, + const char *resource_name, + bool top_hierarchy, + u64 resource_size, + u64 resource_id, + u64 parent_resource_id, + struct devlink_resource_size_params *size_params, + const struct devlink_resource_ops *resource_ops) +{ + return 0; +} + +static inline void +devlink_resources_unregister(struct devlink *devlink, + struct devlink_resource *resource) +{ +} + +static inline int +devlink_resource_size_get(struct devlink *devlink, u64 resource_id, + u64 *p_resource_size) +{ + return -EOPNOTSUPP; +} + +static inline int +devlink_dpipe_table_resource_set(struct devlink *devlink, + const char *table_name, u64 resource_id, + u64 resource_units) +{ + return -EOPNOTSUPP; +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/net/dn_route.h b/include/net/dn_route.h index 55df9939bca2..342d2503cba5 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, */ struct dn_route { struct dst_entry dst; + struct dn_route __rcu *dn_next; struct neighbour *n; diff --git a/include/net/dsa.h b/include/net/dsa.h index 2a05738570d8..6cb602dd970c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -296,31 +296,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds) return mask; } -static inline u8 dsa_upstream_port(struct dsa_switch *ds) +/* Return the local port used to reach an arbitrary switch port */ +static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device, + int port) { - struct dsa_switch_tree *dst = ds->dst; - - /* - * If this is the root switch (i.e. 
the switch that connects - * to the CPU), return the cpu port number on this switch. - * Else return the (DSA) port number that connects to the - * switch that is one hop closer to the cpu. - */ - if (dst->cpu_dp->ds == ds) - return dst->cpu_dp->index; + if (device == ds->index) + return port; else - return ds->rtable[dst->cpu_dp->ds->index]; + return ds->rtable[device]; +} + +/* Return the local port used to reach the dedicated CPU port */ +static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port) +{ + const struct dsa_port *dp = dsa_to_port(ds, port); + const struct dsa_port *cpu_dp = dp->cpu_dp; + + if (!cpu_dp) + return port; + + return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index); } typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { +#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) /* * Legacy probing. */ const char *(*probe)(struct device *dsa_dev, struct device *host_dev, int sw_addr, void **priv); +#endif enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, int port); @@ -412,12 +420,10 @@ struct dsa_switch_ops { */ int (*port_vlan_filtering)(struct dsa_switch *ds, int port, bool vlan_filtering); - int (*port_vlan_prepare)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); - void (*port_vlan_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans); + int (*port_vlan_prepare)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); + void (*port_vlan_add)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); /* @@ -433,12 +439,10 @@ struct dsa_switch_ops { /* * Multicast database */ - int (*port_mdb_prepare)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans); - void (*port_mdb_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans); + int (*port_mdb_prepare)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); + void (*port_mdb_add)(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); int (*port_mdb_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); /* @@ -472,11 +476,20 @@ struct dsa_switch_driver { const struct dsa_switch_ops *ops; }; +#if IS_ENABLED(CONFIG_NET_DSA_LEGACY) /* Legacy driver registration */ void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); +#else +static inline void register_switch_driver(struct dsa_switch_driver *type) { } +static inline void unregister_switch_driver(struct dsa_switch_driver *type) { } +static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) +{ + return NULL; +} +#endif struct net_device *dsa_dev_to_net_device(struct device *dev); /* Keep inline for faster access in hot path */ diff --git a/include/net/dst.h b/include/net/dst.h index d49d607dd2b3..c63d2c37f6e9 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -34,13 +34,9 @@ struct sk_buff; struct dst_entry { struct net_device *dev; - struct rcu_head rcu_head; - struct dst_entry *child; struct dst_ops *ops; unsigned long _metrics; unsigned long expires; - struct dst_entry *path; - 
struct dst_entry *from; #ifdef CONFIG_XFRM struct xfrm_state *xfrm; #else @@ -59,8 +55,6 @@ struct dst_entry { #define DST_XFRM_QUEUE 0x0040 #define DST_METADATA 0x0080 - short error; - /* A non-zero value of dst->obsolete forces by-hand validation * of the route entry. Positive values are set by the generic * dst layer to indicate that the entry has been forcefully @@ -76,35 +70,24 @@ struct dst_entry { #define DST_OBSOLETE_KILL -2 unsigned short header_len; /* more space at head required */ unsigned short trailer_len; /* space to reserve at tail */ - unsigned short __pad3; -#ifdef CONFIG_IP_ROUTE_CLASSID - __u32 tclassid; -#else - __u32 __pad2; -#endif - -#ifdef CONFIG_64BIT - /* - * Align __refcnt to a 64 bytes alignment - * (L1_CACHE_SIZE would be too much) - */ - long __pad_to_align_refcnt[2]; -#endif /* * __refcnt wants to be on a different cache line from * input/output/ops or performance tanks badly */ - atomic_t __refcnt; /* client references */ +#ifdef CONFIG_64BIT + atomic_t __refcnt; /* 64-bit offset 64 */ +#endif int __use; unsigned long lastuse; struct lwtunnel_state *lwtstate; - union { - struct dst_entry *next; - struct rtable __rcu *rt_next; - struct rt6_info __rcu *rt6_next; - struct dn_route __rcu *dn_next; - }; + struct rcu_head rcu_head; + short error; + short __pad; + __u32 tclassid; +#ifndef CONFIG_64BIT + atomic_t __refcnt; /* 32-bit offset 64 */ +#endif }; struct dst_metrics { @@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst) { /* * If your kernel compilation stops here, please check - * __pad_to_align_refcnt declaration in struct dst_entry + * the placement of __refcnt in struct dst_entry */ BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63); WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0); diff --git a/include/net/erspan.h b/include/net/erspan.h index ca94fc86865e..5daa4866412b 100644 --- a/include/net/erspan.h +++ b/include/net/erspan.h @@ -15,7 +15,7 @@ * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The * other fields are set to zero, so only a sequence number follows. 
* - * ERSPAN Type II header (8 octets [42:49]) + * ERSPAN Version 1 (Type II) header (8 octets [42:49]) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -24,11 +24,31 @@ * | Reserved | Index | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * + * + * ERSPAN Version 2 (Type III) header (12 octets [42:49]) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Ver | VLAN | COS |BSO|T| Session ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Timestamp | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | SGT |P| FT | Hw ID |D|Gra|O| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Platform Specific SubHeader (8 octets, optional) + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Platf ID | Platform Specific Info | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Platform Specific Info | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB */ -#define ERSPAN_VERSION 0x1 +#include <uapi/linux/erspan.h> +#define ERSPAN_VERSION 0x1 /* ERSPAN type II */ #define VER_MASK 0xf000 #define VLAN_MASK 0x0fff #define COS_MASK 0xe000 @@ -37,6 +57,19 @@ #define ID_MASK 0x03ff #define INDEX_MASK 0xfffff +#define ERSPAN_VERSION2 0x2 /* ERSPAN type III*/ +#define BSO_MASK EN_MASK +#define SGT_MASK 0xffff0000 +#define P_MASK 0x8000 +#define FT_MASK 0x7c00 +#define HWID_MASK 0x03f0 +#define DIR_MASK 0x0008 +#define GRA_MASK 0x0006 +#define O_MASK 0x0001 + +#define HWID_OFFSET 4 +#define DIR_OFFSET 3 + enum erspan_encap_type { ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */ ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */ @@ -44,18 +77,199 @@ enum erspan_encap_type { ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */ }; -struct erspan_metadata { - __be32 index; /* type II */ -}; +#define ERSPAN_V1_MDSIZE 4 +#define ERSPAN_V2_MDSIZE 8 -struct erspanhdr { - __be16 ver_vlan; -#define VER_OFFSET 12 - __be16 session_id; -#define COS_OFFSET 13 -#define EN_OFFSET 11 -#define T_OFFSET 10 - struct erspan_metadata md; +struct erspan_base_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 vlan_upper:4, + ver:4; + __u8 vlan:8; + __u8 session_id_upper:2, + t:1, + en:2, + cos:3; + __u8 session_id:8; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 ver: 4, + vlan_upper:4; + __u8 vlan:8; + __u8 cos:3, + en:2, + t:1, + session_id_upper:2; + __u8 session_id:8; +#else +#error "Please fix <asm/byteorder.h>" +#endif }; +static inline void set_session_id(struct erspan_base_hdr *ershdr, u16 id) +{ + ershdr->session_id = id & 0xff; + ershdr->session_id_upper = (id >> 8) & 0x3; +} + +static inline u16 get_session_id(const struct erspan_base_hdr *ershdr) +{ + return (ershdr->session_id_upper << 8) + ershdr->session_id; +} + +static inline void set_vlan(struct erspan_base_hdr *ershdr, u16 vlan) +{ + ershdr->vlan = vlan & 0xff; + ershdr->vlan_upper = (vlan >> 8) & 0xf; +} + +static inline u16 get_vlan(const struct erspan_base_hdr *ershdr) +{ + return (ershdr->vlan_upper << 8) + ershdr->vlan; +} + +static inline void set_hwid(struct erspan_md2 *md2, u8 hwid) +{ + md2->hwid = hwid & 0xf; + md2->hwid_upper = (hwid >> 4) & 0x3; +} + +static inline u8 get_hwid(const struct erspan_md2 *md2) +{ 
+ return (md2->hwid_upper << 4) + md2->hwid; +} + +static inline int erspan_hdr_len(int version) +{ + return sizeof(struct erspan_base_hdr) + + (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE); +} + +static inline u8 tos_to_cos(u8 tos) +{ + u8 dscp, cos; + + dscp = tos >> 2; + cos = dscp >> 3; + return cos; +} + +static inline void erspan_build_header(struct sk_buff *skb, + u32 id, u32 index, + bool truncate, bool is_ipv4) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + enum erspan_encap_type enc_type; + struct erspan_base_hdr *ershdr; + struct erspan_metadata *ersmd; + struct qtag_prefix { + __be16 eth_type; + __be16 tci; + } *qp; + u16 vlan_tci = 0; + u8 tos; + + tos = is_ipv4 ? ip_hdr(skb)->tos : + (ipv6_hdr(skb)->priority << 4) + + (ipv6_hdr(skb)->flow_lbl[0] >> 4); + + enc_type = ERSPAN_ENCAP_NOVLAN; + + /* If mirrored packet has vlan tag, extract tci and + * perserve vlan header in the mirrored frame. + */ + if (eth->h_proto == htons(ETH_P_8021Q)) { + qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN); + vlan_tci = ntohs(qp->tci); + enc_type = ERSPAN_ENCAP_INFRAME; + } + + skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE); + ershdr = (struct erspan_base_hdr *)skb->data; + memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE); + + /* Build base header */ + ershdr->ver = ERSPAN_VERSION; + ershdr->cos = tos_to_cos(tos); + ershdr->en = enc_type; + ershdr->t = truncate; + set_vlan(ershdr, vlan_tci); + set_session_id(ershdr, id); + + /* Build metadata */ + ersmd = (struct erspan_metadata *)(ershdr + 1); + ersmd->u.index = htonl(index & INDEX_MASK); +} + +/* ERSPAN GRA: timestamp granularity + * 00b --> granularity = 100 microseconds + * 01b --> granularity = 100 nanoseconds + * 10b --> granularity = IEEE 1588 + * Here we only support 100 microseconds. + */ +static inline __be32 erspan_get_timestamp(void) +{ + u64 h_usecs; + ktime_t kt; + + kt = ktime_get_real(); + h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC); + + /* ERSPAN base header only has 32-bit, + * so it wraps around 4 days. + */ + return htonl((u32)h_usecs); +} + +static inline void erspan_build_header_v2(struct sk_buff *skb, + u32 id, u8 direction, u16 hwid, + bool truncate, bool is_ipv4) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + struct erspan_base_hdr *ershdr; + struct erspan_metadata *md; + struct qtag_prefix { + __be16 eth_type; + __be16 tci; + } *qp; + u16 vlan_tci = 0; + u8 gra = 0; /* 100 usec */ + u8 bso = 0; /* Bad/Short/Oversized */ + u8 sgt = 0; + u8 tos; + + tos = is_ipv4 ? ip_hdr(skb)->tos : + (ipv6_hdr(skb)->priority << 4) + + (ipv6_hdr(skb)->flow_lbl[0] >> 4); + + /* Unlike v1, v2 does not have En field, + * so only extract vlan tci field. 
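The set_session_id()/set_vlan()/set_hwid() accessors above exist because each of those values straddles a byte boundary in the on-wire header: the 10-bit session ID is stored as an 8-bit low part plus a 2-bit *_upper part, the 12-bit VLAN as 8+4, and the 6-bit hardware ID as 4+2. A quick standalone round-trip of the same packing arithmetic, using plain integers instead of the kernel bitfields:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t id = 0x2ab;                         /* any 10-bit session ID */
        uint8_t session_id = id & 0xff;              /* low 8 bits */
        uint8_t session_id_upper = (id >> 8) & 0x3;  /* top 2 bits */

        /* get_session_id() reassembles the two pieces */
        uint16_t out = (uint16_t)((session_id_upper << 8) + session_id);
        assert(out == id);

        /* same scheme for the 12-bit VLAN (8 + 4 split) */
        uint16_t vlan = 0xabc;
        assert((((vlan >> 8) & 0xf) << 8) + (vlan & 0xff) == vlan);

        printf("session id 0x%03x round-trips as 0x%03x\n", id, out);
        return 0;
}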
+ */ + if (eth->h_proto == htons(ETH_P_8021Q)) { + qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN); + vlan_tci = ntohs(qp->tci); + } + + skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE); + ershdr = (struct erspan_base_hdr *)skb->data; + memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE); + + /* Build base header */ + ershdr->ver = ERSPAN_VERSION2; + ershdr->cos = tos_to_cos(tos); + ershdr->en = bso; + ershdr->t = truncate; + set_vlan(ershdr, vlan_tci); + set_session_id(ershdr, id); + + /* Build metadata */ + md = (struct erspan_metadata *)(ershdr + 1); + md->u.md2.timestamp = erspan_get_timestamp(); + md->u.md2.sgt = htons(sgt); + md->u.md2.p = 1; + md->u.md2.ft = 0; + md->u.md2.dir = direction; + md->u.md2.gra = gra; + md->u.md2.o = 0; + set_hwid(&md->u.md2, hwid); +} + #endif diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 304f7aa9cc01..0304ba2ae353 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d, int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue __percpu *cpu_q, struct gnet_stats_queue *q, __u32 qlen); +void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, + const struct gnet_stats_queue __percpu *cpu_q, + const struct gnet_stats_queue *q, __u32 qlen); int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len); int gnet_stats_finish_copy(struct gnet_dump *d); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 0358745ea059..6692d67e9245 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops { * @icsk_af_ops Operations which are AF_INET{4,6} specific * @icsk_ulp_ops Pluggable ULP control hook * @icsk_ulp_data ULP private data + * @icsk_listen_portaddr_node hash to the portaddr listener hashtable * @icsk_ca_state: Congestion control state * @icsk_retransmits: Number of unrecovered [RTO] timeouts * @icsk_pending: Scheduled timer event @@ -101,6 +102,7 @@ struct inet_connection_sock { const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void *icsk_ulp_data; + struct hlist_node icsk_listen_portaddr_node; unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); __u8 icsk_ca_state:6, icsk_ca_setsockopt:1, @@ -305,7 +307,7 @@ void inet_csk_prepare_forced_close(struct sock *sk); /* * LISTEN is a special case for poll.. */ -static inline unsigned int inet_csk_listen_poll(const struct sock *sk) +static inline __poll_t inet_csk_listen_poll(const struct sock *sk) { return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0; diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 2dbbbff5e1e3..9141e95529e7 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -111,6 +111,7 @@ struct inet_bind_hashbucket { */ struct inet_listen_hashbucket { spinlock_t lock; + unsigned int count; struct hlist_head head; }; @@ -132,12 +133,13 @@ struct inet_hashinfo { /* Ok, let's try this, I give up, we do need a local binding * TCP hash as well as the others for fast bind/connect. 
*/ + struct kmem_cache *bind_bucket_cachep; struct inet_bind_hashbucket *bhash; - unsigned int bhash_size; - /* 4 bytes hole on 64 bit */ - struct kmem_cache *bind_bucket_cachep; + /* The 2nd listener table hashed by local port and address */ + unsigned int lhash2_mask; + struct inet_listen_hashbucket *lhash2; /* All the above members are written once at bootup and * never written again _or_ are predominantly read-access. @@ -145,14 +147,25 @@ struct inet_hashinfo { * Now align to a new cache line as all the following members * might be often dirty. */ - /* All sockets in TCP_LISTEN state will be in here. This is the only - * table where wildcard'd TCP sockets can exist. Hash function here - * is just local port number. + /* All sockets in TCP_LISTEN state will be in listening_hash. + * This is the only table where wildcard'd TCP sockets can + * exist. listening_hash is only hashed by local port number. + * If lhash2 is initialized, the same socket will also be hashed + * to lhash2 by port and address. */ struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] ____cacheline_aligned_in_smp; }; +#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \ + hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node) + +static inline struct inet_listen_hashbucket * +inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash) +{ + return &h->lhash2[hash & h->lhash2_mask]; +} + static inline struct inet_ehash_bucket *inet_ehash_bucket( struct inet_hashinfo *hashinfo, unsigned int hash) @@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child); void inet_put_port(struct sock *sk); void inet_hashinfo_init(struct inet_hashinfo *h); +void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, + unsigned long numentries, int scale, + unsigned long low_limit, + unsigned long high_limit); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 39efb968b7a4..0a671c32d6b9 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, int inet_sk_rebuild_header(struct sock *sk); +/** + * inet_sk_state_load - read sk->sk_state for lockless contexts + * @sk: socket pointer + * + * Paired with inet_sk_state_store(). Used in places we don't hold socket lock: + * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... + */ +static inline int inet_sk_state_load(const struct sock *sk) +{ + /* state change might impact lockless readers. */ + return smp_load_acquire(&sk->sk_state); +} + +/** + * inet_sk_state_store - update sk->sk_state + * @sk: socket pointer + * @newstate: new state + * + * Paired with inet_sk_state_load(). Should be used in contexts where + * state change might impact lockless readers. 
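inet_sk_state_load() above pairs an acquire load with the store done by inet_sk_state_store(), declared just below (the store side is presumably the matching release), so lockless readers such as tcp_poll() or tcp_diag_get_info() never observe state-dependent data from before the transition. A small userspace model of the same pairing with C11 atomics; the struct and field names are invented for illustration:

#include <stdatomic.h>

struct fake_sock {
        _Atomic int state;
        int accept_queue_ready;         /* stands in for state-dependent data */
};

/* Writer: publish the data first, then the new state with release semantics. */
static void fake_sk_state_store(struct fake_sock *sk, int newstate)
{
        sk->accept_queue_ready = 1;
        atomic_store_explicit(&sk->state, newstate, memory_order_release);
}

/* Lockless reader: the acquire load guarantees that once the new state is
 * observed, everything written before the paired store is visible too. */
static int fake_sk_state_load(struct fake_sock *sk)
{
        return atomic_load_explicit(&sk->state, memory_order_acquire);
}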
+ */ +void inet_sk_state_store(struct sock *sk, int newstate); + +void inet_sk_set_state(struct sock *sk, int state); + static inline unsigned int __inet_ehashfn(const __be32 laddr, const __u16 lport, const __be32 faddr, diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 1356fa6a7566..899495589a7e 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -93,8 +93,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, struct inet_timewait_death_row *dr, const int state); -void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, - struct inet_hashinfo *hashinfo); +void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, + struct inet_hashinfo *hashinfo); void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm); diff --git a/include/net/ip.h b/include/net/ip.h index af8addbaa3c1..746abff9ce51 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -26,12 +26,14 @@ #include <linux/ip.h> #include <linux/in.h> #include <linux/skbuff.h> +#include <linux/jhash.h> #include <net/inet_sock.h> #include <net/route.h> #include <net/snmp.h> #include <net/flow.h> #include <net/flow_dissector.h> +#include <net/netns/hash.h> #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ #define IPV4_MIN_MTU 68 /* RFC 791 */ @@ -522,6 +524,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip) return (__force unsigned int) ip; } +static inline u32 ipv4_portaddr_hash(const struct net *net, + __be32 saddr, + unsigned int port) +{ + return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; +} + bool ip_call_ra_chain(struct sk_buff *skb); /* diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 10c913816032..34ec321d6a03 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -129,6 +129,8 @@ struct rt6_exception { struct rt6_info { struct dst_entry dst; + struct rt6_info __rcu *rt6_next; + struct rt6_info *from; /* * Tail elements of dst_entry (__refcnt etc.) 
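Taken together with the ip.h hunk above, the new lhash2 helpers let a listener lookup hash the local address and port, pick one port+address bucket, and walk only the sockets bound there instead of the whole port-wide listening_hash chain. A sketch of how a lookup path might combine them; the function name and the scoring step are placeholders, and the caller is assumed to hold rcu_read_lock():

#include <net/ip.h>
#include <net/inet_hashtables.h>

static void walk_lhash2_bucket(struct inet_hashinfo *h, struct net *net,
                               __be32 laddr, unsigned int lport)
{
        u32 hash = ipv4_portaddr_hash(net, laddr, lport);
        struct inet_listen_hashbucket *ilb2 = inet_lhash2_bucket(h, hash);
        struct inet_connection_sock *icsk;

        inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
                /* the icsk embeds the sock, so the cast below is safe */
                struct sock *sk = (struct sock *)icsk;

                /* score sk against laddr/lport and the bound device here */
                (void)sk;
        }
}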
@@ -147,6 +149,7 @@ struct rt6_info { */ struct list_head rt6i_siblings; unsigned int rt6i_nsiblings; + atomic_t rt6i_nh_upper_bound; atomic_t rt6i_ref; @@ -168,19 +171,21 @@ struct rt6_info { u32 rt6i_metric; u32 rt6i_pmtu; /* more non-fragment space at head required */ + int rt6i_nh_weight; unsigned short rt6i_nfheader_len; u8 rt6i_protocol; u8 exception_bucket_flushed:1, - unused:7; + should_flush:1, + unused:6; }; #define for_each_fib6_node_rt_rcu(fn) \ for (rt = rcu_dereference((fn)->leaf); rt; \ - rt = rcu_dereference(rt->dst.rt6_next)) + rt = rcu_dereference(rt->rt6_next)) #define for_each_fib6_walker_rt(w) \ for (rt = (w)->leaf; rt; \ - rt = rcu_dereference_protected(rt->dst.rt6_next, 1)) + rt = rcu_dereference_protected(rt->rt6_next, 1)) static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) { @@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) { struct rt6_info *rt; - for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); - rt = (struct rt6_info *)rt->dst.from); + for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from); if (rt && rt != rt0) rt0->dst.expires = rt->dst.expires; - dst_set_expires(&rt0->dst, timeout); rt0->rt6i_flags |= RTF_EXPIRES; } @@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) u32 cookie = 0; if (rt->rt6i_flags & RTF_PCPU || - (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from)) - rt = (struct rt6_info *)(rt->dst.from); + (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from)) + rt = rt->from; rt6_get_cookie_safe(rt, &cookie); @@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb); void fib6_update_sernum(struct rt6_info *rt); +void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt); #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 18e442ea93d8..27d23a65f3cd 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } +static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt) +{ + return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == + RTF_GATEWAY; +} + void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, @@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg { }; int rt6_dump_route(struct rt6_info *rt, void *p_arg); -void rt6_ifdown(struct net *net, struct net_device *dev); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); +void rt6_sync_up(struct net_device *dev, unsigned int nh_flags); +void rt6_disable_ip(struct net_device *dev, unsigned long event); +void rt6_sync_down_dev(struct net_device *dev, unsigned long event); +void rt6_multipath_rebalance(struct rt6_info *rt); static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) { diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index d66f70f63734..236e40ba06bf 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -36,6 +36,10 @@ struct __ip6_tnl_parm { __be32 o_key; __u32 fwmark; + __u32 index; /* ERSPAN type II index */ + __u8 erspan_ver; /* ERSPAN version */ + __u8 dir; /* direction */ + __u16 hwid; /* 
hwid */ }; /* IPv6 tunnel */ diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 24628f6b09bf..1f16773cfd76 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -116,8 +116,11 @@ struct ip_tunnel { u32 o_seqno; /* The last output seqno */ int tun_hlen; /* Precalculated header length */ - /* This field used only by ERSPAN */ + /* These four fields used only by ERSPAN */ u32 index; /* ERSPAN type II index */ + u8 erspan_ver; /* ERSPAN version */ + u8 dir; /* ERSPAN direction */ + u16 hwid; /* ERSPAN hardware ID */ struct dst_cache dst_cache; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index ff68cf288f9b..eb0bec043c96 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -69,8 +69,7 @@ struct ip_vs_iphdr { }; static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, - int len, void *buffer, - const struct ip_vs_iphdr *ipvsh) + int len, void *buffer) { return skb_header_pointer(skb, offset, len, buffer); } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 221238254eb7..8606c9113d3f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -22,6 +22,7 @@ #include <net/flow.h> #include <net/flow_dissector.h> #include <net/snmp.h> +#include <net/netns/hash.h> #define SIN6_LEN_RFC2133 24 @@ -674,6 +675,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) cpu_to_be32(0x0000ffff))) == 0UL; } +static inline u32 ipv6_portaddr_hash(const struct net *net, + const struct in6_addr *addr6, + unsigned int port) +{ + unsigned int hash, mix = net_hash_mix(net); + + if (ipv6_addr_any(addr6)) + hash = jhash_1word(0, mix); + else if (ipv6_addr_v4mapped(addr6)) + hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix); + else + hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix); + + return hash ^ port; +} + /* * Check for a RFC 4843 ORCHID address * (Overlay Routable Cryptographic Hash Identifiers) @@ -953,6 +970,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) &inet6_sk(sk)->cork); } +unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst); + int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index 070e93a17c59..f4c21b5a1242 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -153,7 +153,7 @@ struct iucv_sock_list { atomic_t autobind_name; }; -unsigned int iucv_sock_poll(struct file *file, struct socket *sock, +__poll_t iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index eec143cca1c0..906e90223066 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the * driver for a key to indicate that sufficient tailroom must always * be reserved for ICV or MIC, even when HW encryption is enabled. + * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for + * a TKIP key if it only requires MIC space. Do not set together with + * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key. 
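One detail worth calling out in the ipv6.h hunk above: for a v4-mapped address, ipv6_portaddr_hash() feeds only s6_addr32[3] to jhash_1word(), which is exactly what ipv4_portaddr_hash() does with a plain IPv4 address, so both helpers place such a socket in the same lhash2 bucket. A short kernel-style illustration; the function and variable names are mine:

#include <net/ip.h>
#include <net/ipv6.h>

/* For ::ffff:a.b.c.d both helpers reduce to
 * jhash_1word(a.b.c.d, net_hash_mix(net)) ^ port. */
static bool portaddr_hash_matches(const struct net *net,
                                  const struct in6_addr *mapped,
                                  unsigned int port)
{
        u32 h6 = ipv6_portaddr_hash(net, mapped, port);
        u32 h4 = ipv4_portaddr_hash(net, mapped->s6_addr32[3], port);

        return h6 == h4;        /* true whenever ipv6_addr_v4mapped(mapped) */
}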
*/ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), @@ -1562,6 +1565,7 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5), IEEE80211_KEY_FLAG_RX_MGMT = BIT(6), IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7), + IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8), }; /** @@ -1593,8 +1597,8 @@ struct ieee80211_key_conf { u8 icv_len; u8 iv_len; u8 hw_key_idx; - u8 flags; s8 keyidx; + u16 flags; u8 keylen; u8 key[0]; }; @@ -2056,6 +2060,9 @@ struct ieee80211_txq { * The stack will not do fragmentation. * The callback for @set_frag_threshold should be set as well. * + * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on + * TDLS links. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TX_FRAG_LIST, IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_SUPPORTS_TX_FRAG, + IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 049008493faf..f306b2aa15a4 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -51,7 +51,7 @@ struct net { refcount_t passive; /* To decided when the network * namespace should be freed. */ - atomic_t count; /* To decided when the network + refcount_t count; /* To decided when the network * namespace should be shut down. */ spinlock_t rules_mod_lock; @@ -195,7 +195,7 @@ void __put_net(struct net *net); static inline struct net *get_net(struct net *net) { - atomic_inc(&net->count); + refcount_inc(&net->count); return net; } @@ -206,14 +206,14 @@ static inline struct net *maybe_get_net(struct net *net) * exists. If the reference count is zero this * function fails and returns NULL. 
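In the mac80211 hunk above, ieee80211_key_conf.flags grows from u8 to u16 because the new IEEE80211_KEY_FLAG_PUT_MIC_SPACE is BIT(8) and no longer fits in eight bits; moving the field after keyidx presumably keeps the wider member naturally aligned without growing the structure. A trivial standalone check of the bit-width reasoning, with a local BIT() stand-in:

#include <stdint.h>

#define BIT(n) (1u << (n))

/* BIT(8) == 0x100: representable in a 16-bit flags field, not in the old u8. */
_Static_assert(BIT(8) > UINT8_MAX, "BIT(8) overflows an 8-bit flags field");
_Static_assert(BIT(8) <= UINT16_MAX, "BIT(8) fits in a 16-bit flags field");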
*/ - if (!atomic_inc_not_zero(&net->count)) + if (!refcount_inc_not_zero(&net->count)) net = NULL; return net; } static inline void put_net(struct net *net) { - if (atomic_dec_and_test(&net->count)) + if (refcount_dec_and_test(&net->count)) __put_net(net); } @@ -225,7 +225,7 @@ int net_eq(const struct net *net1, const struct net *net2) static inline int check_net(const struct net *net) { - return atomic_read(&net->count) != 0; + return refcount_read(&net->count) != 0; } void net_drop_ns(void *); diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 4ed1040bbe4a..73f825732326 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -13,17 +13,17 @@ const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; #endif int nf_conntrack_ipv4_compat_init(void); diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index 9cd55be95853..effa8dfba68c 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -4,17 +4,17 @@ extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; #endif #include <linux/sysctl.h> diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h new file mode 100644 index 000000000000..adf8db44cf86 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_count.h @@ -0,0 +1,17 @@ +#ifndef _NF_CONNTRACK_COUNT_H +#define _NF_CONNTRACK_COUNT_H + +struct nf_conncount_data; + +struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, + unsigned int keylen); +void 
nf_conncount_destroy(struct net *net, unsigned int family, + struct nf_conncount_data *data); + +unsigned int nf_conncount_count(struct net *net, + struct nf_conncount_data *data, + const u32 *key, + unsigned int family, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone); +#endif diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 7ef56c13698a..a7220eef9aee 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -27,6 +27,9 @@ struct nf_conntrack_l4proto { /* Resolve clashes on insertion races. */ bool allow_clash; + /* protoinfo nlattr size, closes a hole */ + u16 nlattr_size; + /* Try to fill in the third arg: dataoff is offset past network protocol hdr. Return true if possible. */ bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, @@ -66,8 +69,6 @@ struct nf_conntrack_l4proto { /* convert protoinfo to nfnetink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct); - /* Calculate protoinfo nlattr size */ - int (*nlattr_size)(void); /* convert nfnetlink attributes to protoinfo */ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); @@ -80,8 +81,6 @@ struct nf_conntrack_l4proto { struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; - size_t nla_size; - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) struct { int (*nlattr_to_obj)(struct nlattr *tb[], @@ -109,7 +108,7 @@ struct nf_conntrack_l4proto { }; /* Existing built-in generic protocol */ -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; #define MAX_NF_CT_PROTO 256 @@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net, void nf_ct_l4proto_pernet_unregister_one(struct net *net, const struct nf_conntrack_l4proto *proto); int nf_ct_l4proto_pernet_register(struct net *net, - struct nf_conntrack_l4proto *const proto[], + const struct nf_conntrack_l4proto *const proto[], unsigned int num_proto); void nf_ct_l4proto_pernet_unregister(struct net *net, - struct nf_conntrack_l4proto *const proto[], + const struct nf_conntrack_l4proto *const proto[], unsigned int num_proto); /* Protocol global registration. 
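The new <net/netfilter/nf_conntrack_count.h> above exposes connection counting as a small three-call API: allocate the tracking data once, query the count per packet, free it on teardown. A rough lifecycle sketch based only on the declarations above; the per-rule structure, the single-u32 key, and the ERR_PTR() error convention are assumptions, not something this header guarantees:

#include <linux/err.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_count.h>

struct my_limit {                       /* hypothetical per-rule state */
        struct nf_conncount_data *data;
        unsigned int limit;
};

static int my_limit_init(struct net *net, struct my_limit *priv)
{
        /* key: one u32 word, e.g. a masked source address */
        priv->data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));

        /* assumption: failures are reported via ERR_PTR() */
        return IS_ERR(priv->data) ? PTR_ERR(priv->data) : 0;
}

static bool my_limit_ok(struct net *net, struct my_limit *priv,
                        const u32 *key,
                        const struct nf_conntrack_tuple *tuple,
                        const struct nf_conntrack_zone *zone)
{
        unsigned int count = nf_conncount_count(net, priv->data, key,
                                                NFPROTO_IPV4, tuple, zone);

        return count <= priv->limit;
}

static void my_limit_fini(struct net *net, struct my_limit *priv)
{
        nf_conncount_destroy(net, NFPROTO_IPV4, priv->data);
}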
*/ -int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto); +int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto); void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); -int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[], +int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[], unsigned int num_proto); -void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[], +void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[], unsigned int num_proto); /* Generic netlink helpers */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h new file mode 100644 index 000000000000..b22b22082733 --- /dev/null +++ b/include/net/netfilter/nf_flow_table.h @@ -0,0 +1,122 @@ +#ifndef _NF_FLOW_TABLE_H +#define _NF_FLOW_TABLE_H + +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/netdevice.h> +#include <linux/rhashtable.h> +#include <linux/rcupdate.h> +#include <net/dst.h> + +struct nf_flowtable; + +struct nf_flowtable_type { + struct list_head list; + int family; + void (*gc)(struct work_struct *work); + const struct rhashtable_params *params; + nf_hookfn *hook; + struct module *owner; +}; + +struct nf_flowtable { + struct rhashtable rhashtable; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL, + FLOW_OFFLOAD_DIR_REPLY, + __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY, +}; +#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1) + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + + int iifidx; + + u8 l3proto; + u8 l4proto; + u8 dir; + + int oifidx; + + struct dst_entry *dst_cache; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +#define FLOW_OFFLOAD_SNAT 0x1 +#define FLOW_OFFLOAD_DNAT 0x2 +#define FLOW_OFFLOAD_DYING 0x4 + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; + u32 flags; + union { + /* Your private driver data here. 
*/ + u32 timeout; + }; +}; + +#define NF_FLOW_TIMEOUT (30 * HZ) + +struct nf_flow_route { + struct { + struct dst_entry *dst; + int ifindex; + } tuple[FLOW_OFFLOAD_DIR_MAX]; +}; + +struct flow_offload *flow_offload_alloc(struct nf_conn *ct, + struct nf_flow_route *route); +void flow_offload_free(struct flow_offload *flow); + +int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); +void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow); +struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table, + struct flow_offload_tuple *tuple); +int nf_flow_table_iterate(struct nf_flowtable *flow_table, + void (*iter)(struct flow_offload *flow, void *data), + void *data); +void nf_flow_offload_work_gc(struct work_struct *work); +extern const struct rhashtable_params nf_flow_offload_rhash_params; + +void flow_offload_dead(struct flow_offload *flow); + +int nf_flow_snat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); +int nf_flow_dnat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); + +struct flow_ports { + __be16 source, dest; +}; + +unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); +unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); + +#define MODULE_ALIAS_NF_FLOWTABLE(family) \ + MODULE_ALIAS("nf-flowtable-" __stringify(family)) + +#endif /* _FLOW_OFFLOAD_H */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 814058d0f167..a50a69f5334c 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -25,7 +25,7 @@ struct nf_queue_entry { struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *entry, unsigned int queuenum); - unsigned int (*nf_hook_drop)(struct net *net); + void (*nf_hook_drop)(struct net *net); }; void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index fecc6112c768..663b015dace5 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -9,6 +9,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_tables.h> #include <linux/u64_stats_sync.h> +#include <net/netfilter/nf_flow_table.h> #include <net/netlink.h> #define NFT_JUMP_STACK_SIZE 16 @@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, pkt->xt.state = state; } -static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, + struct sk_buff *skb) { pkt->tprot_set = false; pkt->tprot = 0; @@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt, pkt->xt.fragoff = 0; } -static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - nft_set_pktinfo(pkt, skb, state); - nft_set_pktinfo_proto_unspec(pkt, skb); -} - /** * struct nft_verdict - nf_tables verdict * @@ -150,22 +143,22 @@ static inline void nft_data_debug(const struct nft_data *data) * struct nft_ctx - nf_tables rule/set context * * @net: net namespace - * @afi: address family info * @table: the table the chain is contained in * @chain: the chain the rule is contained in * 
@nla: netlink attributes * @portid: netlink portID of the original message * @seq: netlink sequence number + * @family: protocol family * @report: notify via unicast netlink message */ struct nft_ctx { struct net *net; - struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; const struct nlattr * const *nla; u32 portid; u32 seq; + u8 family; bool report; }; @@ -381,6 +374,7 @@ void nft_unregister_set(struct nft_set_type *type); * @list: table set list node * @bindings: list of set bindings * @name: name of the set + * @handle: unique handle of the set * @ktype: key type (numeric type defined by userspace, not used in the kernel) * @dtype: data type (verdict or numeric type defined by userspace) * @objtype: object type (see NFT_OBJECT_* definitions) @@ -403,6 +397,7 @@ struct nft_set { struct list_head list; struct list_head bindings; char *name; + u64 handle; u32 ktype; u32 dtype; u32 objtype; @@ -424,6 +419,11 @@ struct nft_set { __attribute__((aligned(__alignof__(u64)))); }; +static inline bool nft_set_is_anonymous(const struct nft_set *set) +{ + return set->flags & NFT_SET_ANONYMOUS; +} + static inline void *nft_set_priv(const struct nft_set *set) { return (void *)set->data; @@ -883,7 +883,7 @@ enum nft_chain_type { * @family: address family * @owner: module owner * @hook_mask: mask of valid hooks - * @hooks: hookfn overrides + * @hooks: array of hook functions */ struct nf_chain_type { const char *name; @@ -905,8 +905,6 @@ struct nft_stats { struct u64_stats_sync syncp; }; -#define NFT_HOOK_OPS_MAX 2 - /** * struct nft_base_chain - nf_tables base chain * @@ -918,7 +916,7 @@ struct nft_stats { * @dev_name: device name that this base chain is attached to (if any) */ struct nft_base_chain { - struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; + struct nf_hook_ops ops; const struct nf_chain_type *type; u8 policy; u8 flags; @@ -948,10 +946,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); * @chains: chains in the table * @sets: sets in the table * @objects: stateful objects in the table + * @flowtables: flow tables in the table * @hgenerator: handle generator state + * @handle: table handle * @use: number of chain references to this table * @flags: table flag (see enum nft_table_flags) * @genmask: generation mask + * @afinfo: address family info * @name: name of the table */ struct nft_table { @@ -959,46 +960,16 @@ struct nft_table { struct list_head chains; struct list_head sets; struct list_head objects; + struct list_head flowtables; u64 hgenerator; + u64 handle; u32 use; - u16 flags:14, + u16 family:6, + flags:8, genmask:2; char *name; }; -enum nft_af_flags { - NFT_AF_NEEDS_DEV = (1 << 0), -}; - -/** - * struct nft_af_info - nf_tables address family info - * - * @list: used internally - * @family: address family - * @nhooks: number of hooks in this family - * @owner: module owner - * @tables: used internally - * @flags: family flags - * @nops: number of hook ops in this family - * @hook_ops_init: initialization function for chain hook ops - * @hooks: hookfn overrides for packet validation - */ -struct nft_af_info { - struct list_head list; - int family; - unsigned int nhooks; - struct module *owner; - struct list_head tables; - u32 flags; - unsigned int nops; - void (*hook_ops_init)(struct nf_hook_ops *, - unsigned int); - nf_hookfn *hooks[NF_MAX_HOOKS]; -}; - -int nft_register_afinfo(struct net *, struct nft_af_info *); -void nft_unregister_afinfo(struct net *, struct nft_af_info *); - int nft_register_chain_type(const struct nf_chain_type *); void 
nft_unregister_chain_type(const struct nf_chain_type *); @@ -1016,9 +987,9 @@ int nft_verdict_dump(struct sk_buff *skb, int type, * @name: name of this stateful object * @genmask: generation mask * @use: number of references to this stateful object - * @data: object data, layout depends on type + * @handle: unique object handle * @ops: object operations - * @data: pointer to object data + * @data: object data, layout depends on type */ struct nft_object { struct list_head list; @@ -1026,6 +997,7 @@ struct nft_object { struct nft_table *table; u32 genmask:2, use:30; + u64 handle; /* runtime data below here */ const struct nft_object_ops *ops ____cacheline_aligned; unsigned char data[] @@ -1097,6 +1069,46 @@ int nft_register_obj(struct nft_object_type *obj_type); void nft_unregister_obj(struct nft_object_type *obj_type); /** + * struct nft_flowtable - nf_tables flow table + * + * @list: flow table list node in table list + * @table: the table the flow table is contained in + * @name: name of this flow table + * @hooknum: hook number + * @priority: hook priority + * @ops_len: number of hooks in array + * @genmask: generation mask + * @use: number of references to this flow table + * @handle: unique object handle + * @data: rhashtable and garbage collector + * @ops: array of hooks + */ +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int priority; + int ops_len; + u32 genmask:2, + use:30; + u64 handle; + /* runtime data below here */ + struct nf_hook_ops *ops ____cacheline_aligned; + struct nf_flowtable data; +}; + +struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table, + const struct nlattr *nla, + u8 genmask); +void nft_flow_table_iterate(struct net *net, + void (*iter)(struct nf_flowtable *flowtable, void *data), + void *data); + +void nft_register_flowtable_type(struct nf_flowtable_type *type); +void nft_unregister_flowtable_type(struct nf_flowtable_type *type); + +/** * struct nft_traceinfo - nft tracing information and state * * @pkt: pktinfo currently processed @@ -1125,12 +1137,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, void nft_trace_notify(struct nft_traceinfo *info); -#define nft_dereference(p) \ - nfnl_dereference(p, NFNL_SUBSYS_NFTABLES) - -#define MODULE_ALIAS_NFT_FAMILY(family) \ - MODULE_ALIAS("nft-afinfo-" __stringify(family)) - #define MODULE_ALIAS_NFT_CHAIN(family, name) \ MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) @@ -1332,4 +1338,11 @@ struct nft_trans_obj { #define nft_trans_obj(trans) \ (((struct nft_trans_obj *)trans->data)->obj) +struct nft_trans_flowtable { + struct nft_flowtable *flowtable; +}; + +#define nft_trans_flowtable(trans) \ + (((struct nft_trans_flowtable *)trans->data)->flowtable) + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index f0896ba456c4..ed7b511f0a59 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -5,15 +5,11 @@ #include <net/netfilter/nf_tables.h> #include <net/ip.h> -static inline void -nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + struct sk_buff *skb) { struct iphdr *ip; - nft_set_pktinfo(pkt, skb, state); - ip = ip_hdr(pkt->skb); pkt->tprot_set = true; pkt->tprot = ip->protocol; @@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, 
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; } -static inline int -__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { struct iphdr *iph, _iph; u32 len, thoff; @@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, return 0; } -static inline void -nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { - nft_set_pktinfo(pkt, skb, state); - if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0) - nft_set_pktinfo_proto_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0) + nft_set_pktinfo_unspec(pkt, skb); } -extern struct nft_af_info nft_af_ipv4; - #endif diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index b8065b72f56e..dabe6fdb553a 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -5,20 +5,16 @@ #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ipv6.h> -static inline void -nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + struct sk_buff *skb) { unsigned int flags = IP6_FH_F_AUTH; int protohdr, thoff = 0; unsigned short frag_off; - nft_set_pktinfo(pkt, skb, state); - protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) { - nft_set_pktinfo_proto_unspec(pkt, skb); + nft_set_pktinfo_unspec(pkt, skb); return; } @@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, pkt->xt.fragoff = frag_off; } -static inline int -__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, #endif } -static inline void -nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { - nft_set_pktinfo(pkt, skb, state); - if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0) - nft_set_pktinfo_proto_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0) + nft_set_pktinfo_unspec(pkt, skb); } -extern struct nft_af_info nft_af_ipv6; - #endif diff --git a/include/net/netns/can.h b/include/net/netns/can.h index ecf238b8862c..ca9bd9fba5b5 100644 --- a/include/net/netns/can.h +++ b/include/net/netns/can.h @@ -8,7 +8,7 @@ #include <linux/spinlock.h> -struct dev_rcv_lists; +struct can_dev_rcv_lists; struct s_stats; struct s_pstats; @@ -28,7 +28,7 @@ struct netns_can { #endif /* receive filters subscribed for 'all' CAN devices */ - struct dev_rcv_lists *can_rx_alldev_list; + struct can_dev_rcv_lists *can_rx_alldev_list; spinlock_t can_rcvlists_lock; struct timer_list can_stattimer;/* timer for statistics update */ struct s_stats *can_stats; /* packet statistics */ diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 0ad4d0c71228..36c2d998a43c 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ 
-11,7 +11,10 @@ struct netns_core { int sysctl_somaxconn; - struct prot_inuse __percpu *inuse; +#ifdef CONFIG_PROC_FS + int __percpu *sock_inuse; + struct prot_inuse __percpu *prot_inuse; +#endif }; #endif diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index cc00af2ac2d7..ca043342c0eb 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -17,7 +17,17 @@ struct netns_nf { #ifdef CONFIG_SYSCTL struct ctl_table_header *nf_log_dir_header; #endif - struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS]; + struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS]; +#ifdef CONFIG_NETFILTER_FAMILY_ARP + struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS]; +#endif +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS]; +#endif +#if IS_ENABLED(CONFIG_DECNET) + struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS]; +#endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) bool defrag_ipv4; #endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index 4109b5f3010f..48134353411d 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -7,14 +7,8 @@ struct nft_af_info; struct netns_nftables { - struct list_head af_info; + struct list_head tables; struct list_head commit_list; - struct nft_af_info *ipv4; - struct nft_af_info *ipv6; - struct nft_af_info *inet; - struct nft_af_info *arp; - struct nft_af_info *bridge; - struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; }; diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index ebc813277662..0db7fb3e4e15 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -122,9 +122,12 @@ struct netns_sctp { /* Flag to indicate if PR-CONFIG is enabled. 
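The netns_nf change above replaces the single hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] matrix with per-family arrays sized to the hook points each family actually has, so a namespace stops carrying slots that IPv4/IPv6/ARP/bridge/decnet can never use. A sketch of how core code might map a (pf, hooknum) pair onto the new fields; this helper is illustrative only, the real dispatch lives in net/netfilter/core.c:

#include <linux/netfilter.h>
#include <net/net_namespace.h>

static struct nf_hook_entries __rcu **
netns_hook_slot(struct net *net, u8 pf, unsigned int hooknum)
{
        switch (pf) {
        case NFPROTO_IPV4:
                return hooknum < NF_INET_NUMHOOKS ?
                       &net->nf.hooks_ipv4[hooknum] : NULL;
        case NFPROTO_IPV6:
                return hooknum < NF_INET_NUMHOOKS ?
                       &net->nf.hooks_ipv6[hooknum] : NULL;
        default:
                /* arp, bridge and decnet follow the same pattern behind
                 * their respective config options */
                return NULL;
        }
}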
*/ int reconf_enable; - /* Flag to idicate if SCTP-AUTH is enabled */ + /* Flag to indicate if SCTP-AUTH is enabled */ int auth_enable; + /* Flag to indicate if stream interleave is enabled */ + int intl_enable; + /* * Policy to control SCTP IPv4 address scoping * 0 - Disable IPv4 address scoping diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 753ac9361154..87406252f0a3 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -29,6 +29,7 @@ struct tcf_block_ext_info { enum tcf_block_binder_type binder_type; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; + u32 block_index; }; struct tcf_block_cb; @@ -38,16 +39,25 @@ bool tcf_queue_work(struct work_struct *work); struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, bool create); void tcf_chain_put(struct tcf_chain *chain); +void tcf_block_netif_keep_dst(struct tcf_block *block); int tcf_block_get(struct tcf_block **p_block, - struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q); + struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, + struct netlink_ext_ack *extack); int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, - struct tcf_block_ext_info *ei); + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack); void tcf_block_put(struct tcf_block *block); void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei); +static inline bool tcf_block_shared(struct tcf_block *block) +{ + return block->index; +} + static inline struct Qdisc *tcf_block_q(struct tcf_block *block) { + WARN_ON(tcf_block_shared(block)); return block->q; } @@ -77,14 +87,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, #else static inline int tcf_block_get(struct tcf_block **p_block, - struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q) + struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, + struct netlink_ext_ack *extack) { return 0; } static inline int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, - struct tcf_block_ext_info *ei) + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) { return 0; } @@ -364,7 +376,8 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, - struct tcf_exts *exts, bool ovr); + struct tcf_exts *exts, bool ovr, + struct netlink_ext_ack *extack); void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); @@ -544,13 +557,16 @@ static inline int tcf_valid_offset(const struct sk_buff *skb, #include <net/net_namespace.h> static inline int -tcf_change_indev(struct net *net, struct nlattr *indev_tlv) +tcf_change_indev(struct net *net, struct nlattr *indev_tlv, + struct netlink_ext_ack *extack) { char indev[IFNAMSIZ]; struct net_device *dev; - if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) + if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) { + NL_SET_ERR_MSG(extack, "Interface name too long"); return -EINVAL; + } dev = __dev_get_by_name(net, indev); if (!dev) return -ENODEV; @@ -586,17 +602,9 @@ struct tc_cls_common_offload { u32 chain_index; __be16 protocol; u32 prio; + struct netlink_ext_ack *extack; }; -static inline void -tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, - const struct tcf_proto *tp) -{ - cls_common->chain_index = tp->chain->index; - cls_common->protocol = tp->protocol; 
- cls_common->prio = tp->prio; -} - struct tc_cls_u32_knode { struct tcf_exts *exts; struct tc_u32_sel *sel; @@ -637,6 +645,31 @@ static inline bool tc_can_offload(const struct net_device *dev) return dev->features & NETIF_F_HW_TC; } +static inline bool tc_can_offload_extack(const struct net_device *dev, + struct netlink_ext_ack *extack) +{ + bool can = tc_can_offload(dev); + + if (!can) + NL_SET_ERR_MSG(extack, "TC offload is disabled on net device"); + + return can; +} + +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload_extack(dev, common->extack)) + return false; + if (common->chain_index) { + NL_SET_ERR_MSG(common->extack, + "Driver supports only offload of chain 0"); + return false; + } + return true; +} + static inline bool tc_skip_hw(u32 flags) { return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false; @@ -664,6 +697,18 @@ static inline bool tc_in_hw(u32 flags) return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false; } +static inline void +tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, + const struct tcf_proto *tp, u32 flags, + struct netlink_ext_ack *extack) +{ + cls_common->chain_index = tp->chain->index; + cls_common->protocol = tp->protocol; + cls_common->prio = tp->prio; + if (tc_skip_sw(flags)) + cls_common->extack = extack; +} + enum tc_fl_command { TC_CLSFLOWER_REPLACE, TC_CLSFLOWER_DESTROY, @@ -706,7 +751,6 @@ struct tc_cls_bpf_offload { struct bpf_prog *oldprog; const char *name; bool exts_integrated; - u32 gen_flags; }; struct tc_mqprio_qopt_offload { @@ -727,6 +771,11 @@ struct tc_cookie { u32 len; }; +struct tc_qopt_offload_stats { + struct gnet_stats_basic_packed *bstats; + struct gnet_stats_queue *qstats; +}; + enum tc_red_command { TC_RED_REPLACE, TC_RED_DESTROY, @@ -739,9 +788,6 @@ struct tc_red_qopt_offload_params { u32 max; u32 probability; bool is_ecn; -}; -struct tc_red_qopt_offload_stats { - struct gnet_stats_basic_packed *bstats; struct gnet_stats_queue *qstats; }; @@ -751,9 +797,34 @@ struct tc_red_qopt_offload { u32 parent; union { struct tc_red_qopt_offload_params set; - struct tc_red_qopt_offload_stats stats; + struct tc_qopt_offload_stats stats; struct red_stats *xstats; }; }; +enum tc_prio_command { + TC_PRIO_REPLACE, + TC_PRIO_DESTROY, + TC_PRIO_STATS, +}; + +struct tc_prio_qopt_offload_params { + int bands; + u8 priomap[TC_PRIO_MAX + 1]; + /* In case that a prio qdisc is offloaded and now is changed to a + * non-offloadedable config, it needs to update the backlog & qlen + * values to negate the HW backlog & qlen values (and only them). 
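tc_cls_can_offload_and_chain0() above gives drivers a single check that covers both NETIF_F_HW_TC and the chain-0-only limitation, with the failure reason reported through the extack that the reworked tc_cls_common_offload_init() attaches when skip_sw is set. A sketch of how a driver's classifier offload path might use it; the driver callback name is hypothetical:

#include <net/pkt_cls.h>

/* Hypothetical driver hook for a classifier offload request. */
static int foo_setup_cls(struct net_device *dev,
                         struct tc_cls_common_offload *common)
{
        if (!tc_cls_can_offload_and_chain0(dev, common))
                return -EOPNOTSUPP;     /* reason already set on common->extack */

        /* translate the rule and program the hardware here */
        return 0;
}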
+ */ + struct gnet_stats_queue *qstats; +}; + +struct tc_prio_qopt_offload { + enum tc_prio_command command; + u32 handle; + u32 parent; + union { + struct tc_prio_qopt_offload_params replace_params; + struct tc_qopt_offload_stats stats; + }; +}; #endif diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index d1f413f06c72..815b92a23936 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops; int fifo_set_limit(struct Qdisc *q, unsigned int limit); struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops, - unsigned int limit); + unsigned int limit, + struct netlink_ext_ack *extack); int register_qdisc(struct Qdisc_ops *qops); int unregister_qdisc(struct Qdisc_ops *qops); @@ -99,22 +100,24 @@ int qdisc_set_default(const char *id); void qdisc_hash_add(struct Qdisc *q, bool invisible); void qdisc_hash_del(struct Qdisc *q); struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle); -struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle); struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, - struct nlattr *tab); + struct nlattr *tab, + struct netlink_ext_ack *extack); void qdisc_put_rtab(struct qdisc_rate_table *tab); void qdisc_put_stab(struct qdisc_size_table *tab); void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc); -int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, - struct net_device *dev, struct netdev_queue *txq, - spinlock_t *root_lock, bool validate); +bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, + struct net_device *dev, struct netdev_queue *txq, + spinlock_t *root_lock, bool validate); void __qdisc_run(struct Qdisc *q); static inline void qdisc_run(struct Qdisc *q) { - if (qdisc_run_begin(q)) + if (qdisc_run_begin(q)) { __qdisc_run(q); + qdisc_run_end(q); + } } static inline __be16 tc_skb_protocol(const struct sk_buff *skb) diff --git a/include/net/route.h b/include/net/route.h index d538e6db1afe..1eb9ce470e25 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -217,7 +217,7 @@ unsigned int inet_addr_type_dev_table(struct net *net, const struct net_device *dev, __be32 addr); void ip_rt_multicast_event(struct in_device *); -int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); +int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt); void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); struct rtable *rt_dst_alloc(struct net_device *dev, unsigned int flags, u16 type, diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index ead018744ff5..14b6b3af8918 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -13,10 +13,10 @@ enum rtnl_link_flags { RTNL_FLAG_DOIT_UNLOCKED = 1, }; -int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); void rtnl_register(int protocol, int msgtype, rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); +int rtnl_register_module(struct module *owner, int protocol, int msgtype, + rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); int rtnl_unregister(int protocol, int msgtype); void rtnl_unregister_all(int protocol); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index becf86aa4ac6..e2ab13687fb9 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -71,6 +71,7 @@ struct Qdisc { * qdisc_tree_decrease_qlen() should stop. 
*/ #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ +#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */ #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ u32 limit; const struct Qdisc_ops *ops; @@ -88,14 +89,14 @@ struct Qdisc { /* * For performance sake on SMP, we put highly modified fields at the end */ - struct sk_buff *gso_skb ____cacheline_aligned_in_smp; + struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; seqcount_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; - struct sk_buff *skb_bad_txq; + struct sk_buff_head skb_bad_txq; int padded; refcount_t refcnt; @@ -150,19 +151,23 @@ struct Qdisc_class_ops { /* Child qdisc manipulation */ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *); int (*graft)(struct Qdisc *, unsigned long cl, - struct Qdisc *, struct Qdisc **); + struct Qdisc *, struct Qdisc **, + struct netlink_ext_ack *extack); struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl); void (*qlen_notify)(struct Qdisc *, unsigned long); /* Class manipulation routines */ unsigned long (*find)(struct Qdisc *, u32 classid); int (*change)(struct Qdisc *, u32, u32, - struct nlattr **, unsigned long *); + struct nlattr **, unsigned long *, + struct netlink_ext_ack *); int (*delete)(struct Qdisc *, unsigned long); void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long); + struct tcf_block * (*tcf_block)(struct Qdisc *sch, + unsigned long arg, + struct netlink_ext_ack *extack); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); @@ -187,15 +192,26 @@ struct Qdisc_ops { struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); - int (*init)(struct Qdisc *, struct nlattr *arg); + int (*init)(struct Qdisc *sch, struct nlattr *arg, + struct netlink_ext_ack *extack); void (*reset)(struct Qdisc *); void (*destroy)(struct Qdisc *); - int (*change)(struct Qdisc *, struct nlattr *arg); - void (*attach)(struct Qdisc *); + int (*change)(struct Qdisc *sch, + struct nlattr *arg, + struct netlink_ext_ack *extack); + void (*attach)(struct Qdisc *sch); + int (*change_tx_queue_len)(struct Qdisc *, unsigned int); int (*dump)(struct Qdisc *, struct sk_buff *); int (*dump_stats)(struct Qdisc *, struct gnet_dump *); + void (*ingress_block_set)(struct Qdisc *sch, + u32 block_index); + void (*egress_block_set)(struct Qdisc *sch, + u32 block_index); + u32 (*ingress_block_get)(struct Qdisc *sch); + u32 (*egress_block_get)(struct Qdisc *sch); + struct module *owner; }; @@ -218,14 +234,18 @@ struct tcf_proto_ops { const struct tcf_proto *, struct tcf_result *); int (*init)(struct tcf_proto*); - void (*destroy)(struct tcf_proto*); + void (*destroy)(struct tcf_proto *tp, + struct netlink_ext_ack *extack); void* (*get)(struct tcf_proto*, u32 handle); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, - void **, bool); - int (*delete)(struct tcf_proto*, void *, bool*); + void **, bool, + struct netlink_ext_ack *); + int (*delete)(struct tcf_proto *tp, void *arg, + bool *last, + struct netlink_ext_ack *); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); void (*bind_class)(void *, u32, unsigned long); @@ -247,8 +267,6 @@ struct tcf_proto { /* All the rest */ u32 prio; - u32 classid; - struct 
Qdisc *q; void *data; const struct tcf_proto_ops *ops; struct tcf_chain *chain; @@ -267,8 +285,7 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); struct tcf_chain { struct tcf_proto __rcu *filter_chain; - tcf_chain_head_change_t *chain_head_change; - void *chain_head_change_priv; + struct list_head filter_chain_list; struct list_head list; struct tcf_block *block; u32 index; /* chain index */ @@ -277,12 +294,33 @@ struct tcf_chain { struct tcf_block { struct list_head chain_list; + u32 index; /* block index for shared blocks */ + unsigned int refcnt; struct net *net; struct Qdisc *q; struct list_head cb_list; - struct work_struct work; + struct list_head owner_list; + bool keep_dst; + unsigned int offloadcnt; /* Number of oddloaded filters */ + unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */ }; +static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) +{ + if (*flags & TCA_CLS_FLAGS_IN_HW) + return; + *flags |= TCA_CLS_FLAGS_IN_HW; + block->offloadcnt++; +} + +static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) +{ + if (!(*flags & TCA_CLS_FLAGS_IN_HW)) + return; + *flags &= ~TCA_CLS_FLAGS_IN_HW; + block->offloadcnt--; +} + static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; @@ -291,11 +329,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) BUILD_BUG_ON(sizeof(qcb->data) < sz); } +static inline int qdisc_qlen_cpu(const struct Qdisc *q) +{ + return this_cpu_ptr(q->cpu_qstats)->qlen; +} + static inline int qdisc_qlen(const struct Qdisc *q) { return q->q.qlen; } +static inline int qdisc_qlen_sum(const struct Qdisc *q) +{ + __u32 qlen = 0; + int i; + + if (q->flags & TCQ_F_NOLOCK) { + for_each_possible_cpu(i) + qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen; + } else { + qlen = q->q.qlen; + } + + return qlen; +} + static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) { return (struct qdisc_skb_cb *)skb->cb; @@ -432,6 +490,7 @@ void qdisc_class_hash_remove(struct Qdisc_class_hash *, void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *); void qdisc_class_hash_destroy(struct Qdisc_class_hash *); +int dev_qdisc_change_tx_queue_len(struct net_device *dev); void dev_init_scheduler(struct net_device *dev); void dev_shutdown(struct net_device *dev); void dev_activate(struct net_device *dev); @@ -444,10 +503,12 @@ void qdisc_destroy(struct Qdisc *qdisc); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, unsigned int len); struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, - const struct Qdisc_ops *ops); + const struct Qdisc_ops *ops, + struct netlink_ext_ack *extack); void qdisc_free(struct Qdisc *qdisc); struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, - const struct Qdisc_ops *ops, u32 parentid); + const struct Qdisc_ops *ops, u32 parentid, + struct netlink_ext_ack *extack); void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); int skb_do_redirect(struct sk_buff *); @@ -633,12 +694,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch, sch->qstats.backlog -= qdisc_pkt_len(skb); } +static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch, + const struct sk_buff *skb) +{ + this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); +} + static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch, const struct sk_buff *skb) { sch->qstats.backlog += qdisc_pkt_len(skb); } +static 
inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch, + const struct sk_buff *skb) +{ + this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb)); +} + +static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch) +{ + this_cpu_inc(sch->cpu_qstats->qlen); +} + +static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch) +{ + this_cpu_dec(sch->cpu_qstats->qlen); +} + +static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch) +{ + this_cpu_inc(sch->cpu_qstats->requeues); +} + static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count) { sch->qstats.drops += count; @@ -769,26 +857,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch) /* generic pseudo peek method for non-work-conserving qdisc */ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) { + struct sk_buff *skb = skb_peek(&sch->gso_skb); + /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ - if (!sch->gso_skb) { - sch->gso_skb = sch->dequeue(sch); - if (sch->gso_skb) { + if (!skb) { + skb = sch->dequeue(sch); + + if (skb) { + __skb_queue_head(&sch->gso_skb, skb); /* it's still part of the queue */ - qdisc_qstats_backlog_inc(sch, sch->gso_skb); + qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; } } - return sch->gso_skb; + return skb; } /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) { - struct sk_buff *skb = sch->gso_skb; + struct sk_buff *skb = skb_peek(&sch->gso_skb); if (skb) { - sch->gso_skb = NULL; + skb = __skb_dequeue(&sch->gso_skb); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; } else { @@ -846,6 +938,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) qdisc_qstats_drop(sch); } +static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +{ + __qdisc_drop(skb, to_free); + qdisc_qstats_cpu_drop(sch); + + return NET_XMIT_DROP; +} static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index deaafa9b09cb..20ff237c5eb2 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other) SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) -#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA) +#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \ + a->chunk_hdr->type == SCTP_CID_I_DATA) /* Calculate the actual data size in a data chunk */ -#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\ - - (unsigned long)(c->chunk_hdr)\ - - sizeof(struct sctp_data_chunk))) +#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \ + (unsigned long)(c->chunk_hdr) - \ + sctp_datachk_len(&c->asoc->stream))) /* Internal error codes */ enum sctp_ierror { diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 906a9c0efa71..f7ae6b0a21d0 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); void sctp_data_ready(struct sock *sk); -unsigned int sctp_poll(struct file *file, struct socket *sock, +__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); void 
sctp_copy_sock(struct sock *newsk, struct sock *sk, @@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated; int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); -int sctp_transport_walk_start(struct rhashtable_iter *iter); +void sctp_transport_walk_start(struct rhashtable_iter *iter); void sctp_transport_walk_stop(struct rhashtable_iter *iter); struct sctp_transport *sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter); @@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) int frag = pmtu; frag -= sp->pf->af->net_header_len; - frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk); + frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream); if (asoc->user_frag) frag = min_t(int, frag, asoc->user_frag); frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN - - sizeof(struct sctp_data_chunk))); + sctp_datachk_len(&asoc->stream))); return frag; } diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 70fb397f65b0..2883c43c5258 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, const struct sctp_chunk *chunk); -struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, +struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc, + __u8 flags, int paylen, gfp_t gfp); +struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc, + __u32 new_cum_tsn, size_t nstreams, + struct sctp_ifwdtsn_skip *skiplist); +struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, - int len, const __u8 flags, - __u16 ssn, gfp_t gfp); + int len, __u8 flags, gfp_t gfp); struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn); struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc); @@ -342,7 +346,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk) __u16 size; size = ntohs(chunk->chunk_hdr->length); - size -= sizeof(struct sctp_data_chunk); + size -= sctp_datahdr_len(&chunk->asoc->stream); return size; } @@ -358,6 +362,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk) typecheck(__u32, b) && \ ((__s32)((a) - (b)) <= 0)) +/* Compare two MIDs */ +#define MID_lt(a, b) \ + (typecheck(__u32, a) && \ + typecheck(__u32, b) && \ + ((__s32)((a) - (b)) < 0)) + /* Compare two SSNs */ #define SSN_lt(a,b) \ (typecheck(__u16, a) && \ diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h new file mode 100644 index 000000000000..6657711c8bc4 --- /dev/null +++ b/include/net/sctp/stream_interleave.h @@ -0,0 +1,61 @@ +/* SCTP kernel implementation + * (C) Copyright Red Hat Inc. 2017 + * + * These are definitions used by the stream schedulers, defined in RFC + * draft ndata (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11) + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email addresses: + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Xin Long <lucien.xin@gmail.com> + */ + +#ifndef __sctp_stream_interleave_h__ +#define __sctp_stream_interleave_h__ + +struct sctp_stream_interleave { + __u16 data_chunk_len; + __u16 ftsn_chunk_len; + /* (I-)DATA process */ + struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc, + const struct sctp_sndrcvinfo *sinfo, + int len, __u8 flags, gfp_t gfp); + void (*assign_number)(struct sctp_chunk *chunk); + bool (*validate_data)(struct sctp_chunk *chunk); + int (*ulpevent_data)(struct sctp_ulpq *ulpq, + struct sctp_chunk *chunk, gfp_t gfp); + int (*enqueue_event)(struct sctp_ulpq *ulpq, + struct sctp_ulpevent *event); + void (*renege_events)(struct sctp_ulpq *ulpq, + struct sctp_chunk *chunk, gfp_t gfp); + void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp); + void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp); + /* (I-)FORWARD-TSN process */ + void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn); + bool (*validate_ftsn)(struct sctp_chunk *chunk); + void (*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn); + void (*handle_ftsn)(struct sctp_ulpq *ulpq, + struct sctp_chunk *chunk); +}; + +void sctp_stream_interleave_init(struct sctp_stream *stream); + +#endif /* __sctp_stream_interleave_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9a5ccf03a59b..03e92dda1813 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -89,6 +89,7 @@ struct sctp_stream; #include <net/sctp/tsnmap.h> #include <net/sctp/ulpevent.h> #include <net/sctp/ulpqueue.h> +#include <net/sctp/stream_interleave.h> /* Structures useful for managing bind/connect. */ @@ -202,12 +203,17 @@ struct sctp_sock { /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ __u32 param_flags; - struct sctp_initmsg initmsg; struct sctp_rtoinfo rtoinfo; struct sctp_paddrparams paddrparam; - struct sctp_event_subscribe subscribe; struct sctp_assocparams assocparams; + /* + * These two structures must be grouped together for the usercopy + * whitelist region. + */ + struct sctp_event_subscribe subscribe; + struct sctp_initmsg initmsg; + int user_frag; __u32 autoclose; @@ -217,6 +223,7 @@ struct sctp_sock { disable_fragments:1, v4mapped:1, frag_interleave:1, + strm_interleave:1, recvrcvinfo:1, recvnxtinfo:1, data_ready_signalled:1; @@ -397,6 +404,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new); #define sctp_ssn_skip(stream, type, sid, ssn) \ ((stream)->type[sid].ssn = ssn + 1) +/* What is the current MID number for this stream? */ +#define sctp_mid_peek(stream, type, sid) \ + ((stream)->type[sid].mid) + +/* Return the next MID number for this stream. */ +#define sctp_mid_next(stream, type, sid) \ + ((stream)->type[sid].mid++) + +/* Skip over this mid and all below. 
*/ +#define sctp_mid_skip(stream, type, sid, mid) \ + ((stream)->type[sid].mid = mid + 1) + +#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid]) + +/* What is the current MID_uo number for this stream? */ +#define sctp_mid_uo_peek(stream, type, sid) \ + ((stream)->type[sid].mid_uo) + +/* Return the next MID_uo number for this stream. */ +#define sctp_mid_uo_next(stream, type, sid) \ + ((stream)->type[sid].mid_uo++) + /* * Pointers to address related SCTP functions. * (i.e. things that depend on the address family.) @@ -574,6 +603,8 @@ struct sctp_chunk { struct sctp_addiphdr *addip_hdr; struct sctp_fwdtsn_hdr *fwdtsn_hdr; struct sctp_authhdr *auth_hdr; + struct sctp_idatahdr *idata_hdr; + struct sctp_ifwdtsn_hdr *ifwdtsn_hdr; } subh; __u8 *chunk_end; @@ -620,6 +651,7 @@ struct sctp_chunk { __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ has_tsn:1, /* Does this chunk have a TSN yet? */ has_ssn:1, /* Does this chunk have a SSN yet? */ +#define has_mid has_ssn singleton:1, /* Only chunk in the packet? */ end_of_packet:1, /* Last chunk in the packet? */ ecn_ce_done:1, /* Have we processed the ECN CE bit? */ @@ -1073,6 +1105,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); void sctp_prsctp_prune(struct sctp_association *asoc, struct sctp_sndrcvinfo *sinfo, int msg_len); +void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); /* Uncork and flush an outqueue. */ static inline void sctp_outq_cork(struct sctp_outq *q) { @@ -1357,13 +1390,25 @@ struct sctp_stream_out_ext { }; struct sctp_stream_out { - __u16 ssn; - __u8 state; + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; struct sctp_stream_out_ext *ext; + __u8 state; }; struct sctp_stream_in { - __u16 ssn; + union { + __u32 mid; + __u16 ssn; + }; + __u32 mid_uo; + __u32 fsn; + __u32 fsn_uo; + char pd_mode; + char pd_mode_uo; }; struct sctp_stream { @@ -1387,11 +1432,32 @@ struct sctp_stream { struct sctp_stream_out_ext *rr_next; }; }; + struct sctp_stream_interleave *si; }; #define SCTP_STREAM_CLOSED 0x00 #define SCTP_STREAM_OPEN 0x01 +static inline __u16 sctp_datachk_len(const struct sctp_stream *stream) +{ + return stream->si->data_chunk_len; +} + +static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream) +{ + return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr); +} + +static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream) +{ + return stream->si->ftsn_chunk_len; +} + +static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream) +{ + return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr); +} + /* SCTP_GET_ASSOC_STATS counters */ struct sctp_priv_assoc_stats { /* Maximum observed rto in the association during subsequent @@ -1940,6 +2006,7 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? */ force_delay:1, + intl_enable:1, prsctp_enable:1, reconf_enable:1; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 231dc42f1da6..51b4e0626c34 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -45,19 +45,29 @@ /* A structure to carry information to the ULP (e.g. Sockets API) */ /* Warning: This sits inside an skb.cb[] area. Be very careful of * growing this structure as it is at the maximum limit now. 
+ * + * sctp_ulpevent is saved in sk->cb(48 bytes), whose last 4 bytes + * have been taken by sock_skb_cb, So here it has to use 'packed' + * to make sctp_ulpevent fit into the rest 44 bytes. */ struct sctp_ulpevent { struct sctp_association *asoc; struct sctp_chunk *chunk; unsigned int rmem_len; - __u32 ppid; + union { + __u32 mid; + __u16 ssn; + }; + union { + __u32 ppid; + __u32 fsn; + }; __u32 tsn; __u32 cumtsn; __u16 stream; - __u16 ssn; __u16 flags; __u16 msg_flags; -}; +} __packed; /* Retrieve the skb this event sits inside of. */ static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev) @@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( struct sctp_ulpevent *sctp_ulpevent_make_pdapi( const struct sctp_association *asoc, - __u32 indication, gfp_t gfp); + __u32 indication, __u32 sid, __u32 seq, + __u32 flags, gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication( const struct sctp_association *asoc, gfp_t gfp); @@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event( const struct sctp_association *asoc, __u16 flags, __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp); +struct sctp_ulpevent *sctp_make_reassembled_event( + struct net *net, struct sk_buff_head *queue, + struct sk_buff *f_frag, struct sk_buff *l_frag); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index e0dce07b8794..bb0ecba3db2b 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -45,6 +45,7 @@ struct sctp_ulpq { char pd_mode; struct sctp_association *asoc; struct sk_buff_head reasm; + struct sk_buff_head reasm_uo; struct sk_buff_head lobby; }; @@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc); void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn); void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32); -#endif /* __sctp_ulpqueue_h__ */ - - - - - +__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, + struct sk_buff_head *list, __u16 needed); +#endif /* __sctp_ulpqueue_h__ */ diff --git a/include/net/sock.h b/include/net/sock.h index c4a424fe6fdd..169c92afcafa 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -72,6 +72,7 @@ #include <net/tcp_states.h> #include <linux/net_tstamp.h> #include <net/smc.h> +#include <net/l3mdev.h> /* * This structure really needs to be cleaned up. 
@@ -1108,6 +1109,8 @@ struct proto { struct kmem_cache *slab; unsigned int obj_size; slab_flags_t slab_flags; + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ struct percpu_counter *orphan_count; @@ -1262,6 +1265,7 @@ proto_memory_pressure(struct proto *prot) /* Called with local bh disabled */ void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); int sock_prot_inuse_get(struct net *net, struct proto *proto); +int sock_inuse_get(struct net *net); #else static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc) @@ -1581,7 +1585,7 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int); int sock_no_socketpair(struct socket *, struct socket *); int sock_no_accept(struct socket *, struct socket *, int, bool); int sock_no_getname(struct socket *, struct sockaddr *, int *, int); -unsigned int sock_no_poll(struct file *, struct socket *, +__poll_t sock_no_poll(struct file *, struct socket *, struct poll_table_struct *); int sock_no_ioctl(struct socket *, unsigned int, unsigned long); int sock_no_listen(struct socket *, int); @@ -2335,31 +2339,6 @@ static inline bool sk_listener(const struct sock *sk) return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); } -/** - * sk_state_load - read sk->sk_state for lockless contexts - * @sk: socket pointer - * - * Paired with sk_state_store(). Used in places we do not hold socket lock : - * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... - */ -static inline int sk_state_load(const struct sock *sk) -{ - return smp_load_acquire(&sk->sk_state); -} - -/** - * sk_state_store - update sk->sk_state - * @sk: socket pointer - * @newstate: new state - * - * Paired with sk_state_load(). Should be used in contexts where - * state change might impact lockless readers. - */ -static inline void sk_state_store(struct sock *sk, int newstate) -{ - smp_store_release(&sk->sk_state, newstate); -} - void sock_enable_timestamp(struct sock *sk, int flag); int sock_get_timestamp(struct sock *, struct timeval __user *); int sock_get_timestampns(struct sock *, struct timespec __user *); @@ -2410,4 +2389,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) return *proto->sysctl_rmem; } +/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10) + * Some wifi drivers need to tweak it to get more chunks. 
+ * They can use this helper from their ndo_start_xmit() + */ +static inline void sk_pacing_shift_update(struct sock *sk, int val) +{ + if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val) + return; + sk->sk_pacing_shift = val; +} + +/* if a socket is bound to a device, check that the given device + * index is either the same or that the socket is bound to an L3 + * master device and the given device index is also enslaved to + * that L3 master + */ +static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) +{ + int mdif; + + if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif) + return true; + + mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); + if (mdif && mdif == sk->sk_bound_dev_if) + return true; + + return false; +} + #endif /* _SOCK_H */ diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h index 781f3433a0be..9470fd7e4350 100644 --- a/include/net/tc_act/tc_csum.h +++ b/include/net/tc_act/tc_csum.h @@ -6,10 +6,16 @@ #include <net/act_api.h> #include <linux/tc_act/tc_csum.h> +struct tcf_csum_params { + int action; + u32 update_flags; + struct rcu_head rcu; +}; + struct tcf_csum { struct tc_action common; - u32 update_flags; + struct tcf_csum_params __rcu *params; }; #define to_tcf_csum(a) ((struct tcf_csum *)a) @@ -24,7 +30,13 @@ static inline bool is_tcf_csum(const struct tc_action *a) static inline u32 tcf_csum_update_flags(const struct tc_action *a) { - return to_tcf_csum(a)->update_flags; + u32 update_flags; + + rcu_read_lock(); + update_flags = rcu_dereference(to_tcf_csum(a)->params)->update_flags; + rcu_read_unlock(); + + return update_flags; } #endif /* __NET_TC_CSUM_H */ diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h index 21d253c9a8c6..a2e9cbca5c9e 100644 --- a/include/net/tc_act/tc_mirred.h +++ b/include/net/tc_act/tc_mirred.h @@ -8,10 +8,8 @@ struct tcf_mirred { struct tc_action common; int tcfm_eaction; - int tcfm_ifindex; bool tcfm_mac_header_xmit; struct net_device __rcu *tcfm_dev; - struct net *net; struct list_head tcfm_list; }; #define to_mirred(a) ((struct tcf_mirred *)a) @@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a) return false; } -static inline int tcf_mirred_ifindex(const struct tc_action *a) +static inline struct net_device *tcf_mirred_dev(const struct tc_action *a) { - return to_mirred(a)->tcfm_ifindex; + return rtnl_dereference(to_mirred(a)->tcfm_dev); } #endif /* __NET_TC_MIR_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 6da880d2f022..58278669cc55 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); void tcp_close(struct sock *sk, long timeout); void tcp_init_sock(struct sock *sk); void tcp_init_transfer(struct sock *sk, int bpf_op); -unsigned int tcp_poll(struct file *file, struct socket *sock, +__poll_t tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); @@ -953,6 +953,7 @@ struct rate_sample { u32 prior_in_flight; /* in flight before this ACK */ bool is_app_limited; /* is sample from packet with bubble in pipe? */ bool is_retrans; /* is sample from retransmission? */ + bool is_ack_delayed; /* is this (likely) a delayed ACK? 
*/ }; struct tcp_congestion_ops { @@ -1507,8 +1508,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, /* From tcp_fastopen.c */ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, - struct tcp_fastopen_cookie *cookie, int *syn_loss, - unsigned long *last_syn_loss); + struct tcp_fastopen_cookie *cookie); void tcp_fastopen_cache_set(struct sock *sk, u16 mss, struct tcp_fastopen_cookie *cookie, bool syn_lost, u16 try_exp); @@ -1546,7 +1546,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; void tcp_fastopen_active_disable(struct sock *sk); bool tcp_fastopen_active_should_disable(struct sock *sk); void tcp_fastopen_active_disable_ofo_check(struct sock *sk); -void tcp_fastopen_active_timeout_reset(void); +void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired); /* Latencies incurred by various limits for a sender. They are * chronograph-like stats that are mutually exclusive. @@ -2006,17 +2006,21 @@ void tcp_cleanup_ulp(struct sock *sk); * program loaded). */ #ifdef CONFIG_BPF -static inline int tcp_call_bpf(struct sock *sk, int op) +static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) { struct bpf_sock_ops_kern sock_ops; int ret; - if (sk_fullsock(sk)) + memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); + if (sk_fullsock(sk)) { + sock_ops.is_fullsock = 1; sock_owned_by_me(sk); + } - memset(&sock_ops, 0, sizeof(sock_ops)); sock_ops.sk = sk; sock_ops.op = op; + if (nargs > 0) + memcpy(sock_ops.args, args, nargs * sizeof(*args)); ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); if (ret == 0) @@ -2025,18 +2029,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op) ret = -1; return ret; } + +static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) +{ + u32 args[2] = {arg1, arg2}; + + return tcp_call_bpf(sk, op, 2, args); +} + +static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, + u32 arg3) +{ + u32 args[3] = {arg1, arg2, arg3}; + + return tcp_call_bpf(sk, op, 3, args); +} + #else -static inline int tcp_call_bpf(struct sock *sk, int op) +static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) { return -EPERM; } + +static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) +{ + return -EPERM; +} + +static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, + u32 arg3) +{ + return -EPERM; +} + #endif static inline u32 tcp_timeout_init(struct sock *sk) { int timeout; - timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT); + timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); if (timeout <= 0) timeout = TCP_TIMEOUT_INIT; @@ -2047,7 +2079,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk) { int rwnd; - rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT); + rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); if (rwnd < 0) rwnd = 0; @@ -2056,7 +2088,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk) static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) { - return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1); + return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); } #if IS_ENABLED(CONFIG_SMC) diff --git a/include/net/tls.h b/include/net/tls.h index 9185e53a743c..4913430ab807 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -36,6 +36,7 @@ #include <linux/types.h> #include <asm/byteorder.h> +#include <linux/crypto.h> #include <linux/socket.h> #include <linux/tcp.h> #include <net/tcp.h> @@ -57,6 +58,7 @@ struct tls_sw_context { struct crypto_aead *aead_send; + 
struct crypto_wait async_wait; /* Sending context */ char aad_space[TLS_AAD_SPACE_SIZE]; diff --git a/include/net/udp.h b/include/net/udp.h index 6c759c8594e2..850a8e581cce 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -275,7 +275,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); int udp_init_sock(struct sock *sk); int __udp_disconnect(struct sock *sk, int flags); int udp_disconnect(struct sock *sk, int flags); -unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait); +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait); struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, bool is_ipv6); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index f96391e84a8a..ad73d8b3fcc2 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -301,7 +301,7 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: - return features;; + return features; } if ((l4_hdr == IPPROTO_UDP) && diff --git a/include/net/wext.h b/include/net/wext.h index e51f067fdb3a..aa192a670304 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -7,7 +7,7 @@ struct net; #ifdef CONFIG_WEXT_CORE -int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, +int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg); int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg); @@ -15,7 +15,7 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, struct iw_statistics *get_wireless_stats(struct net_device *dev); int call_commit_handler(struct net_device *dev); #else -static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, +static inline int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg) { return -EINVAL; diff --git a/include/net/xdp.h b/include/net/xdp.h new file mode 100644 index 000000000000..b2362ddfa694 --- /dev/null +++ b/include/net/xdp.h @@ -0,0 +1,48 @@ +/* include/net/xdp.h + * + * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. + * Released under terms in GPL version 2. See COPYING. + */ +#ifndef __LINUX_NET_XDP_H__ +#define __LINUX_NET_XDP_H__ + +/** + * DOC: XDP RX-queue information + * + * The XDP RX-queue info (xdp_rxq_info) is associated with the driver + * level RX-ring queues. It is information that is specific to how + * the driver have configured a given RX-ring queue. + * + * Each xdp_buff frame received in the driver carry a (pointer) + * reference to this xdp_rxq_info structure. This provides the XDP + * data-path read-access to RX-info for both kernel and bpf-side + * (limited subset). + * + * For now, direct access is only safe while running in NAPI/softirq + * context. Contents is read-mostly and must not be updated during + * driver NAPI/softirq poll. + * + * The driver usage API is a register and unregister API. + * + * The struct is not directly tied to the XDP prog. A new XDP prog + * can be attached as long as it doesn't change the underlying + * RX-ring. If the RX-ring does change significantly, the NIC driver + * naturally need to stop the RX-ring before purging and reallocating + * memory. In that process the driver MUST call unregistor (which + * also apply for driver shutdown and unload). The register API is + * also mandatory during RX-ring setup. 
+ */ + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; +} ____cacheline_aligned; /* perf critical, avoid false-sharing */ + +int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index); +void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); +void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); +bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); + +#endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index ae35991b5877..7d2077665c0b 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c /* A struct encoding bundle of transformations to apply to some set of flow. * - * dst->child points to the next element of bundle. + * xdst->child points to the next element of bundle. * dst->xfrm points to an instanse of transformer. * * Due to unfortunate limitations of current routing cache, which we @@ -984,6 +984,8 @@ struct xfrm_dst { struct rt6_info rt6; } u; struct dst_entry *route; + struct dst_entry *child; + struct dst_entry *path; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; u32 xfrm_genid; @@ -994,7 +996,35 @@ struct xfrm_dst { u32 path_cookie; }; +static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) +{ +#ifdef CONFIG_XFRM + if (dst->xfrm) { + const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; + + return xdst->path; + } +#endif + return (struct dst_entry *) dst; +} + +static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) +{ +#ifdef CONFIG_XFRM + if (dst->xfrm) { + struct xfrm_dst *xdst = (struct xfrm_dst *) dst; + return xdst->child; + } +#endif + return NULL; +} + #ifdef CONFIG_XFRM +static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child) +{ + xdst->child = child; +} + static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) { xfrm_pols_put(xdst->pols, xdst->num_pols); @@ -1021,6 +1051,7 @@ struct xfrm_offload { #define XFRM_GSO_SEGMENT 16 #define XFRM_GRO 32 #define XFRM_ESP_NO_TRAILER 64 +#define XFRM_DEV_RESUME 128 __u32 status; #define CRYPTO_SUCCESS 1 @@ -1847,34 +1878,53 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +#endif + static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) { +#ifdef CONFIG_XFRM struct sec_path *sp = skb->sp; if (!sp || !sp->olen || sp->len != sp->olen) return NULL; return &sp->ovec[sp->olen - 1]; -} +#else + return NULL; #endif +} void __net_init xfrm_dev_init(void); #ifdef CONFIG_XFRM_OFFLOAD -int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); +void xfrm_dev_resume(struct sk_buff *skb); +void xfrm_dev_backlog(struct softnet_data *sd); +struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again); int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) +{ + struct xfrm_state_offload *xso = &x->xso; + + if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) + xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); +} + static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) { struct xfrm_state *x = dst->xfrm; + struct xfrm_dst *xdst; if (!x || !x->type_offload) return false; - if (x->xso.offload_handle && 
(x->xso.dev == dst->path->dev) && - !dst->child->xfrm) + xdst = (struct xfrm_dst *) dst; + if (!x->xso.offload_handle && !xdst->child->xfrm) + return true; + if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) && + !xdst->child->xfrm) return true; return false; @@ -1894,15 +1944,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) struct net_device *dev = xso->dev; if (dev && dev->xfrmdev_ops) { - dev->xfrmdev_ops->xdo_dev_state_free(x); + if (dev->xfrmdev_ops->xdo_dev_state_free) + dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; dev_put(dev); } } #else -static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) +static inline void xfrm_dev_resume(struct sk_buff *skb) { - return 0; +} + +static inline void xfrm_dev_backlog(struct softnet_data *sd) +{ +} + +static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) +{ + return skb; } static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo) @@ -1923,6 +1982,10 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x return false; } +static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) +{ +} + static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) { return false; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 18c564f60e93..d656809f1217 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -94,7 +94,7 @@ struct rdma_dev_addr { * The dev_addr->net field must be initialized. */ int rdma_translate_ip(const struct sockaddr *addr, - struct rdma_dev_addr *dev_addr, u16 *vlan_id); + struct rdma_dev_addr *dev_addr); /** * rdma_resolve_ip - Resolve source and destination IP addresses to @@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr, int rdma_addr_size(struct sockaddr *addr); -int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, - u8 *smac, u16 *vlan_id, int *if_index, + u8 *dmac, const struct net_device *ndev, int *hoplimit); static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) @@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid) } } -static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, - union ib_gid *gid) -{ - struct net_device *dev; - struct in_device *ip4; - - dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); - if (dev) { - ip4 = in_dev_get(dev); - if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) - ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address, - (struct in6_addr *)gid); - - if (ip4) - in_dev_put(ip4); - - dev_put(dev); - } -} - +/* + * rdma_get/set_sgid/dgid() APIs are applicable to IB, and iWarp. + * They are not applicable to RoCE. + * RoCE GIDs are derived from the IP addresses. 
+ */ static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) { - if (dev_addr->transport == RDMA_TRANSPORT_IB && - dev_addr->dev_type != ARPHRD_INFINIBAND) - iboe_addr_get_sgid(dev_addr, gid); - else - memcpy(gid, dev_addr->src_dev_addr + - rdma_addr_gid_offset(dev_addr), sizeof *gid); + memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), + sizeof(*gid)); } static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h index c124d515f7d5..6e35416170a3 100644 --- a/include/rdma/ib_hdrs.h +++ b/include/rdma/ib_hdrs.h @@ -313,16 +313,14 @@ static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr) return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK); } -static inline u8 ib_bth_get_becn(struct ib_other_headers *ohdr) +static inline bool ib_bth_get_becn(struct ib_other_headers *ohdr) { - return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) & - IB_BECN_MASK); + return (ohdr->bth[1]) & cpu_to_be32(IB_BECN_SMASK); } -static inline u8 ib_bth_get_fecn(struct ib_other_headers *ohdr) +static inline bool ib_bth_get_fecn(struct ib_other_headers *ohdr) { - return (u8)((be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) & - IB_FECN_MASK); + return (ohdr->bth[1]) & cpu_to_be32(IB_FECN_SMASK); } static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) @@ -331,4 +329,13 @@ static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr) IB_BTH_TVER_MASK); } +static inline bool ib_bth_is_solicited(struct ib_other_headers *ohdr) +{ + return ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED); +} + +static inline bool ib_bth_is_migration(struct ib_other_headers *ohdr) +{ + return ohdr->bth[0] & cpu_to_be32(IB_BTH_MIG_REQ); +} #endif /* IB_HDRS_H */ diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 1f7f604db5aa..811cfcfcbe3d 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num, struct rdma_ah_attr *ah_attr); /** - * ib_init_ah_from_path - Initialize address handle attributes based on an SA - * path record. + * ib_init_ah_attr_from_path - Initialize address handle attributes based on + * an SA path record. 
*/ -int ib_init_ah_from_path(struct ib_device *device, u8 port_num, - struct sa_path_rec *rec, - struct rdma_ah_attr *ah_attr); +int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, + struct sa_path_rec *rec, + struct rdma_ah_attr *ah_attr); /** * ib_sa_pack_path - Conert a path record from struct ib_sa_path_rec diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index fd84cda5ed7c..73b2387e3f74 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -63,6 +63,7 @@ #include <linux/uaccess.h> #include <linux/cgroup_rdma.h> #include <uapi/rdma/ib_user_verbs.h> +#include <rdma/restrack.h> #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN @@ -300,11 +301,6 @@ struct ib_tm_caps { u32 max_sge; }; -enum ib_cq_creation_flags { - IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, - IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, -}; - struct ib_cq_init_attr { unsigned int cqe; int comp_vector; @@ -878,6 +874,7 @@ struct ib_mr_status { __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); enum rdma_ah_attr_type { + RDMA_AH_ATTR_TYPE_UNDEFINED, RDMA_AH_ATTR_TYPE_IB, RDMA_AH_ATTR_TYPE_ROCE, RDMA_AH_ATTR_TYPE_OPA, @@ -983,9 +980,9 @@ struct ib_wc { u32 invalidate_rkey; } ex; u32 src_qp; + u32 slid; int wc_flags; u16 pkey_index; - u32 slid; u8 sl; u8 dlid_path_bits; u8 port_num; /* valid only for DR SMPs on switches */ @@ -1082,6 +1079,7 @@ enum ib_qp_type { IB_QPT_XRC_INI = 9, IB_QPT_XRC_TGT, IB_QPT_MAX, + IB_QPT_DRIVER = 0xFF, /* Reserve a range for qp types internal to the low level driver. * These qp types will not be visible at the IB core layer, so the * IB_QPT_MAX usages should not be affected in the core layer @@ -1529,6 +1527,7 @@ struct ib_pd { * Implementation details of the RDMA core, don't use in drivers: */ struct ib_mr *__internal_mr; + struct rdma_restrack_entry res; }; struct ib_xrcd { @@ -1538,6 +1537,10 @@ struct ib_xrcd { struct mutex tgt_qp_mutex; struct list_head tgt_qp_list; + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_ah { @@ -1569,6 +1572,10 @@ struct ib_cq { struct irq_poll iop; struct work_struct work; }; + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_srq { @@ -1745,6 +1752,11 @@ struct ib_qp { struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_qp_security *qp_sec; u8 port; + + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_mr { @@ -2351,6 +2363,10 @@ struct ib_device { #endif u32 index; + /* + * Implementation details of the RDMA core, don't use in drivers + */ + struct rdma_restrack_root res; /** * The following mandatory functions are used only at device @@ -2836,8 +2852,7 @@ int ib_modify_port(struct ib_device *device, struct ib_port_modify *port_modify); int ib_find_gid(struct ib_device *device, union ib_gid *gid, - enum ib_gid_type gid_type, struct net_device *ndev, - u8 *port_num, u16 *index); + struct net_device *ndev, u8 *port_num, u16 *index); int ib_find_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index); @@ -2858,7 +2873,7 @@ enum ib_pd_flags { struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, const char *caller); #define ib_alloc_pd(device, flags) \ - __ib_alloc_pd((device), (flags), __func__) + __ib_alloc_pd((device), (flags), KBUILD_MODNAME) void ib_dealloc_pd(struct ib_pd *pd); /** @@ -2905,7 +2920,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, int 
ib_get_rdma_header_version(const union rdma_network_hdr *hdr); /** - * ib_init_ah_from_wc - Initializes address handle attributes from a + * ib_init_ah_attr_from_wc - Initializes address handle attributes from a * work completion. * @device: Device on which the received message arrived. * @port_num: Port on which the received message arrived. @@ -2915,9 +2930,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); * @ah_attr: Returned attributes that can be used when creating an address * handle for replying to the message. */ -int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, - const struct ib_wc *wc, const struct ib_grh *grh, - struct rdma_ah_attr *ah_attr); +int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num, + const struct ib_wc *wc, const struct ib_grh *grh, + struct rdma_ah_attr *ah_attr); /** * ib_create_ah_from_wc - Creates an address handle associated with the @@ -3135,8 +3150,12 @@ static inline int ib_post_recv(struct ib_qp *qp, return qp->device->post_recv(qp, recv_wr, bad_recv_wr); } -struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, - int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); +struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, + int nr_cqe, int comp_vector, + enum ib_poll_context poll_ctx, const char *caller); +#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \ + __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME) + void ib_free_cq(struct ib_cq *cq); int ib_process_cq_direct(struct ib_cq *cq, int budget); @@ -3560,8 +3579,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); /** * ib_alloc_xrcd - Allocates an XRC domain. * @device: The device on which to allocate the XRC domain. + * @caller: Module name for kernel consumers */ -struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); +struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller); +#define ib_alloc_xrcd(device) \ + __ib_alloc_xrcd((device), KBUILD_MODNAME) /** * ib_dealloc_xrcd - Deallocates an XRC domain. @@ -3789,18 +3811,24 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, grh->traffic_class = traffic_class; } -/*Get AH type */ +/** + * rdma_ah_find_type - Return address handle type. + * + * @dev: Device to be checked + * @port_num: Port number + */ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, - u32 port_num) + u8 port_num) { - if ((rdma_protocol_roce(dev, port_num)) || - (rdma_protocol_iwarp(dev, port_num))) + if (rdma_protocol_roce(dev, port_num)) return RDMA_AH_ATTR_TYPE_ROCE; - else if ((rdma_protocol_ib(dev, port_num)) && - (rdma_cap_opa_ah(dev, port_num))) - return RDMA_AH_ATTR_TYPE_OPA; - else + if (rdma_protocol_ib(dev, port_num)) { + if (rdma_cap_opa_ah(dev, port_num)) + return RDMA_AH_ATTR_TYPE_OPA; return RDMA_AH_ATTR_TYPE_IB; + } + + return RDMA_AH_ATTR_TYPE_UNDEFINED; } /** @@ -3850,4 +3878,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector) } +/** + * rdma_roce_rescan_device - Rescan all of the network devices in the system + * and add their gids, as needed, to the relevant RoCE devices. 
+ * + * @device: the rdma device + */ +void rdma_roce_rescan_device(struct ib_device *ibdev); + #endif /* IB_VERBS_H */ diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h index f68fca296631..2bbb7a67e643 100644 --- a/include/rdma/opa_addr.h +++ b/include/rdma/opa_addr.h @@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits) return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits)); } +/* Check for a valid unicast LID for non-SM traffic types */ +static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr) +{ + if (attr->type == RDMA_AH_ATTR_TYPE_IB) { + if (!rdma_ah_get_dlid(attr) || + rdma_ah_get_dlid(attr) >= + be32_to_cpu(IB_MULTICAST_LID_BASE)) + return false; + } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) { + if (!rdma_ah_get_dlid(attr) || + rdma_ah_get_dlid(attr) >= + opa_get_mcast_base(OPA_MCAST_NR)) + return false; + } + return true; +} #endif /* OPA_ADDR_H */ diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 3d2eed3c4e75..6538a5cc27b6 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason); const void *rdma_consumer_reject_data(struct rdma_cm_id *id, struct rdma_cm_event *ev, u8 *data_len); +/** + * rdma_read_gids - Return the SGID and DGID used for establishing + * connection. This can be used after rdma_resolve_addr() + * on client side. This can be use on new connection + * on server side. This is applicable to IB, RoCE, iWarp. + * If cm_id is not bound yet to the RDMA device, it doesn't + * copy and SGID or DGID to the given pointers. + * @id: Communication identifier whose GIDs are queried. + * @sgid: Pointer to SGID where SGID will be returned. It is optional. + * @dgid: Pointer to DGID where DGID will be returned. It is optional. + * Note: This API should not be used by any new ULPs or new code. + * Instead, users interested in querying GIDs should refer to path record + * of the rdma_cm_id to query the GIDs. + * This API is provided for compatibility for existing users. + */ + +void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid, + union ib_gid *dgid); + #endif /* RDMA_CM_H */ diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h index 6947a6ba2557..6a69d71a21a5 100644 --- a/include/rdma/rdma_cm_ib.h +++ b/include/rdma/rdma_cm_ib.h @@ -36,17 +36,17 @@ #include <rdma/rdma_cm.h> /** - * rdma_set_ib_paths - Manually sets the path records used to establish a + * rdma_set_ib_path - Manually sets the path record used to establish a * connection. * @id: Connection identifier associated with the request. * @path_rec: Reference to the path record * * This call permits a user to specify routing information for rdma_cm_id's - * bound to Infiniband devices. It is called on the client side of a + * bound to InfiniBand devices. It is called on the client side of a * connection and replaces the call to rdma_resolve_route. */ -int rdma_set_ib_paths(struct rdma_cm_id *id, - struct sa_path_rec *path_rec, int num_paths); +int rdma_set_ib_path(struct rdma_cm_id *id, + struct sa_path_rec *path_rec); /* Global qkey for UDP QPs and multicast groups. 
*/ #define RDMA_UDP_QKEY 0x01234567 diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 1ba84a78f1c5..4118324a0310 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -228,13 +228,6 @@ struct rvt_driver_provided { int (*port_callback)(struct ib_device *, u8, struct kobject *); /* - * Returns a string to represent the device for which is being - * registered. This is primarily used for error and debug messages on - * the console. - */ - const char * (*get_card_name)(struct rvt_dev_info *rdi); - - /* * Returns a pointer to the undelying hardware's PCI device. This is * used to display information as to what hardware is being referenced * in an output message @@ -419,6 +412,30 @@ struct rvt_dev_info { }; +/** + * rvt_set_ibdev_name - Craft an IB device name from client info + * @rdi: pointer to the client rvt_dev_info structure + * @name: client specific name + * @unit: client specific unit number. + */ +static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi, + const char *fmt, const char *name, + const int unit) +{ + snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit); +} + +/** + * rvt_get_ibdev_name - return the IB name + * @rdi: rdmavt device + * + * Return the registered name of the device. + */ +static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi) +{ + return rdi->ibdev.name; +} + static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd) { return container_of(ibpd, struct rvt_pd, ibpd); diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h new file mode 100644 index 000000000000..c2d81167c858 --- /dev/null +++ b/include/rdma/restrack.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + */ + +#ifndef _RDMA_RESTRACK_H_ +#define _RDMA_RESTRACK_H_ + +#include <linux/typecheck.h> +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/kref.h> +#include <linux/completion.h> + +/** + * enum rdma_restrack_type - HW objects to track + */ +enum rdma_restrack_type { + /** + * @RDMA_RESTRACK_PD: Protection domain (PD) + */ + RDMA_RESTRACK_PD, + /** + * @RDMA_RESTRACK_CQ: Completion queue (CQ) + */ + RDMA_RESTRACK_CQ, + /** + * @RDMA_RESTRACK_QP: Queue pair (QP) + */ + RDMA_RESTRACK_QP, + /** + * @RDMA_RESTRACK_XRCD: XRC domain (XRCD) + */ + RDMA_RESTRACK_XRCD, + /** + * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations + */ + RDMA_RESTRACK_MAX +}; + +#define RDMA_RESTRACK_HASH_BITS 8 +/** + * struct rdma_restrack_root - main resource tracking management + * entity, per-device + */ +struct rdma_restrack_root { + /* + * @rwsem: Read/write lock to protect lists + */ + struct rw_semaphore rwsem; + /** + * @hash: global database for all resources per-device + */ + DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS); +}; + +/** + * struct rdma_restrack_entry - metadata per-entry + */ +struct rdma_restrack_entry { + /** + * @valid: validity indicator + * + * The entries are filled during rdma_restrack_add, + * can be attempted to be free during rdma_restrack_del. + * + * As an example for that, see mlx5 QPs with type MLX5_IB_QPT_HW_GSI + */ + bool valid; + /* + * @kref: Protect destroy of the resource + */ + struct kref kref; + /* + * @comp: Signal that all consumers of resource are completed their work + */ + struct completion comp; + /** + * @task: owner of resource tracking entity + * + * There are two types of entities: created by user and created + * by kernel. 
+ * + * This is relevant for the entities created by users. + * For the entities created by kernel, this pointer will be NULL. + */ + struct task_struct *task; + /** + * @kern_name: name of owner for the kernel created entities. + */ + const char *kern_name; + /** + * @node: hash table entry + */ + struct hlist_node node; + /** + * @type: various objects in restrack database + */ + enum rdma_restrack_type type; +}; + +/** + * rdma_restrack_init() - initialize resource tracking + * @res: resource tracking root + */ +void rdma_restrack_init(struct rdma_restrack_root *res); + +/** + * rdma_restrack_clean() - clean resource tracking + * @res: resource tracking root + */ +void rdma_restrack_clean(struct rdma_restrack_root *res); + +/** + * rdma_restrack_count() - the current usage of a specific object + * @res: resource entry + * @type: actual type of object to operate + * @ns: PID namespace + */ +int rdma_restrack_count(struct rdma_restrack_root *res, + enum rdma_restrack_type type, + struct pid_namespace *ns); + +/** + * rdma_restrack_add() - add object to the resource tracking database + * @res: resource entry + */ +void rdma_restrack_add(struct rdma_restrack_entry *res); + +/** + * rdma_restrack_del() - delete object from the resource tracking database + * @res: resource entry + * @type: actual type of object to operate + */ +void rdma_restrack_del(struct rdma_restrack_entry *res); + +/** + * rdma_is_kernel_res() - check the owner of a resource + * @res: resource entry + */ +static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) +{ + return !res->task; +} + +/** + * rdma_restrack_get() - grab to protect resource from release + * @res: resource entry + */ +int __must_check rdma_restrack_get(struct rdma_restrack_entry *res); + +/** + * rdma_restrack_put() - release resource + * @res: resource entry + */ +int rdma_restrack_put(struct rdma_restrack_entry *res); +#endif /* _RDMA_RESTRACK_H_ */ diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 6df6fe0c2198..225ab7783dfd 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -75,16 +75,15 @@ enum phy_event { PHYE_OOB_ERROR, PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */ PHYE_RESUME_TIMEOUT, + PHYE_SHUTDOWN, PHY_NUM_EVENTS, }; enum discover_event { DISCE_DISCOVER_DOMAIN = 0U, DISCE_REVALIDATE_DOMAIN, - DISCE_PROBE, DISCE_SUSPEND, DISCE_RESUME, - DISCE_DESTRUCT, DISC_NUM_EVENTS, }; @@ -261,6 +260,7 @@ struct asd_sas_port { struct list_head dev_list; struct list_head disco_list; struct list_head destroy_list; + struct list_head sas_port_del_list; enum sas_linkrate linkrate; struct sas_work work; @@ -292,6 +292,7 @@ struct asd_sas_port { struct asd_sas_event { struct sas_work work; struct asd_sas_phy *phy; + int event; }; static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) @@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) return ev; } +static inline void INIT_SAS_EVENT(struct asd_sas_event *ev, + void (*fn)(struct work_struct *), + struct asd_sas_phy *phy, int event) +{ + INIT_SAS_WORK(&ev->work, fn); + ev->phy = phy; + ev->event = event; +} + +#define SAS_PHY_SHUTDOWN_THRES 1024 + /* The phy pretty much is controlled by the LLDD. * The class only reads those fields.
*/ struct asd_sas_phy { /* private: */ - struct asd_sas_event port_events[PORT_NUM_EVENTS]; - struct asd_sas_event phy_events[PHY_NUM_EVENTS]; - - unsigned long port_events_pending; - unsigned long phy_events_pending; - + atomic_t event_nr; + int in_shutdown; int error; int suspended; @@ -380,6 +388,9 @@ struct sas_ha_struct { struct device *dev; /* should be set */ struct module *lldd_module; /* should be set */ + struct workqueue_struct *event_q; + struct workqueue_struct *disco_q; + u8 *sas_addr; /* must be set */ u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; @@ -399,6 +410,8 @@ struct sas_ha_struct { struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */ struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */ + + int event_thres; }; #define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) @@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *, sector_t capacity, int *hsc); extern struct scsi_transport_template * sas_domain_attach_transport(struct sas_domain_function_template *); +extern struct device_attribute dev_attr_phy_event_threshold; int sas_discover_root_expander(struct domain_device *); diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 7fb57e905526..d8d4a902a88d 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -58,8 +58,7 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) #define SCMD_UNCHECKED_ISA_DMA (1 << 1) -#define SCMD_ZONE_WRITE_LOCK (1 << 2) -#define SCMD_INITIALIZED (1 << 3) +#define SCMD_INITIALIZED (1 << 2) /* flags preserved across unprep / reprep */ #define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) @@ -171,7 +170,6 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, extern void scsi_kunmap_atomic_sg(void *virt); extern int scsi_init_io(struct scsi_cmnd *cmd); -extern void scsi_initialize_rq(struct request *rq); extern int scsi_dma_map(struct scsi_cmnd *cmd); extern void scsi_dma_unmap(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index a8b7bf879ced..1a1df0d21ee3 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -571,6 +571,8 @@ struct Scsi_Host { struct blk_mq_tag_set tag_set; }; + struct rcu_head rcu; + atomic_t host_busy; /* commands actually active on low-level */ atomic_t host_blocked; diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index 1df8efb0ee01..c36860111932 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr { #define UNIT_ATTENTION 0x06 #define DATA_PROTECT 0x07 #define BLANK_CHECK 0x08 +#define VENDOR_SPECIFIC 0x09 #define COPY_ABORTED 0x0a #define ABORTED_COMMAND 0x0b #define VOLUME_OVERFLOW 0x0d diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 8cf30215c177..15da45dc2a5d 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -139,8 +139,8 @@ enum fc_vport_state { #define FC_PORTSPEED_50GBIT 0x200 #define FC_PORTSPEED_100GBIT 0x400 #define FC_PORTSPEED_25GBIT 0x800 -#define FC_PORTSPEED_64BIT 0x1000 -#define FC_PORTSPEED_128BIT 0x2000 +#define FC_PORTSPEED_64GBIT 0x1000 +#define FC_PORTSPEED_128GBIT 0x2000 #define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */ /* diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 62895b405933..05ec927a3c72 100644 --- a/include/scsi/scsi_transport_sas.h +++ 
b/include/scsi/scsi_transport_sas.h @@ -156,6 +156,7 @@ struct sas_port { struct mutex phy_list_mutex; struct list_head phy_list; + struct list_head del_list; /* libsas only */ }; #define dev_to_sas_port(d) \ diff --git a/include/scsi/srp.h b/include/scsi/srp.h index 5be834de491a..c16a3c9a4d9b 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h @@ -129,6 +129,23 @@ struct srp_login_req { u8 target_port_id[16]; }; +/** + * struct srp_login_req_rdma - RDMA/CM login parameters. + * + * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private + * data. The %srp_login_req_rdma structure contains the same information as + * %srp_login_req but with the reserved data removed. + */ +struct srp_login_req_rdma { + u64 tag; + __be16 req_buf_fmt; + u8 req_flags; + u8 opcode; + __be32 req_it_iu_len; + u8 initiator_port_id[16]; + u8 target_port_id[16]; +}; + /* * The SRP spec defines the size of the LOGIN_RSP structure to be 52 * bytes, so it needs to be packed to avoid having it padded to 56 diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 44202ff897fd..233bae954970 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -51,6 +51,12 @@ struct tegra_smmu_swgroup { unsigned int reg; }; +struct tegra_smmu_group_soc { + const char *name; + const unsigned int *swgroups; + unsigned int num_swgroups; +}; + struct tegra_smmu_soc { const struct tegra_mc_client *clients; unsigned int num_clients; @@ -58,6 +64,9 @@ struct tegra_smmu_soc { const struct tegra_smmu_swgroup *swgroups; unsigned int num_swgroups; + const struct tegra_smmu_group_soc *groups; + unsigned int num_groups; + bool supports_round_robin_arbitration; bool supports_request_limit; diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 1c3982bc558f..c32bf91c23e6 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -83,6 +83,7 @@ enum tegra_io_pad { TEGRA_IO_PAD_BB, TEGRA_IO_PAD_CAM, TEGRA_IO_PAD_COMP, + TEGRA_IO_PAD_CONN, TEGRA_IO_PAD_CSIA, TEGRA_IO_PAD_CSIB, TEGRA_IO_PAD_CSIC, @@ -92,31 +93,42 @@ enum tegra_io_pad { TEGRA_IO_PAD_DBG, TEGRA_IO_PAD_DEBUG_NONAO, TEGRA_IO_PAD_DMIC, + TEGRA_IO_PAD_DMIC_HV, TEGRA_IO_PAD_DP, TEGRA_IO_PAD_DSI, TEGRA_IO_PAD_DSIB, TEGRA_IO_PAD_DSIC, TEGRA_IO_PAD_DSID, + TEGRA_IO_PAD_EDP, TEGRA_IO_PAD_EMMC, TEGRA_IO_PAD_EMMC2, TEGRA_IO_PAD_GPIO, TEGRA_IO_PAD_HDMI, + TEGRA_IO_PAD_HDMI_DP0, + TEGRA_IO_PAD_HDMI_DP1, TEGRA_IO_PAD_HSIC, TEGRA_IO_PAD_HV, TEGRA_IO_PAD_LVDS, TEGRA_IO_PAD_MIPI_BIAS, TEGRA_IO_PAD_NAND, TEGRA_IO_PAD_PEX_BIAS, + TEGRA_IO_PAD_PEX_CLK_BIAS, TEGRA_IO_PAD_PEX_CLK1, TEGRA_IO_PAD_PEX_CLK2, + TEGRA_IO_PAD_PEX_CLK3, TEGRA_IO_PAD_PEX_CNTRL, TEGRA_IO_PAD_SDMMC1, + TEGRA_IO_PAD_SDMMC1_HV, + TEGRA_IO_PAD_SDMMC2, + TEGRA_IO_PAD_SDMMC2_HV, TEGRA_IO_PAD_SDMMC3, + TEGRA_IO_PAD_SDMMC3_HV, TEGRA_IO_PAD_SDMMC4, TEGRA_IO_PAD_SPI, TEGRA_IO_PAD_SPI_HV, TEGRA_IO_PAD_SYS_DDC, TEGRA_IO_PAD_UART, + TEGRA_IO_PAD_UFS, TEGRA_IO_PAD_USB0, TEGRA_IO_PAD_USB1, TEGRA_IO_PAD_USB2, diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h index ab9fcb2f97f0..afeca593188a 100644 --- a/include/sound/hwdep.h +++ b/include/sound/hwdep.h @@ -37,7 +37,7 @@ struct snd_hwdep_ops { long count, loff_t *offset); int (*open)(struct snd_hwdep *hw, struct file * file); int (*release)(struct snd_hwdep *hw, struct file * file); - unsigned int (*poll)(struct snd_hwdep *hw, struct file *file, + __poll_t (*poll)(struct snd_hwdep *hw, struct file *file, poll_table *wait); int (*ioctl)(struct snd_hwdep *hw, struct file *file, unsigned int cmd, unsigned long arg); diff --git 
a/include/sound/info.h b/include/sound/info.h index 67390ee846aa..becdf66d2825 100644 --- a/include/sound/info.h +++ b/include/sound/info.h @@ -62,7 +62,7 @@ struct snd_info_entry_ops { loff_t (*llseek)(struct snd_info_entry *entry, void *file_private_data, struct file *file, loff_t offset, int orig); - unsigned int (*poll)(struct snd_info_entry *entry, + __poll_t (*poll)(struct snd_info_entry *entry, void *file_private_data, struct file *file, poll_table *wait); int (*ioctl)(struct snd_info_entry *entry, void *file_private_data, diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h index 1bee3e7fdf32..8ea966448b58 100644 --- a/include/trace/events/bridge.h +++ b/include/trace/events/bridge.h @@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete, TP_fast_assign( __assign_str(br_dev, br->dev->name); __assign_str(dev, f->dst ? f->dst->dev->name : "null"); - memcpy(__entry->addr, f->addr.addr, ETH_ALEN); - __entry->vid = f->vlan_id; + memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN); + __entry->vid = f->key.vlan_id; ), TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u", diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 8f8dd42fa57b..06c87f9f720c 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED); { CP_NO_SPC_ROLL, "no space roll forward" }, \ { CP_NODE_NEED_CP, "node needs cp" }, \ { CP_FASTBOOT_MODE, "fastboot mode" }, \ - { CP_SPEC_LOG_NUM, "log type is 2" }) + { CP_SPEC_LOG_NUM, "log type is 2" }, \ + { CP_RECOVER_DIR, "dir needs recovery" }) struct victim_sel_policy; struct f2fs_map_blocks; diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h new file mode 100644 index 000000000000..3930119cab08 --- /dev/null +++ b/include/trace/events/net_probe_common.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NET_PROBE_COMMON_H + +#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk) \ + do { \ + struct sockaddr_in *v4 = (void *)__entry->saddr; \ + \ + v4->sin_family = AF_INET; \ + v4->sin_port = inet->inet_sport; \ + v4->sin_addr.s_addr = inet->inet_saddr; \ + v4 = (void *)__entry->daddr; \ + v4->sin_family = AF_INET; \ + v4->sin_port = inet->inet_dport; \ + v4->sin_addr.s_addr = inet->inet_daddr; \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) + +#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \ + do { \ + if (sk->sk_family == AF_INET6) { \ + struct sockaddr_in6 *v6 = (void *)__entry->saddr; \ + \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = inet->inet_sport; \ + v6->sin6_addr = inet6_sk(sk)->saddr; \ + v6 = (void *)__entry->daddr; \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = inet->inet_dport; \ + v6->sin6_addr = sk->sk_v6_daddr; \ + } else \ + TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); \ + } while (0) + +#else + +#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \ + TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); + +#endif + +#endif diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h new file mode 100644 index 000000000000..aa19afc73a4e --- /dev/null +++ b/include/trace/events/rdma.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 Oracle. All rights reserved. 
+ */ + +/* + * enum ib_event_type, from include/rdma/ib_verbs.h + */ + +#define IB_EVENT_LIST \ + ib_event(CQ_ERR) \ + ib_event(QP_FATAL) \ + ib_event(QP_REQ_ERR) \ + ib_event(QP_ACCESS_ERR) \ + ib_event(COMM_EST) \ + ib_event(SQ_DRAINED) \ + ib_event(PATH_MIG) \ + ib_event(PATH_MIG_ERR) \ + ib_event(DEVICE_FATAL) \ + ib_event(PORT_ACTIVE) \ + ib_event(PORT_ERR) \ + ib_event(LID_CHANGE) \ + ib_event(PKEY_CHANGE) \ + ib_event(SM_CHANGE) \ + ib_event(SRQ_ERR) \ + ib_event(SRQ_LIMIT_REACHED) \ + ib_event(QP_LAST_WQE_REACHED) \ + ib_event(CLIENT_REREGISTER) \ + ib_event(GID_CHANGE) \ + ib_event_end(WQ_FATAL) + +#undef ib_event +#undef ib_event_end + +#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x); +#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x); + +IB_EVENT_LIST + +#undef ib_event +#undef ib_event_end + +#define ib_event(x) { IB_EVENT_##x, #x }, +#define ib_event_end(x) { IB_EVENT_##x, #x } + +#define rdma_show_ib_event(x) \ + __print_symbolic(x, IB_EVENT_LIST) + +/* + * enum ib_wc_status type, from include/rdma/ib_verbs.h + */ +#define IB_WC_STATUS_LIST \ + ib_wc_status(SUCCESS) \ + ib_wc_status(LOC_LEN_ERR) \ + ib_wc_status(LOC_QP_OP_ERR) \ + ib_wc_status(LOC_EEC_OP_ERR) \ + ib_wc_status(LOC_PROT_ERR) \ + ib_wc_status(WR_FLUSH_ERR) \ + ib_wc_status(MW_BIND_ERR) \ + ib_wc_status(BAD_RESP_ERR) \ + ib_wc_status(LOC_ACCESS_ERR) \ + ib_wc_status(REM_INV_REQ_ERR) \ + ib_wc_status(REM_ACCESS_ERR) \ + ib_wc_status(REM_OP_ERR) \ + ib_wc_status(RETRY_EXC_ERR) \ + ib_wc_status(RNR_RETRY_EXC_ERR) \ + ib_wc_status(LOC_RDD_VIOL_ERR) \ + ib_wc_status(REM_INV_RD_REQ_ERR) \ + ib_wc_status(REM_ABORT_ERR) \ + ib_wc_status(INV_EECN_ERR) \ + ib_wc_status(INV_EEC_STATE_ERR) \ + ib_wc_status(FATAL_ERR) \ + ib_wc_status(RESP_TIMEOUT_ERR) \ + ib_wc_status_end(GENERAL_ERR) + +#undef ib_wc_status +#undef ib_wc_status_end + +#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x); +#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x); + +IB_WC_STATUS_LIST + +#undef ib_wc_status +#undef ib_wc_status_end + +#define ib_wc_status(x) { IB_WC_##x, #x }, +#define ib_wc_status_end(x) { IB_WC_##x, #x } + +#define rdma_show_wc_status(x) \ + __print_symbolic(x, IB_WC_STATUS_LIST) + +/* + * enum rdma_cm_event_type, from include/rdma/rdma_cm.h + */ +#define RDMA_CM_EVENT_LIST \ + rdma_cm_event(ADDR_RESOLVED) \ + rdma_cm_event(ADDR_ERROR) \ + rdma_cm_event(ROUTE_RESOLVED) \ + rdma_cm_event(ROUTE_ERROR) \ + rdma_cm_event(CONNECT_REQUEST) \ + rdma_cm_event(CONNECT_RESPONSE) \ + rdma_cm_event(CONNECT_ERROR) \ + rdma_cm_event(UNREACHABLE) \ + rdma_cm_event(REJECTED) \ + rdma_cm_event(ESTABLISHED) \ + rdma_cm_event(DISCONNECTED) \ + rdma_cm_event(DEVICE_REMOVAL) \ + rdma_cm_event(MULTICAST_JOIN) \ + rdma_cm_event(MULTICAST_ERROR) \ + rdma_cm_event(ADDR_CHANGE) \ + rdma_cm_event_end(TIMEWAIT_EXIT) + +#undef rdma_cm_event +#undef rdma_cm_event_end + +#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x); +#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x); + +RDMA_CM_EVENT_LIST + +#undef rdma_cm_event +#undef rdma_cm_event_end + +#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x }, +#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x } + +#define rdma_show_cm_event(x) \ + __print_symbolic(x, RDMA_CM_EVENT_LIST) diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h new file mode 100644 index 000000000000..50ed3f8bf534 --- /dev/null +++ b/include/trace/events/rpcrdma.h @@ -0,0 +1,890 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 Oracle. All rights reserved. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rpcrdma + +#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RPCRDMA_H + +#include <linux/tracepoint.h> +#include <trace/events/rdma.h> + +/** + ** Event classes + **/ + +DECLARE_EVENT_CLASS(xprtrdma_reply_event, + TP_PROTO( + const struct rpcrdma_rep *rep + ), + + TP_ARGS(rep), + + TP_STRUCT__entry( + __field(const void *, rep) + __field(const void *, r_xprt) + __field(u32, xid) + __field(u32, version) + __field(u32, proc) + ), + + TP_fast_assign( + __entry->rep = rep; + __entry->r_xprt = rep->rr_rxprt; + __entry->xid = be32_to_cpu(rep->rr_xid); + __entry->version = be32_to_cpu(rep->rr_vers); + __entry->proc = be32_to_cpu(rep->rr_proc); + ), + + TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u", + __entry->r_xprt, __entry->xid, __entry->rep, + __entry->version, __entry->proc + ) +); + +#define DEFINE_REPLY_EVENT(name) \ + DEFINE_EVENT(xprtrdma_reply_event, name, \ + TP_PROTO( \ + const struct rpcrdma_rep *rep \ + ), \ + TP_ARGS(rep)) + +DECLARE_EVENT_CLASS(xprtrdma_rxprt, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt + ), + + TP_ARGS(r_xprt), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p", + __get_str(addr), __get_str(port), __entry->r_xprt + ) +); + +#define DEFINE_RXPRT_EVENT(name) \ + DEFINE_EVENT(xprtrdma_rxprt, name, \ + TP_PROTO( \ + const struct rpcrdma_xprt *r_xprt \ + ), \ + TP_ARGS(r_xprt)) + +DECLARE_EVENT_CLASS(xprtrdma_rdch_event, + TP_PROTO( + const struct rpc_task *task, + unsigned int pos, + struct rpcrdma_mr *mr, + int nsegs + ), + + TP_ARGS(task, pos, mr, nsegs), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, mr) + __field(unsigned int, pos) + __field(int, nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(int, nsegs) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->mr = mr; + __entry->pos = pos; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->nsegs = nsegs; + ), + + TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->mr, + __entry->pos, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + __entry->nents < __entry->nsegs ? 
"more" : "last" + ) +); + +#define DEFINE_RDCH_EVENT(name) \ + DEFINE_EVENT(xprtrdma_rdch_event, name, \ + TP_PROTO( \ + const struct rpc_task *task, \ + unsigned int pos, \ + struct rpcrdma_mr *mr, \ + int nsegs \ + ), \ + TP_ARGS(task, pos, mr, nsegs)) + +DECLARE_EVENT_CLASS(xprtrdma_wrch_event, + TP_PROTO( + const struct rpc_task *task, + struct rpcrdma_mr *mr, + int nsegs + ), + + TP_ARGS(task, mr, nsegs), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, mr) + __field(int, nents) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(int, nsegs) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->mr = mr; + __entry->nents = mr->mr_nents; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->nsegs = nsegs; + ), + + TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->mr, + __entry->length, (unsigned long long)__entry->offset, + __entry->handle, + __entry->nents < __entry->nsegs ? "more" : "last" + ) +); + +#define DEFINE_WRCH_EVENT(name) \ + DEFINE_EVENT(xprtrdma_wrch_event, name, \ + TP_PROTO( \ + const struct rpc_task *task, \ + struct rpcrdma_mr *mr, \ + int nsegs \ + ), \ + TP_ARGS(task, mr, nsegs)) + +TRACE_DEFINE_ENUM(FRWR_IS_INVALID); +TRACE_DEFINE_ENUM(FRWR_IS_VALID); +TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR); +TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI); + +#define xprtrdma_show_frwr_state(x) \ + __print_symbolic(x, \ + { FRWR_IS_INVALID, "INVALID" }, \ + { FRWR_IS_VALID, "VALID" }, \ + { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \ + { FRWR_FLUSHED_LI, "FLUSHED_LI" }) + +DECLARE_EVENT_CLASS(xprtrdma_frwr_done, + TP_PROTO( + const struct ib_wc *wc, + const struct rpcrdma_frwr *frwr + ), + + TP_ARGS(wc, frwr), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(unsigned int, state) + __field(unsigned int, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr); + __entry->state = frwr->fr_state; + __entry->status = wc->status; + __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; + ), + + TP_printk( + "mr=%p state=%s: %s (%u/0x%x)", + __entry->mr, xprtrdma_show_frwr_state(__entry->state), + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +#define DEFINE_FRWR_DONE_EVENT(name) \ + DEFINE_EVENT(xprtrdma_frwr_done, name, \ + TP_PROTO( \ + const struct ib_wc *wc, \ + const struct rpcrdma_frwr *frwr \ + ), \ + TP_ARGS(wc, frwr)) + +DECLARE_EVENT_CLASS(xprtrdma_mr, + TP_PROTO( + const struct rpcrdma_mr *mr + ), + + TP_ARGS(mr), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + ), + + TP_fast_assign( + __entry->mr = mr; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + ), + + TP_printk("mr=%p %u@0x%016llx:0x%08x", + __entry->mr, __entry->length, + (unsigned long long)__entry->offset, + __entry->handle + ) +); + +#define DEFINE_MR_EVENT(name) \ + DEFINE_EVENT(xprtrdma_mr, name, \ + TP_PROTO( \ + const struct rpcrdma_mr *mr \ + ), \ + TP_ARGS(mr)) + +DECLARE_EVENT_CLASS(xprtrdma_cb_event, + TP_PROTO( + const struct rpc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(const void *, rqst) + __field(const void *, rep) + __field(const void *, req) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->rqst = rqst; + __entry->req = rpcr_to_rdmar(rqst); + __entry->rep = rpcr_to_rdmar(rqst)->rl_reply; + __entry->xid = be32_to_cpu(rqst->rq_xid); + ), + + TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p", + __entry->xid, __entry->rqst, __entry->req, __entry->rep + ) +); + +#define DEFINE_CB_EVENT(name) \ + DEFINE_EVENT(xprtrdma_cb_event, name, \ + TP_PROTO( \ + const struct rpc_rqst *rqst \ + ), \ + TP_ARGS(rqst)) + +/** + ** Connection events + **/ + +TRACE_EVENT(xprtrdma_conn_upcall, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + struct rdma_cm_event *event + ), + + TP_ARGS(r_xprt, event), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(unsigned int, event) + __field(int, status) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->event = event->event; + __entry->status = event->status; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)", + __get_str(addr), __get_str(port), + __entry->r_xprt, rdma_show_cm_event(__entry->event), + __entry->event, __entry->status + ) +); + +TRACE_EVENT(xprtrdma_disconnect, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + int status + ), + + TP_ARGS(r_xprt, status), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(int, status) + __field(int, connected) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->status = status; + __entry->connected = r_xprt->rx_ep.rep_connected; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected", + __get_str(addr), __get_str(port), + __entry->r_xprt, __entry->status, + __entry->connected == 1 ? 
"still " : "dis" + ) +); + +DEFINE_RXPRT_EVENT(xprtrdma_conn_start); +DEFINE_RXPRT_EVENT(xprtrdma_conn_tout); +DEFINE_RXPRT_EVENT(xprtrdma_create); +DEFINE_RXPRT_EVENT(xprtrdma_destroy); +DEFINE_RXPRT_EVENT(xprtrdma_remove); +DEFINE_RXPRT_EVENT(xprtrdma_reinsert); +DEFINE_RXPRT_EVENT(xprtrdma_reconnect); +DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc); + +TRACE_EVENT(xprtrdma_qp_error, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + const struct ib_event *event + ), + + TP_ARGS(r_xprt, event), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(unsigned int, event) + __string(name, event->device->name) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->event = event->event; + __assign_str(name, event->device->name); + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)", + __get_str(addr), __get_str(port), __entry->r_xprt, + __get_str(name), rdma_show_ib_event(__entry->event), + __entry->event + ) +); + +/** + ** Call events + **/ + +TRACE_EVENT(xprtrdma_createmrs, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + unsigned int count + ), + + TP_ARGS(r_xprt, count), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(unsigned int, count) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->count = count; + ), + + TP_printk("r_xprt=%p: created %u MRs", + __entry->r_xprt, __entry->count + ) +); + +DEFINE_RXPRT_EVENT(xprtrdma_nomrs); + +DEFINE_RDCH_EVENT(xprtrdma_read_chunk); +DEFINE_WRCH_EVENT(xprtrdma_write_chunk); +DEFINE_WRCH_EVENT(xprtrdma_reply_chunk); + +TRACE_DEFINE_ENUM(rpcrdma_noch); +TRACE_DEFINE_ENUM(rpcrdma_readch); +TRACE_DEFINE_ENUM(rpcrdma_areadch); +TRACE_DEFINE_ENUM(rpcrdma_writech); +TRACE_DEFINE_ENUM(rpcrdma_replych); + +#define xprtrdma_show_chunktype(x) \ + __print_symbolic(x, \ + { rpcrdma_noch, "inline" }, \ + { rpcrdma_readch, "read list" }, \ + { rpcrdma_areadch, "*read list" }, \ + { rpcrdma_writech, "write list" }, \ + { rpcrdma_replych, "reply chunk" }) + +TRACE_EVENT(xprtrdma_marshal, + TP_PROTO( + const struct rpc_rqst *rqst, + unsigned int hdrlen, + unsigned int rtype, + unsigned int wtype + ), + + TP_ARGS(rqst, hdrlen, rtype, wtype), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(unsigned int, hdrlen) + __field(unsigned int, headlen) + __field(unsigned int, pagelen) + __field(unsigned int, taillen) + __field(unsigned int, rtype) + __field(unsigned int, wtype) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->hdrlen = hdrlen; + __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; + __entry->pagelen = rqst->rq_snd_buf.page_len; + __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len; + __entry->rtype = rtype; + __entry->wtype = wtype; + ), + + TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->hdrlen, + __entry->headlen, __entry->pagelen, __entry->taillen, + xprtrdma_show_chunktype(__entry->rtype), + xprtrdma_show_chunktype(__entry->wtype) + ) +); + +TRACE_EVENT(xprtrdma_post_send, + TP_PROTO( + const struct rpcrdma_req *req, + int status + ), + + TP_ARGS(req, status), + + TP_STRUCT__entry( + __field(const void *, req) + __field(int, num_sge) + __field(bool, 
signaled) + __field(int, status) + ), + + TP_fast_assign( + __entry->req = req; + __entry->num_sge = req->rl_sendctx->sc_wr.num_sge; + __entry->signaled = req->rl_sendctx->sc_wr.send_flags & + IB_SEND_SIGNALED; + __entry->status = status; + ), + + TP_printk("req=%p, %d SGEs%s, status=%d", + __entry->req, __entry->num_sge, + (__entry->signaled ? ", signaled" : ""), + __entry->status + ) +); + +TRACE_EVENT(xprtrdma_post_recv, + TP_PROTO( + const struct rpcrdma_rep *rep, + int status + ), + + TP_ARGS(rep, status), + + TP_STRUCT__entry( + __field(const void *, rep) + __field(int, status) + ), + + TP_fast_assign( + __entry->rep = rep; + __entry->status = status; + ), + + TP_printk("rep=%p status=%d", + __entry->rep, __entry->status + ) +); + +/** + ** Completion events + **/ + +TRACE_EVENT(xprtrdma_wc_send, + TP_PROTO( + const struct rpcrdma_sendctx *sc, + const struct ib_wc *wc + ), + + TP_ARGS(sc, wc), + + TP_STRUCT__entry( + __field(const void *, req) + __field(unsigned int, unmap_count) + __field(unsigned int, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->req = sc->sc_req; + __entry->unmap_count = sc->sc_unmap_count; + __entry->status = wc->status; + __entry->vendor_err = __entry->status ? wc->vendor_err : 0; + ), + + TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)", + __entry->req, __entry->unmap_count, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +TRACE_EVENT(xprtrdma_wc_receive, + TP_PROTO( + const struct rpcrdma_rep *rep, + const struct ib_wc *wc + ), + + TP_ARGS(rep, wc), + + TP_STRUCT__entry( + __field(const void *, rep) + __field(unsigned int, byte_len) + __field(unsigned int, status) + __field(unsigned int, vendor_err) + ), + + TP_fast_assign( + __entry->rep = rep; + __entry->byte_len = wc->byte_len; + __entry->status = wc->status; + __entry->vendor_err = __entry->status ? 
wc->vendor_err : 0; + ), + + TP_printk("rep=%p, %u bytes: %s (%u/0x%x)", + __entry->rep, __entry->byte_len, + rdma_show_wc_status(__entry->status), + __entry->status, __entry->vendor_err + ) +); + +DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); +DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); +DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); + +DEFINE_MR_EVENT(xprtrdma_localinv); +DEFINE_MR_EVENT(xprtrdma_dma_unmap); +DEFINE_MR_EVENT(xprtrdma_remoteinv); +DEFINE_MR_EVENT(xprtrdma_recover_mr); + +/** + ** Reply events + **/ + +TRACE_EVENT(xprtrdma_reply, + TP_PROTO( + const struct rpc_task *task, + const struct rpcrdma_rep *rep, + const struct rpcrdma_req *req, + unsigned int credits + ), + + TP_ARGS(task, rep, req, credits), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, rep) + __field(const void *, req) + __field(u32, xid) + __field(unsigned int, credits) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->rep = rep; + __entry->req = req; + __entry->xid = be32_to_cpu(rep->rr_xid); + __entry->credits = credits; + ), + + TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->credits, __entry->rep, __entry->req + ) +); + +TRACE_EVENT(xprtrdma_defer_cmp, + TP_PROTO( + const struct rpcrdma_rep *rep + ), + + TP_ARGS(rep), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, rep) + __field(u32, xid) + ), + + TP_fast_assign( + __entry->task_id = rep->rr_rqst->rq_task->tk_pid; + __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid; + __entry->rep = rep; + __entry->xid = be32_to_cpu(rep->rr_xid); + ), + + TP_printk("task:%u@%u xid=0x%08x rep=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->rep + ) +); + +DEFINE_REPLY_EVENT(xprtrdma_reply_vers); +DEFINE_REPLY_EVENT(xprtrdma_reply_rqst); +DEFINE_REPLY_EVENT(xprtrdma_reply_short); +DEFINE_REPLY_EVENT(xprtrdma_reply_hdr); + +TRACE_EVENT(xprtrdma_fixup, + TP_PROTO( + const struct rpc_rqst *rqst, + int len, + int hdrlen + ), + + TP_ARGS(rqst, len, hdrlen), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, base) + __field(int, len) + __field(int, hdrlen) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->base = rqst->rq_rcv_buf.head[0].iov_base; + __entry->len = len; + __entry->hdrlen = hdrlen; + ), + + TP_printk("task:%u@%u base=%p len=%d hdrlen=%d", + __entry->task_id, __entry->client_id, + __entry->base, __entry->len, __entry->hdrlen + ) +); + +TRACE_EVENT(xprtrdma_fixup_pg, + TP_PROTO( + const struct rpc_rqst *rqst, + int pageno, + const void *pos, + int len, + int curlen + ), + + TP_ARGS(rqst, pageno, pos, len, curlen), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, pos) + __field(int, pageno) + __field(int, len) + __field(int, curlen) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->pos = pos; + __entry->pageno = pageno; + __entry->len = len; + __entry->curlen = curlen; + ), + + TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d", + __entry->task_id, __entry->client_id, + __entry->pageno, __entry->pos, __entry->len, __entry->curlen + ) +); + 
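Each TRACE_EVENT() in this header expands into a static inline trace_<name>() that call sites invoke with the TP_PROTO arguments, and exactly one .c file defines CREATE_TRACE_POINTS before including the header so that the trailing <trace/define_trace.h> include emits the event bodies. A minimal sketch of how the xprtrdma_decode_seg event defined immediately below could be fired is shown here; the wrapper function and the way its arguments are obtained are illustrative assumptions, not code taken from the xprtrdma sources:

#define CREATE_TRACE_POINTS	/* in exactly one .c file */
#include <trace/events/rpcrdma.h>

/* Hypothetical helper: report one decoded RDMA segment via the tracepoint. */
static void example_report_rdma_segment(u32 handle, u32 length, u64 offset)
{
	/* Generated from TRACE_EVENT(xprtrdma_decode_seg, ...) below. */
	trace_xprtrdma_decode_seg(handle, length, offset);
}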
+TRACE_EVENT(xprtrdma_decode_seg, + TP_PROTO( + u32 handle, + u32 length, + u64 offset + ), + + TP_ARGS(handle, length, offset), + + TP_STRUCT__entry( + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + ), + + TP_fast_assign( + __entry->handle = handle; + __entry->length = length; + __entry->offset = offset; + ), + + TP_printk("%u@0x%016llx:0x%08x", + __entry->length, (unsigned long long)__entry->offset, + __entry->handle + ) +); + +/** + ** Allocation/release of rpcrdma_reqs and rpcrdma_reps + **/ + +TRACE_EVENT(xprtrdma_allocate, + TP_PROTO( + const struct rpc_task *task, + const struct rpcrdma_req *req + ), + + TP_ARGS(task, req), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, req) + __field(const void *, rep) + __field(size_t, callsize) + __field(size_t, rcvsize) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->req = req; + __entry->rep = req ? req->rl_reply : NULL; + __entry->callsize = task->tk_rqstp->rq_callsize; + __entry->rcvsize = task->tk_rqstp->rq_rcvsize; + ), + + TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)", + __entry->task_id, __entry->client_id, + __entry->req, __entry->rep, + __entry->callsize, __entry->rcvsize + ) +); + +TRACE_EVENT(xprtrdma_rpc_done, + TP_PROTO( + const struct rpc_task *task, + const struct rpcrdma_req *req + ), + + TP_ARGS(task, req), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(const void *, req) + __field(const void *, rep) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->req = req; + __entry->rep = req->rl_reply; + ), + + TP_printk("task:%u@%u req=%p rep=%p", + __entry->task_id, __entry->client_id, + __entry->req, __entry->rep + ) +); + +DEFINE_RXPRT_EVENT(xprtrdma_noreps); + +/** + ** Callback events + **/ + +TRACE_EVENT(xprtrdma_cb_setup, + TP_PROTO( + const struct rpcrdma_xprt *r_xprt, + unsigned int reqs + ), + + TP_ARGS(r_xprt, reqs), + + TP_STRUCT__entry( + __field(const void *, r_xprt) + __field(unsigned int, reqs) + __string(addr, rpcrdma_addrstr(r_xprt)) + __string(port, rpcrdma_portstr(r_xprt)) + ), + + TP_fast_assign( + __entry->r_xprt = r_xprt; + __entry->reqs = reqs; + __assign_str(addr, rpcrdma_addrstr(r_xprt)); + __assign_str(port, rpcrdma_portstr(r_xprt)); + ), + + TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs", + __get_str(addr), __get_str(port), + __entry->r_xprt, __entry->reqs + ) +); + +DEFINE_CB_EVENT(xprtrdma_cb_call); +DEFINE_CB_EVENT(xprtrdma_cb_reply); + +#endif /* _TRACE_RPCRDMA_H */ + +#include <trace/define_trace.h> diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h new file mode 100644 index 000000000000..7475c7be165a --- /dev/null +++ b/include/trace/events/sctp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sctp + +#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SCTP_H + +#include <net/sctp/structs.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(sctp_probe_path, + + TP_PROTO(struct sctp_transport *sp, + const struct sctp_association *asoc), + + TP_ARGS(sp, asoc), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, primary) + __array(__u8, ipaddr, sizeof(union sctp_addr)) + __field(__u32, state) + __field(__u32, cwnd) + __field(__u32, ssthresh) + __field(__u32, flight_size) + __field(__u32, partial_bytes_acked) + __field(__u32, 
pathmtu) + ), + + TP_fast_assign( + __entry->asoc = (unsigned long)asoc; + __entry->primary = (sp == asoc->peer.primary_path); + memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr)); + __entry->state = sp->state; + __entry->cwnd = sp->cwnd; + __entry->ssthresh = sp->ssthresh; + __entry->flight_size = sp->flight_size; + __entry->partial_bytes_acked = sp->partial_bytes_acked; + __entry->pathmtu = sp->pathmtu; + ), + + TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u " + "flight_size=%u partial_bytes_acked=%u pathmtu=%u", + __entry->asoc, __entry->primary ? "(*)" : "", + __entry->ipaddr, __entry->state, __entry->cwnd, + __entry->ssthresh, __entry->flight_size, + __entry->partial_bytes_acked, __entry->pathmtu) +); + +TRACE_EVENT(sctp_probe, + + TP_PROTO(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk), + + TP_ARGS(ep, asoc, chunk), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, mark) + __field(__u16, bind_port) + __field(__u16, peer_port) + __field(__u32, pathmtu) + __field(__u32, rwnd) + __field(__u16, unack_data) + ), + + TP_fast_assign( + struct sk_buff *skb = chunk->skb; + + __entry->asoc = (unsigned long)asoc; + __entry->mark = skb->mark; + __entry->bind_port = ep->base.bind_addr.port; + __entry->peer_port = asoc->peer.port; + __entry->pathmtu = asoc->pathmtu; + __entry->rwnd = asoc->peer.rwnd; + __entry->unack_data = asoc->unack_data; + + if (trace_sctp_probe_path_enabled()) { + struct sctp_transport *sp; + + list_for_each_entry(sp, &asoc->peer.transport_addr_list, + transports) { + trace_sctp_probe_path(sp, asoc); + } + } + ), + + TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " + "rwnd=%u unack_data=%d", + __entry->asoc, __entry->mark, __entry->bind_port, + __entry->peer_port, __entry->pathmtu, __entry->rwnd, + __entry->unack_data) +); + +#endif /* _TRACE_SCTP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/siox.h b/include/trace/events/siox.h new file mode 100644 index 000000000000..68a43fc2c3a5 --- /dev/null +++ b/include/trace/events/siox.h @@ -0,0 +1,66 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM siox + +#if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SIOX_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(siox_set_data, + TP_PROTO(const struct siox_master *smaster, + const struct siox_device *sdevice, + unsigned int devno, size_t bufoffset), + TP_ARGS(smaster, sdevice, devno, bufoffset), + TP_STRUCT__entry( + __field(int, busno) + __field(unsigned int, devno) + __field(size_t, inbytes) + __dynamic_array(u8, buf, sdevice->inbytes) + ), + TP_fast_assign( + __entry->busno = smaster->busno; + __entry->devno = devno; + __entry->inbytes = sdevice->inbytes; + memcpy(__get_dynamic_array(buf), + smaster->buf + bufoffset, sdevice->inbytes); + ), + TP_printk("siox-%d-%u [%*phD]", + __entry->busno, + __entry->devno, + (int)__entry->inbytes, __get_dynamic_array(buf) + ) +); + +TRACE_EVENT(siox_get_data, + TP_PROTO(const struct siox_master *smaster, + const struct siox_device *sdevice, + unsigned int devno, u8 status_clean, + size_t bufoffset), + TP_ARGS(smaster, sdevice, devno, status_clean, bufoffset), + TP_STRUCT__entry( + __field(int, busno) + __field(unsigned int, devno) + __field(u8, status_clean) + __field(size_t, outbytes) + __dynamic_array(u8, buf, sdevice->outbytes) + ), + TP_fast_assign( + __entry->busno = smaster->busno; + __entry->devno = devno; + __entry->status_clean 
= status_clean; + __entry->outbytes = sdevice->outbytes; + memcpy(__get_dynamic_array(buf), + smaster->buf + bufoffset, sdevice->outbytes); + ), + TP_printk("siox-%d-%u (%02hhx) [%*phD]", + __entry->busno, + __entry->devno, + __entry->status_clean, + (int)__entry->outbytes, __get_dynamic_array(buf) + ) +); + +#endif /* if !defined(_TRACE_SIOX_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index ec4dade24466..3176a3931107 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -6,7 +6,58 @@ #define _TRACE_SOCK_H #include <net/sock.h> +#include <net/ipv6.h> #include <linux/tracepoint.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> + +#define family_names \ + EM(AF_INET) \ + EMe(AF_INET6) + +/* The protocol traced by inet_sock_set_state */ +#define inet_protocol_names \ + EM(IPPROTO_TCP) \ + EM(IPPROTO_DCCP) \ + EMe(IPPROTO_SCTP) + +#define tcp_state_names \ + EM(TCP_ESTABLISHED) \ + EM(TCP_SYN_SENT) \ + EM(TCP_SYN_RECV) \ + EM(TCP_FIN_WAIT1) \ + EM(TCP_FIN_WAIT2) \ + EM(TCP_TIME_WAIT) \ + EM(TCP_CLOSE) \ + EM(TCP_CLOSE_WAIT) \ + EM(TCP_LAST_ACK) \ + EM(TCP_LISTEN) \ + EM(TCP_CLOSING) \ + EMe(TCP_NEW_SYN_RECV) + +/* enums need to be exported to user space */ +#undef EM +#undef EMe +#define EM(a) TRACE_DEFINE_ENUM(a); +#define EMe(a) TRACE_DEFINE_ENUM(a); + +family_names +inet_protocol_names +tcp_state_names + +#undef EM +#undef EMe +#define EM(a) { a, #a }, +#define EMe(a) { a, #a } + +#define show_family_name(val) \ + __print_symbolic(val, family_names) + +#define show_inet_protocol_name(val) \ + __print_symbolic(val, inet_protocol_names) + +#define show_tcp_state_name(val) \ + __print_symbolic(val, tcp_state_names) TRACE_EVENT(sock_rcvqueue_full, @@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit, __entry->rmem_alloc) ); +TRACE_EVENT(inet_sock_set_state, + + TP_PROTO(const struct sock *sk, const int oldstate, const int newstate), + + TP_ARGS(sk, oldstate, newstate), + + TP_STRUCT__entry( + __field(const void *, skaddr) + __field(int, oldstate) + __field(int, newstate) + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + __field(__u8, protocol) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __array(__u8, saddr_v6, 16) + __array(__u8, daddr_v6, 16) + ), + + TP_fast_assign( + struct inet_sock *inet = inet_sk(sk); + struct in6_addr *pin6; + __be32 *p32; + + __entry->skaddr = sk; + __entry->oldstate = oldstate; + __entry->newstate = newstate; + + __entry->family = sk->sk_family; + __entry->protocol = sk->sk_protocol; + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + + p32 = (__be32 *) __entry->saddr; + *p32 = inet->inet_saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = inet->inet_daddr; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) { + pin6 = (struct in6_addr *)__entry->saddr_v6; + *pin6 = sk->sk_v6_rcv_saddr; + pin6 = (struct in6_addr *)__entry->daddr_v6; + *pin6 = sk->sk_v6_daddr; + } else +#endif + { + pin6 = (struct in6_addr *)__entry->saddr_v6; + ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); + pin6 = (struct in6_addr *)__entry->daddr_v6; + ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); + } + ), + + TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", + show_family_name(__entry->family), + show_inet_protocol_name(__entry->protocol), + __entry->sport, 
__entry->dport, + __entry->saddr, __entry->daddr, + __entry->saddr_v6, __entry->daddr_v6, + show_tcp_state_name(__entry->oldstate), + show_tcp_state_name(__entry->newstate)) +); + #endif /* _TRACE_SOCK_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 8c153f68509e..970c91a83173 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status, __entry->status = task->tk_status; ), - TP_printk("task:%u@%u, status %d", + TP_printk("task:%u@%u status=%d", __entry->task_id, __entry->client_id, __entry->status) ); @@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status, __entry->status = status; ), - TP_printk("task:%u@%u, status %d", + TP_printk("task:%u@%u status=%d", __entry->task_id, __entry->client_id, __entry->status) ); @@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued, ), TP_fast_assign( - __entry->client_id = clnt->cl_clid; + __entry->client_id = clnt ? clnt->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->timeout = task->tk_timeout; __entry->runstate = task->tk_runstate; @@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued, __assign_str(q_name, rpc_qname(q)); ), - TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s", + TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s", __entry->task_id, __entry->client_id, __entry->flags, __entry->runstate, @@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event, __entry->status) ); +DEFINE_EVENT(rpc_xprt_event, xprt_timer, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), TP_ARGS(xprt, xid, status)); diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index ab34c561f26b..878b2be7ce77 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM tcp @@ -8,22 +9,7 @@ #include <linux/tcp.h> #include <linux/tracepoint.h> #include <net/ipv6.h> - -#define tcp_state_name(state) { state, #state } -#define show_tcp_state_name(val) \ - __print_symbolic(val, \ - tcp_state_name(TCP_ESTABLISHED), \ - tcp_state_name(TCP_SYN_SENT), \ - tcp_state_name(TCP_SYN_RECV), \ - tcp_state_name(TCP_FIN_WAIT1), \ - tcp_state_name(TCP_FIN_WAIT2), \ - tcp_state_name(TCP_TIME_WAIT), \ - tcp_state_name(TCP_CLOSE), \ - tcp_state_name(TCP_CLOSE_WAIT), \ - tcp_state_name(TCP_LAST_ACK), \ - tcp_state_name(TCP_LISTEN), \ - tcp_state_name(TCP_CLOSING), \ - tcp_state_name(TCP_NEW_SYN_RECV)) +#include <net/tcp.h> #define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ do { \ @@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack, __entry->saddr_v6, __entry->daddr_v6) ); +#include <trace/events/net_probe_common.h> + +TRACE_EVENT(tcp_probe, + + TP_PROTO(struct sock *sk, struct sk_buff *skb), + + TP_ARGS(sk, skb), + + TP_STRUCT__entry( + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(__u16, sport) + __field(__u16, dport) + __field(__u32, mark) + __field(__u16, length) + __field(__u32, snd_nxt) + __field(__u32, snd_una) + __field(__u32, snd_cwnd) + __field(__u32, ssthresh) + __field(__u32, snd_wnd) + __field(__u32, srtt) + __field(__u32, rcv_wnd) + ), + + TP_fast_assign( + const struct tcp_sock *tp = 
tcp_sk(sk); + const struct inet_sock *inet = inet_sk(sk); + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + /* For filtering use */ + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + __entry->mark = skb->mark; + + __entry->length = skb->len; + __entry->snd_nxt = tp->snd_nxt; + __entry->snd_una = tp->snd_una; + __entry->snd_cwnd = tp->snd_cwnd; + __entry->snd_wnd = tp->snd_wnd; + __entry->rcv_wnd = tp->rcv_wnd; + __entry->ssthresh = tcp_current_ssthresh(sk); + __entry->srtt = tp->srtt_us >> 3; + ), + + TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x " + "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u " + "rcv_wnd=%u", + __entry->saddr, __entry->daddr, __entry->mark, + __entry->length, __entry->snd_nxt, __entry->snd_una, + __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd, + __entry->srtt, __entry->rcv_wnd) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index d70b53e65f43..e0b8b9173e1c 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -192,12 +192,12 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re TRACE_EVENT(mm_shrink_slab_start, TP_PROTO(struct shrinker *shr, struct shrink_control *sc, - long nr_objects_to_shrink, unsigned long pgs_scanned, - unsigned long lru_pgs, unsigned long cache_items, - unsigned long long delta, unsigned long total_scan), + long nr_objects_to_shrink, unsigned long cache_items, + unsigned long long delta, unsigned long total_scan, + int priority), - TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, - cache_items, delta, total_scan), + TP_ARGS(shr, sc, nr_objects_to_shrink, cache_items, delta, total_scan, + priority), TP_STRUCT__entry( __field(struct shrinker *, shr) @@ -205,11 +205,10 @@ TRACE_EVENT(mm_shrink_slab_start, __field(int, nid) __field(long, nr_objects_to_shrink) __field(gfp_t, gfp_flags) - __field(unsigned long, pgs_scanned) - __field(unsigned long, lru_pgs) __field(unsigned long, cache_items) __field(unsigned long long, delta) __field(unsigned long, total_scan) + __field(int, priority) ), TP_fast_assign( @@ -218,24 +217,22 @@ TRACE_EVENT(mm_shrink_slab_start, __entry->nid = sc->nid; __entry->nr_objects_to_shrink = nr_objects_to_shrink; __entry->gfp_flags = sc->gfp_mask; - __entry->pgs_scanned = pgs_scanned; - __entry->lru_pgs = lru_pgs; __entry->cache_items = cache_items; __entry->delta = delta; __entry->total_scan = total_scan; + __entry->priority = priority; ), - TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", + TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d", __entry->shrink, __entry->shr, __entry->nid, __entry->nr_objects_to_shrink, show_gfp_flags(__entry->gfp_flags), - __entry->pgs_scanned, - __entry->lru_pgs, __entry->cache_items, __entry->delta, - __entry->total_scan) + __entry->total_scan, + __entry->priority) ); TRACE_EVENT(mm_shrink_slab_end, diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h index fefb3d2c3fac..639fade14b23 100644 --- a/include/uapi/asm-generic/poll.h +++ b/include/uapi/asm-generic/poll.h @@ -3,35 +3,49 @@ #define __ASM_GENERIC_POLL_H /* These are specified by iBCS2 */ -#define POLLIN 
0x0001 -#define POLLPRI 0x0002 -#define POLLOUT 0x0004 -#define POLLERR 0x0008 -#define POLLHUP 0x0010 -#define POLLNVAL 0x0020 +#define POLLIN (__force __poll_t)0x0001 +#define POLLPRI (__force __poll_t)0x0002 +#define POLLOUT (__force __poll_t)0x0004 +#define POLLERR (__force __poll_t)0x0008 +#define POLLHUP (__force __poll_t)0x0010 +#define POLLNVAL (__force __poll_t)0x0020 /* The rest seem to be more-or-less nonstandard. Check them! */ -#define POLLRDNORM 0x0040 -#define POLLRDBAND 0x0080 +#define POLLRDNORM (__force __poll_t)0x0040 +#define POLLRDBAND (__force __poll_t)0x0080 #ifndef POLLWRNORM -#define POLLWRNORM 0x0100 +#define POLLWRNORM (__force __poll_t)0x0100 #endif #ifndef POLLWRBAND -#define POLLWRBAND 0x0200 +#define POLLWRBAND (__force __poll_t)0x0200 #endif #ifndef POLLMSG -#define POLLMSG 0x0400 +#define POLLMSG (__force __poll_t)0x0400 #endif #ifndef POLLREMOVE -#define POLLREMOVE 0x1000 +#define POLLREMOVE (__force __poll_t)0x1000 #endif #ifndef POLLRDHUP -#define POLLRDHUP 0x2000 +#define POLLRDHUP (__force __poll_t)0x2000 #endif -#define POLLFREE 0x4000 /* currently only for epoll */ +#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */ -#define POLL_BUSY_LOOP 0x8000 +#define POLL_BUSY_LOOP (__force __poll_t)0x8000 + +#ifdef __KERNEL__ +#ifndef __ARCH_HAS_MANGLED_POLL +static inline __u16 mangle_poll(__poll_t val) +{ + return (__force __u16)val; +} + +static inline __poll_t demangle_poll(__u16 v) +{ + return (__force __poll_t)v; +} +#endif +#endif struct pollfd { int fd; diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index e447283b8f52..254afc31e3be 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -23,10 +23,6 @@ typedef union sigval { #define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int)) #endif -#ifndef __ARCH_SI_UID_T -#define __ARCH_SI_UID_T __kernel_uid32_t -#endif - /* * The default "si_band" type is "long", as specified by POSIX. * However, some architectures want to override this to "int" @@ -44,12 +40,15 @@ typedef union sigval { #define __ARCH_SI_ATTRIBUTES #endif -#ifndef HAVE_ARCH_SIGINFO_T - typedef struct siginfo { int si_signo; +#ifndef __ARCH_HAS_SWAPPED_SIGINFO int si_errno; int si_code; +#else + int si_code; + int si_errno; +#endif union { int _pad[SI_PAD_SIZE]; @@ -57,14 +56,13 @@ typedef struct siginfo { /* kill() */ struct { __kernel_pid_t _pid; /* sender's pid */ - __ARCH_SI_UID_T _uid; /* sender's uid */ + __kernel_uid32_t _uid; /* sender's uid */ } _kill; /* POSIX.1b timers */ struct { __kernel_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ - char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)]; sigval_t _sigval; /* same as below */ int _sys_private; /* not to be passed to user */ } _timer; @@ -72,34 +70,47 @@ typedef struct siginfo { /* POSIX.1b signals */ struct { __kernel_pid_t _pid; /* sender's pid */ - __ARCH_SI_UID_T _uid; /* sender's uid */ + __kernel_uid32_t _uid; /* sender's uid */ sigval_t _sigval; } _rt; /* SIGCHLD */ struct { __kernel_pid_t _pid; /* which child */ - __ARCH_SI_UID_T _uid; /* sender's uid */ + __kernel_uid32_t _uid; /* sender's uid */ int _status; /* exit code */ __ARCH_SI_CLOCK_T _utime; __ARCH_SI_CLOCK_T _stime; } _sigchld; - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ struct { void __user *_addr; /* faulting insn/memory ref. 
*/ #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif - short _addr_lsb; /* LSB of the reported address */ +#ifdef __ia64__ + int _imm; /* immediate value for "break" */ + unsigned int _flags; /* see ia64 si_flags */ + unsigned long _isr; /* isr */ +#endif union { + /* + * used when si_code=BUS_MCEERR_AR or + * used when si_code=BUS_MCEERR_AO + */ + short _addr_lsb; /* LSB of the reported address */ /* used when si_code=SEGV_BNDERR */ struct { + short _dummy_bnd; void __user *_lower; void __user *_upper; } _addr_bnd; /* used when si_code=SEGV_PKUERR */ - __u32 _pkey; + struct { + short _dummy_pkey; + __u32 _pkey; + } _addr_pkey; }; } _sigfault; @@ -118,10 +129,6 @@ typedef struct siginfo { } _sifields; } __ARCH_SI_ATTRIBUTES siginfo_t; -/* If the arch shares siginfo, then it has SIGSYS. */ -#define __ARCH_SIGSYS -#endif - /* * How these fields are to be accessed. */ @@ -143,14 +150,12 @@ typedef struct siginfo { #define si_addr_lsb _sifields._sigfault._addr_lsb #define si_lower _sifields._sigfault._addr_bnd._lower #define si_upper _sifields._sigfault._addr_bnd._upper -#define si_pkey _sifields._sigfault._pkey +#define si_pkey _sifields._sigfault._addr_pkey._pkey #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd -#ifdef __ARCH_SIGSYS #define si_call_addr _sifields._sigsys._call_addr #define si_syscall _sifields._sigsys._syscall #define si_arch _sifields._sigsys._arch -#endif /* * si_code values @@ -165,6 +170,7 @@ typedef struct siginfo { #define SI_SIGIO -5 /* sent by queued SIGIO */ #define SI_TKILL -6 /* sent by tkill system call */ #define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */ +#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */ #define SI_FROMUSER(siptr) ((siptr)->si_code <= 0) #define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0) @@ -173,14 +179,34 @@ typedef struct siginfo { * SIGILL si_codes */ #define ILL_ILLOPC 1 /* illegal opcode */ +#ifdef __bfin__ +# define ILL_ILLPARAOP 2 /* illegal opcode combine */ +#endif #define ILL_ILLOPN 2 /* illegal operand */ #define ILL_ILLADR 3 /* illegal addressing mode */ #define ILL_ILLTRP 4 /* illegal trap */ +#ifdef __bfin__ +# define ILL_ILLEXCPT 4 /* unrecoverable exception */ +#endif #define ILL_PRVOPC 5 /* privileged opcode */ #define ILL_PRVREG 6 /* privileged register */ #define ILL_COPROC 7 /* coprocessor error */ #define ILL_BADSTK 8 /* internal stack error */ -#define NSIGILL 8 +#ifdef __bfin__ +# define ILL_CPLB_VI 9 /* D/I CPLB protect violation */ +# define ILL_CPLB_MISS 10 /* D/I CPLB miss */ +# define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit */ +#endif +#ifdef __tile__ +# define ILL_DBLFLT 9 /* double fault */ +# define ILL_HARDWALL 10 /* user networks hardwall violation */ +#endif +#ifdef __ia64__ +# define ILL_BADIADDR 9 /* unimplemented instruction address */ +# define __ILL_BREAK 10 /* illegal break */ +# define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */ +#endif +#define NSIGILL 11 /* * SIGFPE si_codes @@ -193,15 +219,33 @@ typedef struct siginfo { #define FPE_FLTRES 6 /* floating point inexact result */ #define FPE_FLTINV 7 /* floating point invalid operation */ #define FPE_FLTSUB 8 /* subscript out of range */ -#define NSIGFPE 8 +#ifdef __frv__ +# define FPE_MDAOVF 9 /* media overflow */ +#endif +#ifdef __ia64__ +# define __FPE_DECOVF 9 /* decimal overflow */ +# define __FPE_DECDIV 10 /* decimal division by zero */ +# define __FPE_DECERR 11 /* packed decimal error */ +# define __FPE_INVASC 12 /* 
invalid ASCII digit */ +# define __FPE_INVDEC 13 /* invalid decimal digit */ +#endif +#define NSIGFPE 13 /* * SIGSEGV si_codes */ #define SEGV_MAPERR 1 /* address not mapped to object */ #define SEGV_ACCERR 2 /* invalid permissions for mapped object */ -#define SEGV_BNDERR 3 /* failed address bound checks */ -#define SEGV_PKUERR 4 /* failed protection key checks */ +#ifdef __bfin__ +# define SEGV_STACKFLOW 3 /* stack overflow */ +#else +# define SEGV_BNDERR 3 /* failed address bound checks */ +#endif +#ifdef __ia64__ +# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */ +#else +# define SEGV_PKUERR 4 /* failed protection key checks */ +#endif #define NSIGSEGV 4 /* @@ -210,8 +254,12 @@ typedef struct siginfo { #define BUS_ADRALN 1 /* invalid address alignment */ #define BUS_ADRERR 2 /* non-existent physical address */ #define BUS_OBJERR 3 /* object specific hardware error */ +#ifdef __bfin__ +# define BUS_OPFETCH 4 /* error from instruction fetch */ +#else /* hardware memory error consumed on a machine check: action required */ -#define BUS_MCEERR_AR 4 +# define BUS_MCEERR_AR 4 +#endif /* hardware memory error detected in process but not consumed: action optional*/ #define BUS_MCEERR_AO 5 #define NSIGBUS 5 @@ -223,9 +271,20 @@ typedef struct siginfo { #define TRAP_TRACE 2 /* process trace trap */ #define TRAP_BRANCH 3 /* process taken branch trap */ #define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */ +#ifdef __bfin__ +# define TRAP_STEP 1 /* single-step breakpoint */ +# define TRAP_TRACEFLOW 2 /* trace buffer overflow */ +# define TRAP_WATCHPT 3 /* watchpoint match */ +# define TRAP_ILLTRAP 4 /* illegal trap */ +#endif #define NSIGTRAP 4 /* + * There are an additional set of SIGTRAP si_codes used by ptrace + * that of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP) + */ + +/* * SIGCHLD si_codes */ #define CLD_EXITED 1 /* child has exited */ diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 919248fb4028..4d21191aaed0 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -160,6 +160,7 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_ALLOC_CTX 1 #define AMDGPU_CTX_OP_FREE_CTX 2 #define AMDGPU_CTX_OP_QUERY_STATE 3 +#define AMDGPU_CTX_OP_QUERY_STATE2 4 /* GPU reset status */ #define AMDGPU_CTX_NO_RESET 0 @@ -170,6 +171,13 @@ union drm_amdgpu_bo_list { /* unknown cause */ #define AMDGPU_CTX_UNKNOWN_RESET 3 +/* indicate gpu reset occured after ctx created */ +#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0) +/* indicate vram lost occured after ctx created */ +#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1) +/* indicate some job from this context once cause gpu hang */ +#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2) + /* Context priority level */ #define AMDGPU_CTX_PRIORITY_UNSET -2048 #define AMDGPU_CTX_PRIORITY_VERY_LOW -1023 @@ -869,6 +877,10 @@ struct drm_amdgpu_info_device { __u32 _pad1; /* always on cu bitmap */ __u32 cu_ao_bitmap[4][4]; + /** Starting high virtual address for UMDs. 
*/ + __u64 high_va_offset; + /** The maximum high virtual address */ + __u64 high_va_max; }; struct drm_amdgpu_info_hw_ip { diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 3ad838d3f93f..e04613d30a13 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -178,7 +178,7 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_NONE 0 #define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 #define DRM_FORMAT_MOD_VENDOR_AMD 0x02 -#define DRM_FORMAT_MOD_VENDOR_NV 0x03 +#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03 #define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 #define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 #define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06 @@ -188,7 +188,7 @@ extern "C" { #define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) #define fourcc_mod_code(vendor, val) \ - ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) + ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL)) /* * Format Modifier tokens: @@ -338,29 +338,17 @@ extern "C" { */ #define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4) -/* NVIDIA Tegra frame buffer modifiers */ - -/* - * Some modifiers take parameters, for example the number of vertical GOBs in - * a block. Reserve the lower 32 bits for parameters - */ -#define __fourcc_mod_tegra_mode_shift 32 -#define fourcc_mod_tegra_code(val, params) \ - fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params)) -#define fourcc_mod_tegra_mod(m) \ - (m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1)) -#define fourcc_mod_tegra_param(m) \ - (m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1)) +/* NVIDIA frame buffer modifiers */ /* * Tegra Tiled Layout, used by Tegra 2, 3 and 4. * * Pixels are arranged in simple tiles of 16 x 16 bytes. */ -#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0) +#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1) /* - * Tegra 16Bx2 Block Linear layout, used by TK1/TX1 + * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later * * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked * vertically by a power of 2 (1 to 32 GOBs) to form a block. @@ -380,7 +368,21 @@ extern "C" { * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format * in full detail. 
*/ -#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \ + fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf)) + +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \ + fourcc_mod_code(NVIDIA, 0x10) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \ + fourcc_mod_code(NVIDIA, 0x11) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \ + fourcc_mod_code(NVIDIA, 0x12) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \ + fourcc_mod_code(NVIDIA, 0x13) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \ + fourcc_mod_code(NVIDIA, 0x14) +#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \ + fourcc_mod_code(NVIDIA, 0x15) /* * Broadcom VC4 "T" format diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index d01087b2a651..4a54305120e0 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -135,172 +135,6 @@ struct drm_exynos_g2d_exec { __u64 async; }; -enum drm_exynos_ops_id { - EXYNOS_DRM_OPS_SRC, - EXYNOS_DRM_OPS_DST, - EXYNOS_DRM_OPS_MAX, -}; - -struct drm_exynos_sz { - __u32 hsize; - __u32 vsize; -}; - -struct drm_exynos_pos { - __u32 x; - __u32 y; - __u32 w; - __u32 h; -}; - -enum drm_exynos_flip { - EXYNOS_DRM_FLIP_NONE = (0 << 0), - EXYNOS_DRM_FLIP_VERTICAL = (1 << 0), - EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1), - EXYNOS_DRM_FLIP_BOTH = EXYNOS_DRM_FLIP_VERTICAL | - EXYNOS_DRM_FLIP_HORIZONTAL, -}; - -enum drm_exynos_degree { - EXYNOS_DRM_DEGREE_0, - EXYNOS_DRM_DEGREE_90, - EXYNOS_DRM_DEGREE_180, - EXYNOS_DRM_DEGREE_270, -}; - -enum drm_exynos_planer { - EXYNOS_DRM_PLANAR_Y, - EXYNOS_DRM_PLANAR_CB, - EXYNOS_DRM_PLANAR_CR, - EXYNOS_DRM_PLANAR_MAX, -}; - -/** - * A structure for ipp supported property list. - * - * @version: version of this structure. - * @ipp_id: id of ipp driver. - * @count: count of ipp driver. - * @writeback: flag of writeback supporting. - * @flip: flag of flip supporting. - * @degree: flag of degree information. - * @csc: flag of csc supporting. - * @crop: flag of crop supporting. - * @scale: flag of scale supporting. - * @refresh_min: min hz of refresh. - * @refresh_max: max hz of refresh. - * @crop_min: crop min resolution. - * @crop_max: crop max resolution. - * @scale_min: scale min resolution. - * @scale_max: scale max resolution. - */ -struct drm_exynos_ipp_prop_list { - __u32 version; - __u32 ipp_id; - __u32 count; - __u32 writeback; - __u32 flip; - __u32 degree; - __u32 csc; - __u32 crop; - __u32 scale; - __u32 refresh_min; - __u32 refresh_max; - __u32 reserved; - struct drm_exynos_sz crop_min; - struct drm_exynos_sz crop_max; - struct drm_exynos_sz scale_min; - struct drm_exynos_sz scale_max; -}; - -/** - * A structure for ipp config. - * - * @ops_id: property of operation directions. - * @flip: property of mirror, flip. - * @degree: property of rotation degree. - * @fmt: property of image format. - * @sz: property of image size. - * @pos: property of image position(src-cropped,dst-scaler). - */ -struct drm_exynos_ipp_config { - __u32 ops_id; - __u32 flip; - __u32 degree; - __u32 fmt; - struct drm_exynos_sz sz; - struct drm_exynos_pos pos; -}; - -enum drm_exynos_ipp_cmd { - IPP_CMD_NONE, - IPP_CMD_M2M, - IPP_CMD_WB, - IPP_CMD_OUTPUT, - IPP_CMD_MAX, -}; - -/** - * A structure for ipp property. - * - * @config: source, destination config. - * @cmd: definition of command. - * @ipp_id: id of ipp driver. - * @prop_id: id of property. - * @refresh_rate: refresh rate. 
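To make the new NVIDIA modifier encoding in the drm_fourcc.h hunk above concrete: the parameter v of DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) is log2 of the number of GOBs stacked per block, so v == 4 selects the SIXTEEN_GOB variant. A small standalone check, with the relevant definitions repeated locally so it builds without the updated uapi header; this is an illustration, not part of the patch:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* mirrors the uapi definitions added above */
#define DRM_FORMAT_MOD_VENDOR_NVIDIA	0x03
#define fourcc_mod_code(vendor, val) \
	((((uint64_t)DRM_FORMAT_MOD_VENDOR_##vendor) << 56) | \
	 ((val) & 0x00ffffffffffffffULL))
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
	fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf))

int main(void)
{
	uint64_t mod = DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4);

	/* same value as DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB */
	assert(mod == fourcc_mod_code(NVIDIA, 0x14));
	printf("modifier = 0x%016" PRIx64 "\n", mod);	/* 0x0300000000000014 */
	return 0;
}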
- */ -struct drm_exynos_ipp_property { - struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX]; - __u32 cmd; - __u32 ipp_id; - __u32 prop_id; - __u32 refresh_rate; -}; - -enum drm_exynos_ipp_buf_type { - IPP_BUF_ENQUEUE, - IPP_BUF_DEQUEUE, -}; - -/** - * A structure for ipp buffer operations. - * - * @ops_id: operation directions. - * @buf_type: definition of buffer. - * @prop_id: id of property. - * @buf_id: id of buffer. - * @handle: Y, Cb, Cr each planar handle. - * @user_data: user data. - */ -struct drm_exynos_ipp_queue_buf { - __u32 ops_id; - __u32 buf_type; - __u32 prop_id; - __u32 buf_id; - __u32 handle[EXYNOS_DRM_PLANAR_MAX]; - __u32 reserved; - __u64 user_data; -}; - -enum drm_exynos_ipp_ctrl { - IPP_CTRL_PLAY, - IPP_CTRL_STOP, - IPP_CTRL_PAUSE, - IPP_CTRL_RESUME, - IPP_CTRL_MAX, -}; - -/** - * A structure for ipp start/stop operations. - * - * @prop_id: id of property. - * @ctrl: definition of control. - */ -struct drm_exynos_ipp_cmd_ctrl { - __u32 prop_id; - __u32 ctrl; -}; - #define DRM_EXYNOS_GEM_CREATE 0x00 #define DRM_EXYNOS_GEM_MAP 0x01 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ @@ -312,11 +146,7 @@ struct drm_exynos_ipp_cmd_ctrl { #define DRM_EXYNOS_G2D_SET_CMDLIST 0x21 #define DRM_EXYNOS_G2D_EXEC 0x22 -/* IPP - Image Post Processing */ -#define DRM_EXYNOS_IPP_GET_PROPERTY 0x30 -#define DRM_EXYNOS_IPP_SET_PROPERTY 0x31 -#define DRM_EXYNOS_IPP_QUEUE_BUF 0x32 -#define DRM_EXYNOS_IPP_CMD_CTRL 0x33 +/* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */ #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) @@ -335,18 +165,8 @@ struct drm_exynos_ipp_cmd_ctrl { #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) -#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list) -#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property) -#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf) -#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl) - /* EXYNOS specific events */ #define DRM_EXYNOS_G2D_EVENT 0x80000000 -#define DRM_EXYNOS_IPP_EVENT 0x80000001 struct drm_exynos_g2d_event { struct drm_event base; @@ -357,16 +177,6 @@ struct drm_exynos_g2d_event { __u32 reserved; }; -struct drm_exynos_ipp_event { - struct drm_event base; - __u64 user_data; - __u32 tv_sec; - __u32 tv_usec; - __u32 prop_id; - __u32 reserved; - __u32 buf_id[EXYNOS_DRM_OPS_MAX]; -}; - #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index ac3c6503ca27..536ee4febd74 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -86,6 +86,62 @@ enum i915_mocs_table_index { I915_MOCS_CACHED, }; +/* + * Different engines serve different roles, and there may be more than one + * engine serving each role. enum drm_i915_gem_engine_class provides a + * classification of the role of the engine, which may be used when requesting + * operations to be performed on a certain subset of engines, or for providing + * information about that group. 
+ */ +enum drm_i915_gem_engine_class { + I915_ENGINE_CLASS_RENDER = 0, + I915_ENGINE_CLASS_COPY = 1, + I915_ENGINE_CLASS_VIDEO = 2, + I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, + + I915_ENGINE_CLASS_INVALID = -1 +}; + +/** + * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915 + * + */ + +enum drm_i915_pmu_engine_sample { + I915_SAMPLE_BUSY = 0, + I915_SAMPLE_WAIT = 1, + I915_SAMPLE_SEMA = 2 +}; + +#define I915_PMU_SAMPLE_BITS (4) +#define I915_PMU_SAMPLE_MASK (0xf) +#define I915_PMU_SAMPLE_INSTANCE_BITS (8) +#define I915_PMU_CLASS_SHIFT \ + (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS) + +#define __I915_PMU_ENGINE(class, instance, sample) \ + ((class) << I915_PMU_CLASS_SHIFT | \ + (instance) << I915_PMU_SAMPLE_BITS | \ + (sample)) + +#define I915_PMU_ENGINE_BUSY(class, instance) \ + __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY) + +#define I915_PMU_ENGINE_WAIT(class, instance) \ + __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT) + +#define I915_PMU_ENGINE_SEMA(class, instance) \ + __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) + +#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) + +#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) +#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) +#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2) +#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3) + +#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY + /* Each region is a minimum of 16k, and there are at most 255 of them. */ #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use @@ -450,6 +506,27 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49 +/* + * Query whether every context (both per-file default and user created) is + * isolated (insofar as HW supports). If this parameter is not true, then + * freshly created contexts may inherit values from an existing context, + * rather than default HW values. If true, it also ensures (insofar as HW + * supports) that all state set by this context will not leak to any other + * context. + * + * As not every engine across every gen support contexts, the returned + * value reports the support of context isolation for individual engines by + * returning a bitmask of each engine class set to true if that class supports + * isolation. + */ +#define I915_PARAM_HAS_CONTEXT_ISOLATION 50 + +/* Frequency of the command streamer timestamps given by the *_TIMESTAMP + * registers. This used to be fixed per platform but from CNL onwards, this + * might vary depending on the parts. + */ +#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51 + typedef struct drm_i915_getparam { __s32 param; /* diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h new file mode 100644 index 000000000000..af0630ba5437 --- /dev/null +++ b/include/uapi/linux/arm_sdei.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2017 Arm Ltd. 
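To show how the new i915 PMU event encoding above composes a counter selector: engine class, engine instance and sample type are packed into one integer, which userspace would place in perf_event_attr.config when opening an event on the i915 PMU. A standalone arithmetic sketch, with the defines repeated locally so it builds without the updated i915_drm.h; illustrative only:

#include <assert.h>
#include <stdio.h>

/* mirrors the encoding added above: class | instance | sample */
#define I915_PMU_SAMPLE_BITS		4
#define I915_PMU_SAMPLE_INSTANCE_BITS	8
#define I915_PMU_CLASS_SHIFT \
	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
#define __I915_PMU_ENGINE(class, instance, sample) \
	(((class) << I915_PMU_CLASS_SHIFT) | \
	 ((instance) << I915_PMU_SAMPLE_BITS) | \
	 (sample))

enum { I915_SAMPLE_BUSY, I915_SAMPLE_WAIT, I915_SAMPLE_SEMA };
enum { I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY,
       I915_ENGINE_CLASS_VIDEO, I915_ENGINE_CLASS_VIDEO_ENHANCE };

int main(void)
{
	/* "busy ticks" counter for the first video engine */
	unsigned long long config =
		__I915_PMU_ENGINE(I915_ENGINE_CLASS_VIDEO, 0, I915_SAMPLE_BUSY);

	assert(config == 0x2000);	/* class 2 << 12, instance 0, sample 0 */
	printf("perf config = 0x%llx\n", config);
	return 0;
}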
*/ +#ifndef _UAPI_LINUX_ARM_SDEI_H +#define _UAPI_LINUX_ARM_SDEI_H + +#define SDEI_1_0_FN_BASE 0xC4000020 +#define SDEI_1_0_MASK 0xFFFFFFE0 +#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n)) + +#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00) +#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01) +#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02) +#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03) +#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04) +#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05) +#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06) +#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07) +#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08) +#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09) +#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A) +#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B) +#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C) +#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D) +#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E) +#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) +#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) + +#define SDEI_VERSION_MAJOR_SHIFT 48 +#define SDEI_VERSION_MAJOR_MASK 0x7fff +#define SDEI_VERSION_MINOR_SHIFT 32 +#define SDEI_VERSION_MINOR_MASK 0xffff +#define SDEI_VERSION_VENDOR_SHIFT 0 +#define SDEI_VERSION_VENDOR_MASK 0xffffffff + +#define SDEI_VERSION_MAJOR(x) (x>>SDEI_VERSION_MAJOR_SHIFT & SDEI_VERSION_MAJOR_MASK) +#define SDEI_VERSION_MINOR(x) (x>>SDEI_VERSION_MINOR_SHIFT & SDEI_VERSION_MINOR_MASK) +#define SDEI_VERSION_VENDOR(x) (x>>SDEI_VERSION_VENDOR_SHIFT & SDEI_VERSION_VENDOR_MASK) + +/* SDEI return values */ +#define SDEI_SUCCESS 0 +#define SDEI_NOT_SUPPORTED -1 +#define SDEI_INVALID_PARAMETERS -2 +#define SDEI_DENIED -3 +#define SDEI_PENDING -5 +#define SDEI_OUT_OF_RESOURCE -10 + +/* EVENT_REGISTER flags */ +#define SDEI_EVENT_REGISTER_RM_ANY 0 +#define SDEI_EVENT_REGISTER_RM_PE 1 + +/* EVENT_STATUS return value bits */ +#define SDEI_EVENT_STATUS_RUNNING 2 +#define SDEI_EVENT_STATUS_ENABLED 1 +#define SDEI_EVENT_STATUS_REGISTERED 0 + +/* EVENT_COMPLETE status values */ +#define SDEI_EV_HANDLED 0 +#define SDEI_EV_FAILED 1 + +/* GET_INFO values */ +#define SDEI_EVENT_INFO_EV_TYPE 0 +#define SDEI_EVENT_INFO_EV_SIGNALED 1 +#define SDEI_EVENT_INFO_EV_PRIORITY 2 +#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3 +#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4 + +/* and their results */ +#define SDEI_EVENT_TYPE_PRIVATE 0 +#define SDEI_EVENT_TYPE_SHARED 1 +#define SDEI_EVENT_PRIORITY_NORMAL 0 +#define SDEI_EVENT_PRIORITY_CRITICAL 1 + +#endif /* _UAPI_LINUX_ARM_SDEI_H */ diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h new file mode 100644 index 000000000000..5cb360be2a11 --- /dev/null +++ b/include/uapi/linux/batadv_packet.h @@ -0,0 +1,644 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */ +/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors: + * + * Marek Lindner, Simon Wunderlich + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
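The SDEI_VERSION_* macros above unpack the 64-bit value returned by the SDEI_VERSION firmware call into major, minor and vendor fields. A small standalone illustration of that layout, with the shifts and masks repeated locally; the example value is made up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors the decode macros added above */
#define SDEI_VERSION_MAJOR(x)	(((x) >> 48) & 0x7fff)
#define SDEI_VERSION_MINOR(x)	(((x) >> 32) & 0xffff)
#define SDEI_VERSION_VENDOR(x)	((x) & 0xffffffff)

int main(void)
{
	/* a firmware advertising SDEI 1.0 with vendor field 0x42 */
	uint64_t v = (1ULL << 48) | (0ULL << 32) | 0x42;

	assert(SDEI_VERSION_MAJOR(v) == 1 && SDEI_VERSION_MINOR(v) == 0);
	printf("SDEI %llu.%llu, vendor 0x%llx\n",
	       (unsigned long long)SDEI_VERSION_MAJOR(v),
	       (unsigned long long)SDEI_VERSION_MINOR(v),
	       (unsigned long long)SDEI_VERSION_VENDOR(v));
	return 0;
}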
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _UAPI_LINUX_BATADV_PACKET_H_ +#define _UAPI_LINUX_BATADV_PACKET_H_ + +#include <asm/byteorder.h> +#include <linux/if_ether.h> +#include <linux/types.h> + +/** + * batadv_tp_is_error() - Check throughput meter return code for error + * @n: throughput meter return code + * + * Return: 0 when not error was detected, != 0 otherwise + */ +#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0) + +/** + * enum batadv_packettype - types for batman-adv encapsulated packets + * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV + * @BATADV_BCAST: broadcast packets carrying broadcast payload + * @BATADV_CODED: network coded packets + * @BATADV_ELP: echo location packets for B.A.T.M.A.N. V + * @BATADV_OGM2: originator messages for B.A.T.M.A.N. V + * + * @BATADV_UNICAST: unicast packets carrying unicast payload traffic + * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original + * payload packet + * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of + * the sender + * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute + * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers + */ +enum batadv_packettype { + /* 0x00 - 0x3f: local packets or special rules for handling */ + BATADV_IV_OGM = 0x00, + BATADV_BCAST = 0x01, + BATADV_CODED = 0x02, + BATADV_ELP = 0x03, + BATADV_OGM2 = 0x04, + /* 0x40 - 0x7f: unicast */ +#define BATADV_UNICAST_MIN 0x40 + BATADV_UNICAST = 0x40, + BATADV_UNICAST_FRAG = 0x41, + BATADV_UNICAST_4ADDR = 0x42, + BATADV_ICMP = 0x43, + BATADV_UNICAST_TVLV = 0x44, +#define BATADV_UNICAST_MAX 0x7f + /* 0x80 - 0xff: reserved */ +}; + +/** + * enum batadv_subtype - packet subtype for unicast4addr + * @BATADV_P_DATA: user payload + * @BATADV_P_DAT_DHT_GET: DHT request message + * @BATADV_P_DAT_DHT_PUT: DHT store message + * @BATADV_P_DAT_CACHE_REPLY: ARP reply generated by DAT + */ +enum batadv_subtype { + BATADV_P_DATA = 0x01, + BATADV_P_DAT_DHT_GET = 0x02, + BATADV_P_DAT_DHT_PUT = 0x03, + BATADV_P_DAT_CACHE_REPLY = 0x04, +}; + +/* this file is included by batctl which needs these defines */ +#define BATADV_COMPAT_VERSION 15 + +/** + * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets + * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was + * previously received from someone else than the best neighbor. + * @BATADV_PRIMARIES_FIRST_HOP: flag unused. + * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a + * one hop neighbor on the interface where it was originally received. 
+ */ +enum batadv_iv_flags { + BATADV_NOT_BEST_NEXT_HOP = 1UL << 0, + BATADV_PRIMARIES_FIRST_HOP = 1UL << 1, + BATADV_DIRECTLINK = 1UL << 2, +}; + +/** + * enum batadv_icmp_packettype - ICMP message types + * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST + * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found + * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination + * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops + * @BATADV_PARAMETER_PROBLEM: return code for malformed messages + * @BATADV_TP: throughput meter packet + */ +enum batadv_icmp_packettype { + BATADV_ECHO_REPLY = 0, + BATADV_DESTINATION_UNREACHABLE = 3, + BATADV_ECHO_REQUEST = 8, + BATADV_TTL_EXCEEDED = 11, + BATADV_PARAMETER_PROBLEM = 12, + BATADV_TP = 15, +}; + +/** + * enum batadv_mcast_flags - flags for multicast capabilities and settings + * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for + * 224.0.0.0/24 or ff02::1 + * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets + * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets + */ +enum batadv_mcast_flags { + BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0, + BATADV_MCAST_WANT_ALL_IPV4 = 1UL << 1, + BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2, +}; + +/* tt data subtypes */ +#define BATADV_TT_DATA_TYPE_MASK 0x0F + +/** + * enum batadv_tt_data_flags - flags for tt data tvlv + * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM + * @BATADV_TT_REQUEST: TT request message + * @BATADV_TT_RESPONSE: TT response message + * @BATADV_TT_FULL_TABLE: contains full table to replace existing table + */ +enum batadv_tt_data_flags { + BATADV_TT_OGM_DIFF = 1UL << 0, + BATADV_TT_REQUEST = 1UL << 1, + BATADV_TT_RESPONSE = 1UL << 2, + BATADV_TT_FULL_TABLE = 1UL << 4, +}; + +/** + * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field + * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not + */ +enum batadv_vlan_flags { + BATADV_VLAN_HAS_TAG = 1UL << 15, +}; + +/** + * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance + * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address + * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address + * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc + * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table + * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet + */ +enum batadv_bla_claimframe { + BATADV_CLAIM_TYPE_CLAIM = 0x00, + BATADV_CLAIM_TYPE_UNCLAIM = 0x01, + BATADV_CLAIM_TYPE_ANNOUNCE = 0x02, + BATADV_CLAIM_TYPE_REQUEST = 0x03, + BATADV_CLAIM_TYPE_LOOPDETECT = 0x04, +}; + +/** + * enum batadv_tvlv_type - tvlv type definitions + * @BATADV_TVLV_GW: gateway tvlv + * @BATADV_TVLV_DAT: distributed arp table tvlv + * @BATADV_TVLV_NC: network coding tvlv + * @BATADV_TVLV_TT: translation table tvlv + * @BATADV_TVLV_ROAM: roaming advertisement tvlv + * @BATADV_TVLV_MCAST: multicast capability tvlv + */ +enum batadv_tvlv_type { + BATADV_TVLV_GW = 0x01, + BATADV_TVLV_DAT = 0x02, + BATADV_TVLV_NC = 0x03, + BATADV_TVLV_TT = 0x04, + BATADV_TVLV_ROAM = 0x05, + BATADV_TVLV_MCAST = 0x06, +}; + +#pragma pack(2) +/* the destination hardware field in the ARP frame is used to + * transport the claim type and the group id + */ +struct batadv_bla_claim_dst { + __u8 magic[3]; /* FF:43:05 */ + __u8 type; /* bla_claimframe */ + __be16 group; /* group id */ +}; + +#pragma pack() + +/** + * struct batadv_ogm_packet - ogm (routing protocol) packet + * 
@packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @flags: contains routing relevant flags - see enum batadv_iv_flags + * @seqno: sequence identification + * @orig: address of the source node + * @prev_sender: address of the previous sender + * @reserved: reserved byte for alignment + * @tq: transmission quality + * @tvlv_len: length of tvlv data following the ogm header + */ +struct batadv_ogm_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 flags; + __be32 seqno; + __u8 orig[ETH_ALEN]; + __u8 prev_sender[ETH_ALEN]; + __u8 reserved; + __u8 tq; + __be16 tvlv_len; + /* __packed is not needed as the struct size is divisible by 4, + * and the largest data type in this struct has a size of 4. + */ +}; + +#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet) + +/** + * struct batadv_ogm2_packet - ogm2 (routing protocol) packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header + * @flags: reseved for routing relevant flags - currently always 0 + * @seqno: sequence number + * @orig: originator mac address + * @tvlv_len: length of the appended tvlv buffer (in bytes) + * @throughput: the currently flooded path throughput + */ +struct batadv_ogm2_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 flags; + __be32 seqno; + __u8 orig[ETH_ALEN]; + __be16 tvlv_len; + __be32 throughput; + /* __packed is not needed as the struct size is divisible by 4, + * and the largest data type in this struct has a size of 4. + */ +}; + +#define BATADV_OGM2_HLEN sizeof(struct batadv_ogm2_packet) + +/** + * struct batadv_elp_packet - elp (neighbor discovery) packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @orig: originator mac address + * @seqno: sequence number + * @elp_interval: currently used ELP sending interval in ms + */ +struct batadv_elp_packet { + __u8 packet_type; + __u8 version; + __u8 orig[ETH_ALEN]; + __be32 seqno; + __be32 elp_interval; +}; + +#define BATADV_ELP_HLEN sizeof(struct batadv_elp_packet) + +/** + * struct batadv_icmp_header - common members among all the ICMP packets + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @msg_type: ICMP packet type + * @dst: address of the destination node + * @orig: address of the source node + * @uid: local ICMP socket identifier + * @align: not used - useful for alignment purposes only + * + * This structure is used for ICMP packets parsing only and it is never sent + * over the wire. The alignment field at the end is there to ensure that + * members are padded the same way as they are in real packets. 
+ */ +struct batadv_icmp_header { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 msg_type; /* see ICMP message types above */ + __u8 dst[ETH_ALEN]; + __u8 orig[ETH_ALEN]; + __u8 uid; + __u8 align[3]; +}; + +/** + * struct batadv_icmp_packet - ICMP packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @msg_type: ICMP packet type + * @dst: address of the destination node + * @orig: address of the source node + * @uid: local ICMP socket identifier + * @reserved: not used - useful for alignment + * @seqno: ICMP sequence number + */ +struct batadv_icmp_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 msg_type; /* see ICMP message types above */ + __u8 dst[ETH_ALEN]; + __u8 orig[ETH_ALEN]; + __u8 uid; + __u8 reserved; + __be16 seqno; +}; + +/** + * struct batadv_icmp_tp_packet - ICMP TP Meter packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @msg_type: ICMP packet type + * @dst: address of the destination node + * @orig: address of the source node + * @uid: local ICMP socket identifier + * @subtype: TP packet subtype (see batadv_icmp_tp_subtype) + * @session: TP session identifier + * @seqno: the TP sequence number + * @timestamp: time when the packet has been sent. This value is filled in a + * TP_MSG and echoed back in the next TP_ACK so that the sender can compute the + * RTT. Since it is read only by the host which wrote it, there is no need to + * store it using network order + */ +struct batadv_icmp_tp_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 msg_type; /* see ICMP message types above */ + __u8 dst[ETH_ALEN]; + __u8 orig[ETH_ALEN]; + __u8 uid; + __u8 subtype; + __u8 session[2]; + __be32 seqno; + __be32 timestamp; +}; + +/** + * enum batadv_icmp_tp_subtype - ICMP TP Meter packet subtypes + * @BATADV_TP_MSG: Msg from sender to receiver + * @BATADV_TP_ACK: acknowledgment from receiver to sender + */ +enum batadv_icmp_tp_subtype { + BATADV_TP_MSG = 0, + BATADV_TP_ACK, +}; + +#define BATADV_RR_LEN 16 + +/** + * struct batadv_icmp_packet_rr - ICMP RouteRecord packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @msg_type: ICMP packet type + * @dst: address of the destination node + * @orig: address of the source node + * @uid: local ICMP socket identifier + * @rr_cur: number of entries the rr array + * @seqno: ICMP sequence number + * @rr: route record array + */ +struct batadv_icmp_packet_rr { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 msg_type; /* see ICMP message types above */ + __u8 dst[ETH_ALEN]; + __u8 orig[ETH_ALEN]; + __u8 uid; + __u8 rr_cur; + __be16 seqno; + __u8 rr[BATADV_RR_LEN][ETH_ALEN]; +}; + +#define BATADV_ICMP_MAX_PACKET_SIZE sizeof(struct batadv_icmp_packet_rr) + +/* All packet headers in front of an ethernet header have to be completely + * divisible by 2 but not by 4 to make the payload after the ethernet + * header again 4 bytes boundary aligned. + * + * A packing of 2 is necessary to avoid extra padding at the end of the struct + * caused by a structure member which is larger than two bytes. 
Otherwise + * the structure would not fulfill the previously mentioned rule to avoid the + * misalignment of the payload after the ethernet header. It may also lead to + * leakage of information when the padding it not initialized before sending. + */ +#pragma pack(2) + +/** + * struct batadv_unicast_packet - unicast packet for network payload + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @ttvn: translation table version number + * @dest: originator destination of the unicast packet + */ +struct batadv_unicast_packet { + __u8 packet_type; + __u8 version; + __u8 ttl; + __u8 ttvn; /* destination translation table version number */ + __u8 dest[ETH_ALEN]; + /* "4 bytes boundary + 2 bytes" long to make the payload after the + * following ethernet header again 4 bytes boundary aligned + */ +}; + +/** + * struct batadv_unicast_4addr_packet - extended unicast packet + * @u: common unicast packet header + * @src: address of the source + * @subtype: packet subtype + * @reserved: reserved byte for alignment + */ +struct batadv_unicast_4addr_packet { + struct batadv_unicast_packet u; + __u8 src[ETH_ALEN]; + __u8 subtype; + __u8 reserved; + /* "4 bytes boundary + 2 bytes" long to make the payload after the + * following ethernet header again 4 bytes boundary aligned + */ +}; + +/** + * struct batadv_frag_packet - fragmented packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @dest: final destination used when routing fragments + * @orig: originator of the fragment used when merging the packet + * @no: fragment number within this sequence + * @priority: priority of frame, from ToS IP precedence or 802.1p + * @reserved: reserved byte for alignment + * @seqno: sequence identification + * @total_size: size of the merged packet + */ +struct batadv_frag_packet { + __u8 packet_type; + __u8 version; /* batman version field */ + __u8 ttl; +#if defined(__BIG_ENDIAN_BITFIELD) + __u8 no:4; + __u8 priority:3; + __u8 reserved:1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + __u8 reserved:1; + __u8 priority:3; + __u8 no:4; +#else +#error "unknown bitfield endianness" +#endif + __u8 dest[ETH_ALEN]; + __u8 orig[ETH_ALEN]; + __be16 seqno; + __be16 total_size; +}; + +/** + * struct batadv_bcast_packet - broadcast packet for network payload + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @reserved: reserved byte for alignment + * @seqno: sequence identification + * @orig: originator of the broadcast packet + */ +struct batadv_bcast_packet { + __u8 packet_type; + __u8 version; /* batman version field */ + __u8 ttl; + __u8 reserved; + __be32 seqno; + __u8 orig[ETH_ALEN]; + /* "4 bytes boundary + 2 bytes" long to make the payload after the + * following ethernet header again 4 bytes boundary aligned + */ +}; + +/** + * struct batadv_coded_packet - network coded packet + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @first_source: original source of first included packet + * 
@first_orig_dest: original destinal of first included packet + * @first_crc: checksum of first included packet + * @first_ttvn: tt-version number of first included packet + * @second_ttl: ttl of second packet + * @second_dest: second receiver of this coded packet + * @second_source: original source of second included packet + * @second_orig_dest: original destination of second included packet + * @second_crc: checksum of second included packet + * @second_ttvn: tt version number of second included packet + * @coded_len: length of network coded part of the payload + */ +struct batadv_coded_packet { + __u8 packet_type; + __u8 version; /* batman version field */ + __u8 ttl; + __u8 first_ttvn; + /* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */ + __u8 first_source[ETH_ALEN]; + __u8 first_orig_dest[ETH_ALEN]; + __be32 first_crc; + __u8 second_ttl; + __u8 second_ttvn; + __u8 second_dest[ETH_ALEN]; + __u8 second_source[ETH_ALEN]; + __u8 second_orig_dest[ETH_ALEN]; + __be32 second_crc; + __be16 coded_len; +}; + +#pragma pack() + +/** + * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload + * @packet_type: batman-adv packet type, part of the general header + * @version: batman-adv protocol version, part of the genereal header + * @ttl: time to live for this packet, part of the genereal header + * @reserved: reserved field (for packet alignment) + * @src: address of the source + * @dst: address of the destination + * @tvlv_len: length of tvlv data following the unicast tvlv header + * @align: 2 bytes to align the header to a 4 byte boundary + */ +struct batadv_unicast_tvlv_packet { + __u8 packet_type; + __u8 version; /* batman version field */ + __u8 ttl; + __u8 reserved; + __u8 dst[ETH_ALEN]; + __u8 src[ETH_ALEN]; + __be16 tvlv_len; + __u16 align; +}; + +/** + * struct batadv_tvlv_hdr - base tvlv header struct + * @type: tvlv container type (see batadv_tvlv_type) + * @version: tvlv container version + * @len: tvlv container length + */ +struct batadv_tvlv_hdr { + __u8 type; + __u8 version; + __be16 len; +}; + +/** + * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv + * container + * @bandwidth_down: advertised uplink download bandwidth + * @bandwidth_up: advertised uplink upload bandwidth + */ +struct batadv_tvlv_gateway_data { + __be32 bandwidth_down; + __be32 bandwidth_up; +}; + +/** + * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container + * @flags: translation table flags (see batadv_tt_data_flags) + * @ttvn: translation table version number + * @num_vlan: number of announced VLANs. 
In the TVLV this struct is followed by + * one batadv_tvlv_tt_vlan_data object per announced vlan + */ +struct batadv_tvlv_tt_data { + __u8 flags; + __u8 ttvn; + __be16 num_vlan; +}; + +/** + * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through + * the tt tvlv container + * @crc: crc32 checksum of the entries belonging to this vlan + * @vid: vlan identifier + * @reserved: unused, useful for alignment purposes + */ +struct batadv_tvlv_tt_vlan_data { + __be32 crc; + __be16 vid; + __u16 reserved; +}; + +/** + * struct batadv_tvlv_tt_change - translation table diff data + * @flags: status indicators concerning the non-mesh client (see + * batadv_tt_client_flags) + * @reserved: reserved field - useful for alignment purposes only + * @addr: mac address of non-mesh client that triggered this tt change + * @vid: VLAN identifier + */ +struct batadv_tvlv_tt_change { + __u8 flags; + __u8 reserved[3]; + __u8 addr[ETH_ALEN]; + __be16 vid; +}; + +/** + * struct batadv_tvlv_roam_adv - roaming advertisement + * @client: mac address of roaming client + * @vid: VLAN identifier + */ +struct batadv_tvlv_roam_adv { + __u8 client[ETH_ALEN]; + __be16 vid; +}; + +/** + * struct batadv_tvlv_mcast_data - payload of a multicast tvlv + * @flags: multicast flags announced by the orig node + * @reserved: reserved field + */ +struct batadv_tvlv_mcast_data { + __u8 flags; + __u8 reserved[3]; +}; + +#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */ diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index efd641c8a5d6..ae00c99cbed0 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -1,18 +1,25 @@ +/* SPDX-License-Identifier: MIT */ /* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors: * * Matthias Schiffer * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef _UAPI_LINUX_BATMAN_ADV_H_ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4c223ab30293..db6bdc375126 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -17,7 +17,7 @@ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ -#define BPF_DW 0x18 /* double word */ +#define BPF_DW 0x18 /* double word (64-bit) */ #define BPF_XADD 0xc0 /* exclusive add */ /* alu/jmp fields */ @@ -197,8 +197,14 @@ enum bpf_attach_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) +/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 +/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative + * offset to another bpf function + */ +#define BPF_PSEUDO_CALL 1 + /* flags for BPF_MAP_UPDATE_ELEM command */ #define BPF_ANY 0 /* create new element or update existing */ #define BPF_NOEXIST 1 /* create new element if it didn't exist */ @@ -239,6 +245,7 @@ union bpf_attr { * BPF_F_NUMA_NODE is set). */ char map_name[BPF_OBJ_NAME_LEN]; + __u32 map_ifindex; /* ifindex of netdev to create on */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -635,6 +642,14 @@ union bpf_attr { * @optlen: length of optval in bytes * Return: 0 or negative error * + * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags) + * Set callback flags for sock_ops + * @bpf_sock_ops: pointer to bpf_sock_ops_kern struct + * @flags: flags value + * Return: 0 for no error + * -EINVAL if there is no full tcp socket + * bits in flags that are not supported by current kernel + * * int bpf_skb_adjust_room(skb, len_diff, mode, flags) * Grow or shrink room in sk_buff. 
* @skb: pointer to skb @@ -677,6 +692,10 @@ union bpf_attr { * @buf: buf to fill * @buf_size: size of the buf * Return : 0 on success or negative error code + * + * int bpf_override_return(pt_regs, rc) + * @pt_regs: pointer to struct pt_regs + * @rc: the return value to set */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -736,7 +755,9 @@ union bpf_attr { FN(xdp_adjust_meta), \ FN(perf_event_read_value), \ FN(perf_prog_read_value), \ - FN(getsockopt), + FN(getsockopt), \ + FN(override_return), \ + FN(sock_ops_cb_flags_set), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -888,6 +909,9 @@ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; + /* Below access go through struct xdp_rxq_info */ + __u32 ingress_ifindex; /* rxq->dev->ifindex */ + __u32 rx_queue_index; /* rxq->queue_index */ }; enum sk_action { @@ -910,6 +934,9 @@ struct bpf_prog_info { __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u64 netns_dev; + __u64 netns_ino; } __attribute__((aligned(8))); struct bpf_map_info { @@ -920,6 +947,9 @@ struct bpf_map_info { __u32 max_entries; __u32 map_flags; char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u64 netns_dev; + __u64 netns_ino; } __attribute__((aligned(8))); /* User bpf_sock_ops struct to access socket values and specify request ops @@ -931,8 +961,9 @@ struct bpf_map_info { struct bpf_sock_ops { __u32 op; union { - __u32 reply; - __u32 replylong[4]; + __u32 args[4]; /* Optionally passed to bpf program */ + __u32 reply; /* Returned by bpf program */ + __u32 replylong[4]; /* Optionally returned by bpf prog */ }; __u32 family; __u32 remote_ip4; /* Stored in network byte order */ @@ -941,8 +972,45 @@ struct bpf_sock_ops { __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ + __u32 is_fullsock; /* Some TCP fields are only valid if + * there is a full socket. If not, the + * fields read as zero. + */ + __u32 snd_cwnd; + __u32 srtt_us; /* Averaged RTT << 3 in usecs */ + __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ + __u32 state; + __u32 rtt_min; + __u32 snd_ssthresh; + __u32 rcv_nxt; + __u32 snd_nxt; + __u32 snd_una; + __u32 mss_cache; + __u32 ecn_flags; + __u32 rate_delivered; + __u32 rate_interval_us; + __u32 packets_out; + __u32 retrans_out; + __u32 total_retrans; + __u32 segs_in; + __u32 data_segs_in; + __u32 segs_out; + __u32 data_segs_out; + __u32 lost_out; + __u32 sacked_out; + __u32 sk_txhash; + __u64 bytes_received; + __u64 bytes_acked; }; +/* Definitions for bpf_sock_ops_cb_flags */ +#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) +#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) +#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) +#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently + * supported cb flags + */ + /* List of known BPF sock_ops operators. * New entries can only be added at the end */ @@ -976,6 +1044,43 @@ enum { * a congestion threshold. RTTs above * this indicate congestion */ + BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. + * Arg1: value of icsk_retransmits + * Arg2: value of icsk_rto + * Arg3: whether RTO has expired + */ + BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. + * Arg1: sequence number of 1st byte + * Arg2: # segments + * Arg3: return value of + * tcp_transmit_skb (0 => success) + */ + BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. 
+ * Arg1: old_state + * Arg2: new_state + */ +}; + +/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect + * changes between the TCP and BPF versions. Ideally this should never happen. + * If it does, we need to add code to convert them before calling + * the BPF sock_ops function. + */ +enum { + BPF_TCP_ESTABLISHED = 1, + BPF_TCP_SYN_SENT, + BPF_TCP_SYN_RECV, + BPF_TCP_FIN_WAIT1, + BPF_TCP_FIN_WAIT2, + BPF_TCP_TIME_WAIT, + BPF_TCP_CLOSE, + BPF_TCP_CLOSE_WAIT, + BPF_TCP_LAST_ACK, + BPF_TCP_LISTEN, + BPF_TCP_CLOSING, /* Now a valid state */ + BPF_TCP_NEW_SYN_RECV, + + BPF_TCP_MAX_STATES /* Leave at the end! */ }; #define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ @@ -995,7 +1100,8 @@ struct bpf_perf_event_value { #define BPF_DEVCG_DEV_CHAR (1ULL << 1) struct bpf_cgroup_dev_ctx { - __u32 access_type; /* (access << 16) | type */ + /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ + __u32 access_type; __u32 major; __u32 minor; }; diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h index 18be90725ab0..ee97668bdadb 100644 --- a/include/uapi/linux/bpf_common.h +++ b/include/uapi/linux/bpf_common.h @@ -15,9 +15,10 @@ /* ld/ldx fields */ #define BPF_SIZE(code) ((code) & 0x18) -#define BPF_W 0x00 -#define BPF_H 0x08 -#define BPF_B 0x10 +#define BPF_W 0x00 /* 32-bit */ +#define BPF_H 0x08 /* 16-bit */ +#define BPF_B 0x10 /* 8-bit */ +/* eBPF BPF_DW 0x18 64-bit */ #define BPF_MODE(code) ((code) & 0xe0) #define BPF_IMM 0x00 #define BPF_ABS 0x20 diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 96710e76d5ce..9f56fad4785b 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -132,6 +132,7 @@ enum { IFLA_CAN_TERMINATION_CONST, IFLA_CAN_BITRATE_CONST, IFLA_CAN_DATA_BITRATE_CONST, + IFLA_CAN_BITRATE_MAX, __IFLA_CAN_MAX }; diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 6665df69e26a..1df65a4c2044 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -70,6 +70,13 @@ enum devlink_command { DEVLINK_CMD_DPIPE_ENTRIES_GET, DEVLINK_CMD_DPIPE_HEADERS_GET, DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET, + DEVLINK_CMD_RESOURCE_SET, + DEVLINK_CMD_RESOURCE_DUMP, + + /* Hot driver reload, makes configuration changes take place. The + * devlink instance is not released during the process. 
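The sock_ops additions above (the args[] union member, bpf_sock_ops_cb_flags_set() and the RTO/retransmit/state callbacks) are opt-in per socket: a program must set the corresponding BPF_SOCK_OPS_*_CB_FLAG bits before those callbacks fire. A rough sketch of such a BPF_PROG_TYPE_SOCK_OPS program; it assumes the usual clang BPF build with a bpf_helpers.h that declares SEC() and the new helper, and the *_ESTABLISHED_CB op values come from the pre-existing part of the enum, not from this hunk:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC() and helper declarations, build dependent */

SEC("sockops")
int demo_sockops(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* a full socket exists now, so ask for RTO and state callbacks */
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RTO_CB_FLAG |
					  BPF_SOCK_OPS_STATE_CB_FLAG);
		break;
	case BPF_SOCK_OPS_RTO_CB:
		/* args[0] = icsk_retransmits, args[1] = icsk_rto, args[2] = expired */
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* args[0] = old state, args[1] = new state (BPF_TCP_* values) */
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";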
+ */ + DEVLINK_CMD_RELOAD, /* add new commands above here */ __DEVLINK_CMD_MAX, @@ -202,6 +209,20 @@ enum devlink_attr { DEVLINK_ATTR_PAD, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */ + DEVLINK_ATTR_RESOURCE_LIST, /* nested */ + DEVLINK_ATTR_RESOURCE, /* nested */ + DEVLINK_ATTR_RESOURCE_NAME, /* string */ + DEVLINK_ATTR_RESOURCE_ID, /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE, /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_NEW, /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_VALID, /* u8 */ + DEVLINK_ATTR_RESOURCE_SIZE_MIN, /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_MAX, /* u64 */ + DEVLINK_ATTR_RESOURCE_SIZE_GRAN, /* u64 */ + DEVLINK_ATTR_RESOURCE_UNIT, /* u8 */ + DEVLINK_ATTR_RESOURCE_OCC, /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID, /* u64 */ + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,/* u64 */ /* add new attributes above here, update the policy in devlink.c */ @@ -245,4 +266,8 @@ enum devlink_dpipe_header_id { DEVLINK_DPIPE_HEADER_IPV6, }; +enum devlink_resource_unit { + DEVLINK_RESOURCE_UNIT_ENTRY, +}; + #endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h index c10f1324b4ca..5f3c5a918f00 100644 --- a/include/uapi/linux/dvb/dmx.h +++ b/include/uapi/linux/dvb/dmx.h @@ -211,6 +211,61 @@ struct dmx_stc { __u64 stc; }; +/** + * struct dmx_buffer - dmx buffer info + * + * @index: id number of the buffer + * @bytesused: number of bytes occupied by data in the buffer (payload); + * @offset: for buffers with memory == DMX_MEMORY_MMAP; + * offset from the start of the device memory for this plane, + * (or a "cookie" that should be passed to mmap() as offset) + * @length: size in bytes of the buffer + * + * Contains data exchanged by application and driver using one of the streaming + * I/O methods. + */ +struct dmx_buffer { + __u32 index; + __u32 bytesused; + __u32 offset; + __u32 length; +}; + +/** + * struct dmx_requestbuffers - request dmx buffer information + * + * @count: number of requested buffers, + * @size: size in bytes of the requested buffer + * + * Contains data used for requesting a dmx buffer. + * All reserved fields must be set to zero. + */ +struct dmx_requestbuffers { + __u32 count; + __u32 size; +}; + +/** + * struct dmx_exportbuffer - export of dmx buffer as DMABUF file descriptor + * + * @index: id number of the buffer + * @flags: flags for newly created file, currently only O_CLOEXEC is + * supported, refer to manual of open syscall for more details + * @fd: file descriptor associated with DMABUF (set by driver) + * + * Contains data used for exporting a dmx buffer as DMABUF file descriptor. + * The buffer is identified by a 'cookie' returned by DMX_QUERYBUF + * (identical to the cookie used to mmap() the buffer to userspace). All + * reserved fields must be set to zero. The field reserved0 is expected to + * become a structure 'type' allowing an alternative layout of the structure + * content. Therefore this field should not be used for any other extensions. 
+ */ +struct dmx_exportbuffer { + __u32 index; + __u32 flags; + __s32 fd; +}; + #define DMX_START _IO('o', 41) #define DMX_STOP _IO('o', 42) #define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params) @@ -231,4 +286,10 @@ typedef struct dmx_filter dmx_filter_t; #endif -#endif /* _UAPI_DVBDMX_H_ */ +#define DMX_REQBUFS _IOWR('o', 60, struct dmx_requestbuffers) +#define DMX_QUERYBUF _IOWR('o', 61, struct dmx_buffer) +#define DMX_EXPBUF _IOWR('o', 62, struct dmx_exportbuffer) +#define DMX_QBUF _IOWR('o', 63, struct dmx_buffer) +#define DMX_DQBUF _IOWR('o', 64, struct dmx_buffer) + +#endif /* _DVBDMX_H_ */ diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h index b297b65845d6..4f9b4551c534 100644 --- a/include/uapi/linux/dvb/frontend.h +++ b/include/uapi/linux/dvb/frontend.h @@ -547,7 +547,10 @@ enum fe_interleaving { #define DTV_STAT_ERROR_BLOCK_COUNT 68 #define DTV_STAT_TOTAL_BLOCK_COUNT 69 -#define DTV_MAX_COMMAND DTV_STAT_TOTAL_BLOCK_COUNT +/* Physical layer scrambling */ +#define DTV_SCRAMBLING_SEQUENCE_INDEX 70 + +#define DTV_MAX_COMMAND DTV_SCRAMBLING_SEQUENCE_INDEX /** * enum fe_pilot - Type of pilot tone @@ -756,16 +759,15 @@ enum fecap_scale_params { /** * struct dtv_stats - Used for reading a DTV status property * - * @scale: Filled with enum fecap_scale_params - the scale - * in usage for that parameter - * - * The ``{unnamed_union}`` may have either one of the values below: + * @scale: + * Filled with enum fecap_scale_params - the scale in usage + * for that parameter * - * %svalue + * @svalue: * integer value of the measure, for %FE_SCALE_DECIBEL, * used for dB measures. The unit is 0.001 dB. * - * %uvalue + * @uvalue: * unsigned integer value of the measure, used when @scale is * either %FE_SCALE_RELATIVE or %FE_SCALE_COUNTER. * @@ -828,19 +830,19 @@ struct dtv_fe_stats { /** * struct dtv_property - store one of frontend command and its value * - * @cmd: Digital TV command. - * @reserved: Not used. - * @u: Union with the values for the command. - * @result: Unused - * - * The @u union may have either one of the values below: + * @cmd: Digital TV command. + * @reserved: Not used. + * @u: Union with the values for the command. + * @u.data: A unsigned 32 bits integer with command value. + * @u.buffer: Struct to store bigger properties. + * Currently unused. + * @u.buffer.data: an unsigned 32-bits array. + * @u.buffer.len: number of elements of the buffer. + * @u.buffer.reserved1: Reserved. + * @u.buffer.reserved2: Reserved. + * @u.st: a &struct dtv_fe_stats array of statistics. + * @result: Currently unused. * - * %data - * an unsigned 32-bits number. - * %st - * a &struct dtv_fe_stats array of statistics. - * %buffer - * a buffer of up to 32 characters (currently unused). 
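The new dmx_buffer/dmx_requestbuffers structures and the DMX_REQBUFS, DMX_QUERYBUF, DMX_EXPBUF, DMX_QBUF and DMX_DQBUF ioctls give the demux a memory-mapped streaming path modelled on V4L2 streaming I/O. A rough userspace sketch of one capture cycle under that assumption; the demux device is assumed to be open with a filter already configured, and error handling, munmap and the DMABUF export path are omitted:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dvb/dmx.h>

static int demo_stream(int fd)
{
	struct dmx_requestbuffers req = { .count = 4, .size = 188 * 1024 };
	struct dmx_buffer buf;
	void *maps[4];
	unsigned int i;

	if (ioctl(fd, DMX_REQBUFS, &req) || req.count > 4)
		return -1;

	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.index = i;
		if (ioctl(fd, DMX_QUERYBUF, &buf))
			return -1;
		maps[i] = mmap(NULL, buf.length, PROT_READ, MAP_SHARED,
			       fd, buf.offset);
		if (maps[i] == MAP_FAILED || ioctl(fd, DMX_QBUF, &buf))
			return -1;
	}

	/* wait for a filled buffer, consume it, then hand it back */
	memset(&buf, 0, sizeof(buf));
	if (ioctl(fd, DMX_DQBUF, &buf))
		return -1;
	printf("buffer %u holds %u bytes\n", buf.index, buf.bytesused);
	return ioctl(fd, DMX_QBUF, &buf);
}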
*/ struct dtv_property { __u32 cmd; diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h index 02e32ea83984..2c5cffe6d2a0 100644 --- a/include/uapi/linux/dvb/version.h +++ b/include/uapi/linux/dvb/version.h @@ -25,6 +25,6 @@ #define _DVBVERSION_H_ #define DVB_API_VERSION 5 -#define DVB_API_VERSION_MINOR 10 +#define DVB_API_VERSION_MINOR 11 #endif /*_DVBVERSION_H_*/ diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h index 4d51f98182bb..df3d7028c807 100644 --- a/include/uapi/linux/dvb/video.h +++ b/include/uapi/linux/dvb/video.h @@ -83,11 +83,11 @@ typedef enum { #define VIDEO_CMD_CONTINUE (3) /* Flags for VIDEO_CMD_FREEZE */ -#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0) +#define VIDEO_CMD_FREEZE_TO_BLACK (1 << 0) /* Flags for VIDEO_CMD_STOP */ -#define VIDEO_CMD_STOP_TO_BLACK (1 << 0) -#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1) +#define VIDEO_CMD_STOP_TO_BLACK (1 << 0) +#define VIDEO_CMD_STOP_IMMEDIATELY (1 << 1) /* Play input formats: */ /* The decoder has no special format requirements */ @@ -124,8 +124,8 @@ struct video_command { /* FIELD_UNKNOWN can be used if the hardware does not know whether the Vsync is for an odd, even or progressive (i.e. non-interlaced) field. */ -#define VIDEO_VSYNC_FIELD_UNKNOWN (0) -#define VIDEO_VSYNC_FIELD_ODD (1) +#define VIDEO_VSYNC_FIELD_UNKNOWN (0) +#define VIDEO_VSYNC_FIELD_ODD (1) #define VIDEO_VSYNC_FIELD_EVEN (2) #define VIDEO_VSYNC_FIELD_PROGRESSIVE (3) @@ -133,8 +133,8 @@ struct video_event { __s32 type; #define VIDEO_EVENT_SIZE_CHANGED 1 #define VIDEO_EVENT_FRAME_RATE_CHANGED 2 -#define VIDEO_EVENT_DECODER_STOPPED 3 -#define VIDEO_EVENT_VSYNC 4 +#define VIDEO_EVENT_DECODER_STOPPED 3 +#define VIDEO_EVENT_VSYNC 4 /* unused, make sure to use atomic time for y2038 if it ever gets used */ long timestamp; union { @@ -268,9 +268,9 @@ typedef __u16 video_attributes_t; #define VIDEO_GET_PTS _IOR('o', 57, __u64) /* Read the number of displayed frames since the decoder was started */ -#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64) +#define VIDEO_GET_FRAME_COUNT _IOR('o', 58, __u64) -#define VIDEO_COMMAND _IOWR('o', 59, struct video_command) -#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command) +#define VIDEO_COMMAND _IOWR('o', 59, struct video_command) +#define VIDEO_TRY_COMMAND _IOWR('o', 60, struct video_command) #endif /* _UAPI_DVBVIDEO_H_ */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index bb6836986200..3bf73fb58045 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -396,6 +396,7 @@ typedef struct elf64_shdr { #define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */ #define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */ #define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */ +#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */ #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ #define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */ diff --git a/include/uapi/linux/erspan.h b/include/uapi/linux/erspan.h new file mode 100644 index 000000000000..841573019ae1 --- /dev/null +++ b/include/uapi/linux/erspan.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * ERSPAN Tunnel Metadata + * + * Copyright (c) 2018 VMware + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 
version 2 + * as published by the Free Software Foundation. + * + * Userspace API for metadata mode ERSPAN tunnel + */ +#ifndef _UAPI_ERSPAN_H +#define _UAPI_ERSPAN_H + +#include <linux/types.h> /* For __beXX in userspace */ +#include <asm/byteorder.h> + +/* ERSPAN version 2 metadata header */ +struct erspan_md2 { + __be32 timestamp; + __be16 sgt; /* security group tag */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hwid_upper:2, + ft:5, + p:1; + __u8 o:1, + gra:2, + dir:1, + hwid:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 p:1, + ft:5, + hwid_upper:2; + __u8 hwid:4, + dir:1, + gra:2, + o:1; +#else +#error "Please fix <asm/byteorder.h>" +#endif +}; + +struct erspan_metadata { + int version; + union { + __be32 index; /* Version 1 (type II)*/ + struct erspan_md2 md2; /* Version 2 (type III) */ + } u; +}; + +#endif /* _UAPI_ERSPAN_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index ac71559314e7..44a0b675a6bc 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1686,6 +1686,7 @@ enum ethtool_reset_flags { ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ ETH_RESET_RAM = 1 << 7, /* RAM shared between * multiple components */ + ETH_RESET_AP = 1 << 8, /* Application processor */ ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to * this interface */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 4199f8acbce5..d2a8313fabd7 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t; /* per-IO, return -EAGAIN if operation would block */ #define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008) +/* per-IO O_APPEND */ +#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010) + /* mask of flags supported by the kernel */ -#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT) +#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ + RWF_APPEND) #endif /* _UAPI_LINUX_FS_H */ diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h index 5156bad77b47..2dc10a034de1 100644 --- a/include/uapi/linux/gfs2_ondisk.h +++ b/include/uapi/linux/gfs2_ondisk.h @@ -187,10 +187,19 @@ struct gfs2_rgrp { __be32 rg_flags; __be32 rg_free; __be32 rg_dinodes; - __be32 __pad; + union { + __be32 __pad; + __be32 rg_skip; /* Distance to the next rgrp in fs blocks */ + }; __be64 rg_igeneration; - - __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */ + /* The following 3 fields are duplicated from gfs2_rindex to reduce + reliance on the rindex */ + __be64 rg_data0; /* First data location */ + __be32 rg_data; /* Number of data blocks in rgrp */ + __be32 rg_bitbytes; /* Number of bytes in data bitmaps */ + __be32 rg_crc; /* crc32 of the structure with this field 0 */ + + __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */ }; /* @@ -394,7 +403,36 @@ struct gfs2_ea_header { * Log header structure */ -#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */ +#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */ +#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */ +#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */ +#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */ +#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */ +#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */ +#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */ + +/* Log flush callers */ +#define GFS2_LFC_SHUTDOWN 0x00000100 
+#define GFS2_LFC_JDATA_WPAGES 0x00000200 +#define GFS2_LFC_SET_FLAGS 0x00000400 +#define GFS2_LFC_AIL_EMPTY_GL 0x00000800 +#define GFS2_LFC_AIL_FLUSH 0x00001000 +#define GFS2_LFC_RGRP_GO_SYNC 0x00002000 +#define GFS2_LFC_INODE_GO_SYNC 0x00004000 +#define GFS2_LFC_INODE_GO_INVAL 0x00008000 +#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000 +#define GFS2_LFC_KILL_SB 0x00020000 +#define GFS2_LFC_DO_SYNC 0x00040000 +#define GFS2_LFC_INPLACE_RESERVE 0x00080000 +#define GFS2_LFC_WRITE_INODE 0x00100000 +#define GFS2_LFC_MAKE_FS_RO 0x00200000 +#define GFS2_LFC_SYNC_FS 0x00400000 +#define GFS2_LFC_EVICT_INODE 0x00800000 +#define GFS2_LFC_TRANS_END 0x01000000 +#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000 +#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000 + +#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash)) struct gfs2_log_header { struct gfs2_meta_header lh_header; @@ -403,7 +441,21 @@ struct gfs2_log_header { __be32 lh_flags; /* GFS2_LOG_HEAD_... */ __be32 lh_tail; /* Block number of log tail */ __be32 lh_blkno; - __be32 lh_hash; + __be32 lh_hash; /* crc up to here with this field 0 */ + + /* Version 2 additional fields start here */ + __be32 lh_crc; /* crc32c from lh_nsec to end of block */ + __be32 lh_nsec; /* Nanoseconds of timestamp */ + __be64 lh_sec; /* Seconds of timestamp */ + __be64 lh_addr; /* Block addr of this log header (absolute) */ + __be64 lh_jinode; /* Journal inode number */ + __be64 lh_statfs_addr; /* Local statfs inode number */ + __be64 lh_quota_addr; /* Local quota change inode number */ + + /* Statfs local changes (i.e. diff from global statfs) */ + __be64 lh_local_total; + __be64 lh_local_free; + __be64 lh_local_dinodes; }; /* diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h index fe648032d6b9..f71a1751cacf 100644 --- a/include/uapi/linux/i2c.h +++ b/include/uapi/linux/i2c.h @@ -72,6 +72,9 @@ struct i2c_msg { #define I2C_M_RD 0x0001 /* read data, from slave to master */ /* I2C_M_RD is guaranteed to be 0x0001! 
*/ #define I2C_M_TEN 0x0010 /* this is a ten bit chip address */ +#define I2C_M_DMA_SAFE 0x0200 /* the buffer of this message is DMA safe */ + /* makes only sense in kernelspace */ + /* userspace buffers are copied anyway */ #define I2C_M_RECV_LEN 0x0400 /* length will be first received byte */ #define I2C_M_NO_RD_ACK 0x0800 /* if I2C_FUNC_PROTOCOL_MANGLING */ #define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 144de4d2f385..f8cb5760ea4f 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -48,6 +48,7 @@ #define ETH_P_PUP 0x0200 /* Xerox PUP packet */ #define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */ #define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */ +#define ETH_P_ERSPAN2 0x22EB /* ERSPAN version 2 (type III) */ #define ETH_P_IP 0x0800 /* Internet Protocol packet */ #define ETH_P_X25 0x0805 /* CCITT X.25 */ #define ETH_P_ARP 0x0806 /* Address Resolution packet */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 19fc02660e0c..6d9447700e18 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -161,6 +161,9 @@ enum { IFLA_EVENT, IFLA_NEW_NETNSID, IFLA_IF_NETNSID, + IFLA_CARRIER_UP_COUNT, + IFLA_CARRIER_DOWN_COUNT, + IFLA_NEW_IFINDEX, __IFLA_MAX }; @@ -732,6 +735,8 @@ enum { IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, + IFLA_VF_STATS_RX_DROPPED, + IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 719d243471f4..98e4d5d7c45c 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -22,8 +22,13 @@ #define MACSEC_KEYID_LEN 16 -#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL -#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL +/* cipher IDs as per IEEE802.1AEbn-2011 */ +#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL +#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL + +/* deprecated cipher ID for GCM-AES-128 */ +#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL +#define MACSEC_DEFAULT_CIPHER_ALT MACSEC_CIPHER_ID_GCM_AES_128 #define MACSEC_MIN_ICV_LEN 8 #define MACSEC_MAX_ICV_LEN 32 diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index 030d3e6d6029..ee432cd3018c 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -57,6 +57,8 @@ */ #define TUNSETVNETBE _IOW('T', 222, int) #define TUNGETVNETBE _IOR('T', 223, int) +#define TUNSETSTEERINGEBPF _IOR('T', 224, int) +#define TUNSETFILTEREBPF _IOR('T', 225, int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index e68dadbd6d45..1b3d148c4560 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -137,6 +137,9 @@ enum { IFLA_GRE_IGNORE_DF, IFLA_GRE_FWMARK, IFLA_GRE_ERSPAN_INDEX, + IFLA_GRE_ERSPAN_VER, + IFLA_GRE_ERSPAN_DIR, + IFLA_GRE_ERSPAN_HWID, __IFLA_GRE_MAX, }; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 817d807e9481..14565d703291 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -92,6 +92,8 @@ enum { INET_DIAG_BC_D_COND, INET_DIAG_BC_DEV_COND, /* u32 ifindex */ INET_DIAG_BC_MARK_COND, + INET_DIAG_BC_S_EQ, + INET_DIAG_BC_D_EQ, }; struct inet_diag_hostcond { diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 8c5a0bf6ee35..7288a7c573cc 100644 
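The TUNSETSTEERINGEBPF ioctl added to if_tun.h above takes a pointer to a BPF program file descriptor. A hedged sketch of attaching a steering program to a multi-queue tap device; obtaining prog_fd (for example via bpf(BPF_PROG_LOAD, ...)) and the interface name are assumed to happen elsewhere:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/if_tun.h>

    /* Returns the tap fd with the steering program attached, or -1 on error. */
    static int tun_attach_steering(const char *ifname, int prog_fd)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

            /* TUNSETSTEERINGEBPF takes a pointer to the BPF program fd. */
            if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
                ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
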
--- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h @@ -21,10 +21,21 @@ /* * The event structure itself + * Note that __USE_TIME_BITS64 is defined by libc based on + * application's request to use 64 bit time_t. */ struct input_event { +#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) struct timeval time; +#define input_event_sec time.tv_sec +#define input_event_usec time.tv_usec +#else + __kernel_ulong_t __sec; + __kernel_ulong_t __usec; +#define input_event_sec __sec +#define input_event_usec __usec +#endif __u16 type; __u16 code; __s32 value; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 6e80501368ae..f4cab5b3ba9a 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -58,7 +58,8 @@ struct kfd_ioctl_create_queue_args { __u64 eop_buffer_address; /* to KFD */ __u64 eop_buffer_size; /* to KFD */ __u64 ctx_save_restore_address; /* to KFD */ - __u64 ctx_save_restore_size; /* to KFD */ + __u32 ctx_save_restore_size; /* to KFD */ + __u32 ctl_stack_size; /* to KFD */ }; struct kfd_ioctl_destroy_queue_args { @@ -261,6 +262,13 @@ struct kfd_ioctl_get_tile_config_args { */ }; +struct kfd_ioctl_set_trap_handler_args { + uint64_t tba_addr; /* to KFD */ + uint64_t tma_addr; /* to KFD */ + uint32_t gpu_id; /* to KFD */ + uint32_t pad; +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -321,7 +329,10 @@ struct kfd_ioctl_get_tile_config_args { #define AMDKFD_IOC_GET_TILE_CONFIG \ AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args) +#define AMDKFD_IOC_SET_TRAP_HANDLER \ + AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x13 +#define AMDKFD_COMMAND_END 0x14 #endif diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index d84ce5c1c9aa..7d570c7bd117 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 { * TUNNEL_MODIFY - CONN_ID, udpcsum * TUNNEL_GETSTATS - CONN_ID, (stats) * TUNNEL_GET - CONN_ID, (...) - * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec + * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec * SESSION_DELETE - SESSION_ID * SESSION_MODIFY - SESSION_ID, data_seq * SESSION_GET - SESSION_ID, (...) 
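A hedged sketch of how the input_event_sec/input_event_usec accessors introduced in input.h above keep event readers source-compatible with both the timeval layout and the y2038-safe __sec/__usec pair; the event node path is an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
            struct input_event ev;
            int fd = open("/dev/input/event0", O_RDONLY);   /* assumed node */

            if (fd < 0)
                    return 1;

            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    /* Same source works for both struct layouts. */
                    printf("%ld.%06ld: type %u code %u value %d\n",
                           (long)ev.input_event_sec, (long)ev.input_event_usec,
                           ev.type, ev.code, ev.value);
            }
            return 0;
    }
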
@@ -94,10 +94,10 @@ enum { L2TP_ATTR_NONE, /* no data */ L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ - L2TP_ATTR_OFFSET, /* u16 */ + L2TP_ATTR_OFFSET, /* u16 (not used) */ L2TP_ATTR_DATA_SEQ, /* u16 */ L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ - L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */ + L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */ L2TP_ATTR_PROTO_VERSION, /* u8 */ L2TP_ATTR_IFNAME, /* string */ L2TP_ATTR_CONN_ID, /* u32 */ diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index c3aef4316fbf..4fe580d36e41 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -47,12 +47,14 @@ #define LIRC_MODE_RAW 0x00000001 #define LIRC_MODE_PULSE 0x00000002 #define LIRC_MODE_MODE2 0x00000004 +#define LIRC_MODE_SCANCODE 0x00000008 #define LIRC_MODE_LIRCCODE 0x00000010 #define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) #define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) #define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) +#define LIRC_CAN_SEND_SCANCODE LIRC_MODE2SEND(LIRC_MODE_SCANCODE) #define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) #define LIRC_CAN_SEND_MASK 0x0000003f @@ -64,6 +66,7 @@ #define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) #define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) #define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) +#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE) #define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) #define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) @@ -131,4 +134,83 @@ #define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) +/* + * struct lirc_scancode - decoded scancode with protocol for use with + * LIRC_MODE_SCANCODE + * + * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR + * was decoded. + * @flags: should be 0 for transmit. When receiving scancodes, + * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set + * depending on the protocol + * @rc_proto: see enum rc_proto + * @keycode: the translated keycode. Set to 0 for transmit. 
+ * @scancode: the scancode received or to be sent + */ +struct lirc_scancode { + __u64 timestamp; + __u16 flags; + __u16 rc_proto; + __u32 keycode; + __u64 scancode; +}; + +/* Set if the toggle bit of rc-5 or rc-6 is enabled */ +#define LIRC_SCANCODE_FLAG_TOGGLE 1 +/* Set if this is a nec or sanyo repeat */ +#define LIRC_SCANCODE_FLAG_REPEAT 2 + +/** + * enum rc_proto - the Remote Controller protocol + * + * @RC_PROTO_UNKNOWN: Protocol not known + * @RC_PROTO_OTHER: Protocol known but proprietary + * @RC_PROTO_RC5: Philips RC5 protocol + * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol + * @RC_PROTO_RC5_SZ: StreamZap variant of RC5 + * @RC_PROTO_JVC: JVC protocol + * @RC_PROTO_SONY12: Sony 12 bit protocol + * @RC_PROTO_SONY15: Sony 15 bit protocol + * @RC_PROTO_SONY20: Sony 20 bit protocol + * @RC_PROTO_NEC: NEC protocol + * @RC_PROTO_NECX: Extended NEC protocol + * @RC_PROTO_NEC32: NEC 32 bit protocol + * @RC_PROTO_SANYO: Sanyo protocol + * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard + * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse + * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol + * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol + * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol + * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol + * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol + * @RC_PROTO_SHARP: Sharp protocol + * @RC_PROTO_XMP: XMP protocol + * @RC_PROTO_CEC: CEC protocol + */ +enum rc_proto { + RC_PROTO_UNKNOWN = 0, + RC_PROTO_OTHER = 1, + RC_PROTO_RC5 = 2, + RC_PROTO_RC5X_20 = 3, + RC_PROTO_RC5_SZ = 4, + RC_PROTO_JVC = 5, + RC_PROTO_SONY12 = 6, + RC_PROTO_SONY15 = 7, + RC_PROTO_SONY20 = 8, + RC_PROTO_NEC = 9, + RC_PROTO_NECX = 10, + RC_PROTO_NEC32 = 11, + RC_PROTO_SANYO = 12, + RC_PROTO_MCIR2_KBD = 13, + RC_PROTO_MCIR2_MSE = 14, + RC_PROTO_RC6_0 = 15, + RC_PROTO_RC6_6A_20 = 16, + RC_PROTO_RC6_6A_24 = 17, + RC_PROTO_RC6_6A_32 = 18, + RC_PROTO_RC6_MCE = 19, + RC_PROTO_SHARP = 20, + RC_PROTO_XMP = 21, + RC_PROTO_CEC = 22, +}; + #endif diff --git a/include/uapi/linux/lp.h b/include/uapi/linux/lp.h index dafcfe4e4834..8589a27037d7 100644 --- a/include/uapi/linux/lp.h +++ b/include/uapi/linux/lp.h @@ -8,6 +8,8 @@ #ifndef _UAPI_LINUX_LP_H #define _UAPI_LINUX_LP_H +#include <linux/types.h> +#include <linux/ioctl.h> /* * Per POSIX guidelines, this module reserves the LP and lp prefixes @@ -88,7 +90,15 @@ #define LPGETSTATS 0x060d /* get statistics (struct lp_stats) */ #endif #define LPGETFLAGS 0x060e /* get status flags */ -#define LPSETTIMEOUT 0x060f /* set parport timeout */ +#define LPSETTIMEOUT_OLD 0x060f /* set parport timeout */ +#define LPSETTIMEOUT_NEW \ + _IOW(0x6, 0xf, __s64[2]) /* set parport timeout */ +#if __BITS_PER_LONG == 64 +#define LPSETTIMEOUT LPSETTIMEOUT_OLD +#else +#define LPSETTIMEOUT (sizeof(time_t) > sizeof(__kernel_long_t) ? \ + LPSETTIMEOUT_NEW : LPSETTIMEOUT_OLD) +#endif /* timeout for printk'ing a timeout, in jiffies (100ths of a second). 
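A hedged sketch of consuming the new LIRC_MODE_SCANCODE mode from lirc.h above: the receiver is switched with the existing LIRC_SET_REC_MODE ioctl and read() then returns struct lirc_scancode records; the /dev/lirc0 path is an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/lirc.h>

    int main(void)
    {
            struct lirc_scancode sc;
            unsigned int mode = LIRC_MODE_SCANCODE;
            int fd = open("/dev/lirc0", O_RDONLY);  /* assumed device node */

            if (fd < 0 || ioctl(fd, LIRC_SET_REC_MODE, &mode) < 0)
                    return 1;

            while (read(fd, &sc, sizeof(sc)) == sizeof(sc)) {
                    /* Each record carries protocol, scancode and mapped keycode. */
                    printf("proto %u scancode 0x%llx keycode %u%s\n",
                           sc.rc_proto, (unsigned long long)sc.scancode,
                           sc.keycode,
                           (sc.flags & LIRC_SCANCODE_FLAG_REPEAT) ? " (repeat)" : "");
            }
            return 0;
    }
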
This is also used for re-checking error conditions if LP_ABORT is diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index 3f03567631cb..7e27070b9440 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -15,54 +15,6 @@ #include <linux/types.h> -struct nd_cmd_smart { - __u32 status; - __u8 data[128]; -} __packed; - -#define ND_SMART_HEALTH_VALID (1 << 0) -#define ND_SMART_SPARES_VALID (1 << 1) -#define ND_SMART_USED_VALID (1 << 2) -#define ND_SMART_TEMP_VALID (1 << 3) -#define ND_SMART_CTEMP_VALID (1 << 4) -#define ND_SMART_ALARM_VALID (1 << 9) -#define ND_SMART_SHUTDOWN_VALID (1 << 10) -#define ND_SMART_VENDOR_VALID (1 << 11) -#define ND_SMART_SPARE_TRIP (1 << 0) -#define ND_SMART_TEMP_TRIP (1 << 1) -#define ND_SMART_CTEMP_TRIP (1 << 2) -#define ND_SMART_NON_CRITICAL_HEALTH (1 << 0) -#define ND_SMART_CRITICAL_HEALTH (1 << 1) -#define ND_SMART_FATAL_HEALTH (1 << 2) - -struct nd_smart_payload { - __u32 flags; - __u8 reserved0[4]; - __u8 health; - __u8 spares; - __u8 life_used; - __u8 alarm_flags; - __u16 temperature; - __u16 ctrl_temperature; - __u8 reserved1[15]; - __u8 shutdown_state; - __u32 vendor_size; - __u8 vendor_data[92]; -} __packed; - -struct nd_cmd_smart_threshold { - __u32 status; - __u8 data[8]; -} __packed; - -struct nd_smart_threshold_payload { - __u8 alarm_control; - __u8 reserved0; - __u16 temperature; - __u8 spares; - __u8 reserved[3]; -} __packed; - struct nd_cmd_dimm_flags { __u32 status; __u32 flags; @@ -211,12 +163,6 @@ static inline const char *nvdimm_cmd_name(unsigned cmd) #define ND_IOCTL 'N' -#define ND_IOCTL_SMART _IOWR(ND_IOCTL, ND_CMD_SMART,\ - struct nd_cmd_smart) - -#define ND_IOCTL_SMART_THRESHOLD _IOWR(ND_IOCTL, ND_CMD_SMART_THRESHOLD,\ - struct nd_cmd_smart_threshold) - #define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\ struct nd_cmd_dimm_flags) @@ -263,7 +209,7 @@ enum nd_driver_flags { }; enum { - ND_MIN_NAMESPACE_SIZE = 0x00400000, + ND_MIN_NAMESPACE_SIZE = PAGE_SIZE, }; enum ars_masks { diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 57ccfb32e87f..9574bd40870b 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -101,12 +101,16 @@ enum ip_conntrack_status { IPS_HELPER_BIT = 13, IPS_HELPER = (1 << IPS_HELPER_BIT), + /* Conntrack has been offloaded to flow table. */ + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT), + /* Be careful here, modifying these bits can make things messy, * so don't let users modify them directly. 
*/ IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | - IPS_SEQ_ADJUST | IPS_TEMPLATE), + IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), __IPS_MAX_BIT = 14, }; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index a3ee277b17a1..66dceee0ae30 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -92,6 +92,9 @@ enum nft_verdicts { * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes) * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes) * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes) + * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes) + * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes) + * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -116,6 +119,9 @@ enum nf_tables_msg_types { NFT_MSG_GETOBJ, NFT_MSG_DELOBJ, NFT_MSG_GETOBJ_RESET, + NFT_MSG_NEWFLOWTABLE, + NFT_MSG_GETFLOWTABLE, + NFT_MSG_DELFLOWTABLE, NFT_MSG_MAX, }; @@ -168,6 +174,8 @@ enum nft_table_attributes { NFTA_TABLE_NAME, NFTA_TABLE_FLAGS, NFTA_TABLE_USE, + NFTA_TABLE_HANDLE, + NFTA_TABLE_PAD, __NFTA_TABLE_MAX }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) @@ -311,6 +319,7 @@ enum nft_set_desc_attributes { * @NFTA_SET_GC_INTERVAL: garbage collection interval (NLA_U32) * @NFTA_SET_USERDATA: user data (NLA_BINARY) * @NFTA_SET_OBJ_TYPE: stateful object type (NLA_U32: NFT_OBJECT_*) + * @NFTA_SET_HANDLE: set handle (NLA_U64) */ enum nft_set_attributes { NFTA_SET_UNSPEC, @@ -329,6 +338,7 @@ enum nft_set_attributes { NFTA_SET_USERDATA, NFTA_SET_PAD, NFTA_SET_OBJ_TYPE, + NFTA_SET_HANDLE, __NFTA_SET_MAX }; #define NFTA_SET_MAX (__NFTA_SET_MAX - 1) @@ -777,6 +787,7 @@ enum nft_exthdr_attributes { * @NFT_META_OIFGROUP: packet output interface group * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) * @NFT_META_PRANDOM: a 32bit pseudo-random number + * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp) */ enum nft_meta_keys { NFT_META_LEN, @@ -804,6 +815,7 @@ enum nft_meta_keys { NFT_META_OIFGROUP, NFT_META_CGROUP, NFT_META_PRANDOM, + NFT_META_SECPATH, }; /** @@ -949,6 +961,17 @@ enum nft_ct_attributes { }; #define NFTA_CT_MAX (__NFTA_CT_MAX - 1) +/** + * enum nft_flow_attributes - ct offload expression attributes + * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING) + */ +enum nft_offload_attributes { + NFTA_FLOW_UNSPEC, + NFTA_FLOW_TABLE_NAME, + __NFTA_FLOW_MAX, +}; +#define NFTA_FLOW_MAX (__NFTA_FLOW_MAX - 1) + enum nft_limit_type { NFT_LIMIT_PKTS, NFT_LIMIT_PKT_BYTES @@ -1295,6 +1318,7 @@ enum nft_ct_helper_attributes { * @NFTA_OBJ_TYPE: stateful object type (NLA_U32) * @NFTA_OBJ_DATA: stateful object data (NLA_NESTED) * @NFTA_OBJ_USE: number of references to this expression (NLA_U32) + * @NFTA_OBJ_HANDLE: object handle (NLA_U64) */ enum nft_object_attributes { NFTA_OBJ_UNSPEC, @@ -1303,11 +1327,63 @@ enum nft_object_attributes { NFTA_OBJ_TYPE, NFTA_OBJ_DATA, NFTA_OBJ_USE, + NFTA_OBJ_HANDLE, + NFTA_OBJ_PAD, __NFTA_OBJ_MAX }; #define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1) /** + * enum nft_flowtable_attributes - nf_tables flow table netlink attributes + * + * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING) + * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING) + * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32) + * @NFTA_FLOWTABLE_USE: number 
of references to this flow table (NLA_U32) + * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64) + */ +enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC, + NFTA_FLOWTABLE_TABLE, + NFTA_FLOWTABLE_NAME, + NFTA_FLOWTABLE_HOOK, + NFTA_FLOWTABLE_USE, + NFTA_FLOWTABLE_HANDLE, + NFTA_FLOWTABLE_PAD, + __NFTA_FLOWTABLE_MAX +}; +#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1) + +/** + * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes + * + * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32) + * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32) + * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED) + */ +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC, + NFTA_FLOWTABLE_HOOK_NUM, + NFTA_FLOWTABLE_HOOK_PRIORITY, + NFTA_FLOWTABLE_HOOK_DEVS, + __NFTA_FLOWTABLE_HOOK_MAX +}; +#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1) + +/** + * enum nft_device_attributes - nf_tables device netlink attributes + * + * @NFTA_DEVICE_NAME: name of this device (NLA_STRING) + */ +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC, + NFTA_DEVICE_NAME, + __NFTA_DEVICE_MAX +}; +#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1) + + +/** * enum nft_trace_attributes - nf_tables trace netlink attributes * * @NFTA_TRACE_TABLE: name of the table (NLA_STRING) diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h index 07e5e9d47882..d4d1943dcd11 100644 --- a/include/uapi/linux/netfilter/xt_connlimit.h +++ b/include/uapi/linux/netfilter/xt_connlimit.h @@ -27,7 +27,7 @@ struct xt_connlimit_info { __u32 flags; /* Used internally by the kernel */ - struct xt_connlimit_data *data __attribute__((aligned(8))); + struct nf_conncount_data *data __attribute__((aligned(8))); }; #endif /* _XT_CONNLIMIT_H */ diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h index 81b6a4cbcb72..791dfc5ae907 100644 --- a/include/uapi/linux/netfilter_arp.h +++ b/include/uapi/linux/netfilter_arp.h @@ -15,6 +15,9 @@ #define NF_ARP_IN 0 #define NF_ARP_OUT 1 #define NF_ARP_FORWARD 2 + +#ifndef __KERNEL__ #define NF_ARP_NUMHOOKS 3 +#endif #endif /* __LINUX_ARP_NETFILTER_H */ diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h index 9089c38f6abe..61f1c7dfd033 100644 --- a/include/uapi/linux/netfilter_decnet.h +++ b/include/uapi/linux/netfilter_decnet.h @@ -24,6 +24,9 @@ #define NFC_DN_IF_IN 0x0004 /* Output device. */ #define NFC_DN_IF_OUT 0x0008 + +/* kernel define is in netfilter_defs.h */ +#define NF_DN_NUMHOOKS 7 #endif /* ! 
__KERNEL__ */ /* DECnet Hooks */ @@ -41,7 +44,6 @@ #define NF_DN_HELLO 5 /* Input Routing Packets */ #define NF_DN_ROUTE 6 -#define NF_DN_NUMHOOKS 7 enum nf_dn_hook_priorities { NF_DN_PRI_FIRST = INT_MIN, diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h index e6b1a84f5dd3..c3b060775e13 100644 --- a/include/uapi/linux/netfilter_ipv4.h +++ b/include/uapi/linux/netfilter_ipv4.h @@ -57,6 +57,7 @@ enum nf_ip_hook_priorities { NF_IP_PRI_FIRST = INT_MIN, + NF_IP_PRI_RAW_BEFORE_DEFRAG = -450, NF_IP_PRI_CONNTRACK_DEFRAG = -400, NF_IP_PRI_RAW = -300, NF_IP_PRI_SELINUX_FIRST = -225, diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h index 2f9724611cc2..dc624fd24d25 100644 --- a/include/uapi/linux/netfilter_ipv6.h +++ b/include/uapi/linux/netfilter_ipv6.h @@ -62,6 +62,7 @@ enum nf_ip6_hook_priorities { NF_IP6_PRI_FIRST = INT_MIN, + NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450, NF_IP6_PRI_CONNTRACK_DEFRAG = -400, NF_IP6_PRI_RAW = -300, NF_IP6_PRI_SELINUX_FIRST = -225, diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_srh.h b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h new file mode 100644 index 000000000000..f3cc0ef514a7 --- /dev/null +++ b/include/uapi/linux/netfilter_ipv6/ip6t_srh.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _IP6T_SRH_H +#define _IP6T_SRH_H + +#include <linux/types.h> +#include <linux/netfilter.h> + +/* Values for "mt_flags" field in struct ip6t_srh */ +#define IP6T_SRH_NEXTHDR 0x0001 +#define IP6T_SRH_LEN_EQ 0x0002 +#define IP6T_SRH_LEN_GT 0x0004 +#define IP6T_SRH_LEN_LT 0x0008 +#define IP6T_SRH_SEGS_EQ 0x0010 +#define IP6T_SRH_SEGS_GT 0x0020 +#define IP6T_SRH_SEGS_LT 0x0040 +#define IP6T_SRH_LAST_EQ 0x0080 +#define IP6T_SRH_LAST_GT 0x0100 +#define IP6T_SRH_LAST_LT 0x0200 +#define IP6T_SRH_TAG 0x0400 +#define IP6T_SRH_MASK 0x07FF + +/* Values for "mt_invflags" field in struct ip6t_srh */ +#define IP6T_SRH_INV_NEXTHDR 0x0001 +#define IP6T_SRH_INV_LEN_EQ 0x0002 +#define IP6T_SRH_INV_LEN_GT 0x0004 +#define IP6T_SRH_INV_LEN_LT 0x0008 +#define IP6T_SRH_INV_SEGS_EQ 0x0010 +#define IP6T_SRH_INV_SEGS_GT 0x0020 +#define IP6T_SRH_INV_SEGS_LT 0x0040 +#define IP6T_SRH_INV_LAST_EQ 0x0080 +#define IP6T_SRH_INV_LAST_GT 0x0100 +#define IP6T_SRH_INV_LAST_LT 0x0200 +#define IP6T_SRH_INV_TAG 0x0400 +#define IP6T_SRH_INV_MASK 0x07FF + +/** + * struct ip6t_srh - SRH match options + * @ next_hdr: Next header field of SRH + * @ hdr_len: Extension header length field of SRH + * @ segs_left: Segments left field of SRH + * @ last_entry: Last entry field of SRH + * @ tag: Tag field of SRH + * @ mt_flags: match options + * @ mt_invflags: Invert the sense of match options + */ + +struct ip6t_srh { + __u8 next_hdr; + __u8 hdr_len; + __u8 segs_left; + __u8 last_entry; + __u16 tag; + __u16 mt_flags; + __u16 mt_invflags; +}; + +#endif /*_IP6T_SRH_H*/ diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h index 057d22a48416..946cb62d64b0 100644 --- a/include/uapi/linux/nfs.h +++ b/include/uapi/linux/nfs.h @@ -12,6 +12,7 @@ #define NFS_PROGRAM 100003 #define NFS_PORT 2049 +#define NFS_RDMA_PORT 20049 #define NFS_MAXDATA 8192 #define NFS_MAXPATHLEN 1024 #define NFS_MAXNAMLEN 255 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f882fe1f9709..c587a61c32bf 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_PARENT_BSSID. (u64). 
* @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF * is set. + * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update. + * Contains a nested array of signal strength attributes (u8, dBm), + * using the nesting index as the antenna number. * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3885,6 +3888,7 @@ enum nl80211_bss { NL80211_BSS_PAD, NL80211_BSS_PARENT_TSF, NL80211_BSS_PARENT_BSSID, + NL80211_BSS_CHAIN_SIGNAL, /* keep last */ __NL80211_BSS_AFTER_LAST, diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index dcfab5e3b55c..713e56ce681f 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -363,6 +363,7 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ OVS_TUNNEL_KEY_ATTR_PAD, + OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */ __OVS_TUNNEL_KEY_ATTR_MAX }; diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 70c2b2ade048..0c79eac5e9b8 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -622,15 +622,19 @@ * safely. */ #define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */ #define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */ #define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */ -#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */ +#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */ +#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */ +#define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */ #define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ +#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ #define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ #define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ @@ -966,26 +970,28 @@ /* Downstream Port Containment */ #define PCI_EXP_DPC_CAP 4 /* DPC Capability */ -#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */ -#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */ -#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */ -#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */ -#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0xF00 /* RP PIO log size */ +#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */ +#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */ +#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */ +#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x0080 /* Software Triggering Supported */ +#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */ #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ #define PCI_EXP_DPC_CTL 6 /* DPC control */ 
-#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x02 /* Enable trigger on ERR_NONFATAL message */ -#define PCI_EXP_DPC_CTL_INT_EN 0x08 /* DPC Interrupt Enable */ +#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ +#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ -#define PCI_EXP_DPC_STATUS_TRIGGER 0x01 /* Trigger Status */ -#define PCI_EXP_DPC_STATUS_INTERRUPT 0x08 /* Interrupt Status */ -#define PCI_EXP_DPC_RP_BUSY 0x10 /* Root Port Busy */ +#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ +#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ #define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */ #define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */ -#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO MASK */ +#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */ #define PCI_EXP_DPC_RP_PIO_SEVERITY 0x14 /* RP PIO Severity */ #define PCI_EXP_DPC_RP_PIO_SYSERROR 0x18 /* RP PIO SysError */ #define PCI_EXP_DPC_RP_PIO_EXCEPTION 0x1C /* RP PIO Exception */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index c77c9a2ebbbb..e0739a1aa4b2 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -418,6 +418,27 @@ struct perf_event_attr { __u16 __reserved_2; /* align to __u64 */ }; +/* + * Structure used by below PERF_EVENT_IOC_QUERY_BPF command + * to query bpf programs attached to the same perf tracepoint + * as the given perf event. + */ +struct perf_event_query_bpf { + /* + * The below ids array length + */ + __u32 ids_len; + /* + * Set by the kernel to indicate the number of + * available programs + */ + __u32 prog_cnt; + /* + * User provided buffer to store program ids + */ + __u32 ids[0]; +}; + #define perf_flags(attr) (*(&(attr)->read_format + 1)) /* @@ -433,6 +454,7 @@ struct perf_event_attr { #define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *) #define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32) #define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32) +#define PERF_EVENT_IOC_QUERY_BPF _IOWR('$', 10, struct perf_event_query_bpf *) enum perf_event_ioc_flags { PERF_IOC_FLAG_GROUP = 1U << 0, diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index e3939e00980b..e46d82b91166 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args { #define PTRACE_SETSIGMASK 0x420b #define PTRACE_SECCOMP_GET_FILTER 0x420c +#define PTRACE_SECCOMP_GET_METADATA 0x420d + +struct seccomp_metadata { + unsigned long filter_off; /* Input: which filter */ + unsigned int flags; /* Output: filter's flags */ +}; /* Read signals from a shared (process wide) queue */ #define PTRACE_PEEKSIGINFO_SHARED (1 << 0) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 843e29aa3cac..9b15005955fa 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -541,9 +541,19 @@ struct tcmsg { int tcm_ifindex; __u32 tcm_handle; __u32 tcm_parent; +/* tcm_block_index is used instead of tcm_parent + * in case tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK + */ +#define tcm_block_index tcm_parent __u32 tcm_info; }; +/* For manipulation of filters in shared block, tcm_ifindex is set to + * TCM_IFINDEX_MAGIC_BLOCK, and tcm_parent is aliased 
to tcm_block_index + * which is the block index. + */ +#define TCM_IFINDEX_MAGIC_BLOCK (0xFFFFFFFFU) + enum { TCA_UNSPEC, TCA_KIND, @@ -558,6 +568,8 @@ enum { TCA_DUMP_INVISIBLE, TCA_CHAIN, TCA_HW_OFFLOAD, + TCA_INGRESS_BLOCK, + TCA_EGRESS_BLOCK, __TCA_MAX }; diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index d9adab32dbee..4c4db14786bd 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_SOCKOPT_PEELOFF_FLAGS 122 #define SCTP_STREAM_SCHEDULER 123 #define SCTP_STREAM_SCHEDULER_VALUE 124 +#define SCTP_INTERLEAVING_SUPPORTED 125 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -459,6 +460,8 @@ struct sctp_pdapi_event { __u32 pdapi_length; __u32 pdapi_indication; sctp_assoc_t pdapi_assoc_id; + __u32 pdapi_stream; + __u32 pdapi_seq; }; enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h index 75df44373034..4f4daf8db954 100644 --- a/include/uapi/linux/switchtec_ioctl.h +++ b/include/uapi/linux/switchtec_ioctl.h @@ -88,7 +88,8 @@ struct switchtec_ioctl_event_summary { #define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26 #define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27 #define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28 -#define SWITCHTEC_IOCTL_MAX_EVENTS 29 +#define SWITCHTEC_IOCTL_EVENT_GFMS 29 +#define SWITCHTEC_IOCTL_MAX_EVENTS 30 #define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1 #define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2 diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index 688782e90140..4b9eb064d7e7 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -50,6 +50,7 @@ #define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ #define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */ +#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */ /* * TEE Implementation ID @@ -154,6 +155,13 @@ struct tee_ioctl_buf_data { */ #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff +/* Meta parameter carrying extra information about the message. */ +#define TEE_IOCTL_PARAM_ATTR_META 0x100 + +/* Mask of all known attr bits */ +#define TEE_IOCTL_PARAM_ATTR_MASK \ + (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META) + /* * Matches TEEC_LOGIN_* in GP TEE Client API * Are only defined for GP compliant TEEs @@ -332,6 +340,35 @@ struct tee_iocl_supp_send_arg { #define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \ struct tee_ioctl_buf_data) +/** + * struct tee_ioctl_shm_register_data - Shared memory register argument + * @addr: [in] Start address of shared memory to register + * @length: [in/out] Length of shared memory to register + * @flags: [in/out] Flags to/from registration. + * @id: [out] Identifier of the shared memory + * + * The flags field should currently be zero as input. Updated by the call + * with actual flags as defined by TEE_IOCTL_SHM_* above. + * This structure is used as argument for TEE_IOC_SHM_REGISTER below. + */ +struct tee_ioctl_shm_register_data { + __u64 addr; + __u64 length; + __u32 flags; + __s32 id; +}; + +/** + * TEE_IOC_SHM_REGISTER - Register shared memory argument + * + * Registers shared memory between the user space process and secure OS. + * + * Returns a file descriptor on success or < 0 on failure + * + * The shared memory is unregisterred when the descriptor is closed. 
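A hedged sketch of the TEE_IOC_SHM_REGISTER flow described here: an ordinary buffer is registered through struct tee_ioctl_shm_register_data and the returned descriptor keeps the registration alive until it is closed; the /dev/tee0 path and the malloc()ed buffer are illustrative assumptions:

    #include <fcntl.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/tee.h>

    /* Returns the shared-memory fd on success, -1 on failure. */
    static int tee_register_shm(size_t len)
    {
            void *buf = malloc(len);
            struct tee_ioctl_shm_register_data data = {
                    .addr = (__u64)(unsigned long)buf,
                    .length = len,
                    .flags = 0,             /* must be zero on input */
            };
            int fd = open("/dev/tee0", O_RDWR);     /* assumed node */

            if (!buf || fd < 0)
                    return -1;

            /* On success this returns a new fd; data.id holds the shm id. */
            return ioctl(fd, TEE_IOC_SHM_REGISTER, &data);
    }
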
+ */ +#define TEE_IOC_SHM_REGISTER _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \ + struct tee_ioctl_shm_register_data) /* * Five syscalls are used when communicating with the TEE driver. * open(): opens the device associated with the driver diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h index 35f79d1f8c3a..14bacc7e6cef 100644 --- a/include/uapi/linux/tipc.h +++ b/include/uapi/linux/tipc.h @@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr) /* * Publication scopes when binding port names and port name sequences */ - -#define TIPC_ZONE_SCOPE 1 -#define TIPC_CLUSTER_SCOPE 2 -#define TIPC_NODE_SCOPE 3 +#define TIPC_ZONE_SCOPE 1 +#define TIPC_CLUSTER_SCOPE 2 +#define TIPC_NODE_SCOPE 3 /* * Limiting values for messages diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index e3d1d0c78f3c..cd4f0b897a48 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum; #define __aligned_be64 __be64 __attribute__((aligned(8))) #define __aligned_le64 __le64 __attribute__((aligned(8))) +#ifdef __CHECK_POLL +typedef unsigned __bitwise __poll_t; +#else +typedef unsigned __poll_t; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _UAPI_LINUX_TYPES_H */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index c4c79aa331bd..d5a5caec8fbc 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -1077,9 +1077,9 @@ struct usb_ptm_cap_descriptor { #define USB_DT_USB_PTM_ID_SIZE 3 /* * The size of the descriptor for the Sublink Speed Attribute Count - * (SSAC) specified in bmAttributes[4:0]. + * (SSAC) specified in bmAttributes[4:0]. SSAC is zero-based */ -#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4) +#define USB_DT_USB_SSP_CAP_SIZE(ssac) (12 + (ssac + 1) * 4) /*-------------------------------------------------------------------------*/ diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index 70ed5338d447..964e87217be4 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -79,7 +79,7 @@ struct usbdevfs_connectinfo { #define USBDEVFS_URB_SHORT_NOT_OK 0x01 #define USBDEVFS_URB_ISO_ASAP 0x02 #define USBDEVFS_URB_BULK_CONTINUATION 0x04 -#define USBDEVFS_URB_NO_FSBR 0x20 +#define USBDEVFS_URB_NO_FSBR 0x20 /* Not used */ #define USBDEVFS_URB_ZERO_PACKET 0x40 #define USBDEVFS_URB_NO_INTERRUPT 0x80 diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h index e80b4655d8cd..020714d2c5bd 100644 --- a/include/uapi/linux/uvcvideo.h +++ b/include/uapi/linux/uvcvideo.h @@ -68,4 +68,30 @@ struct uvc_xu_control_query { #define UVCIOC_CTRL_MAP _IOWR('u', 0x20, struct uvc_xu_control_mapping) #define UVCIOC_CTRL_QUERY _IOWR('u', 0x21, struct uvc_xu_control_query) +/* + * Metadata node + */ + +/** + * struct uvc_meta_buf - metadata buffer building block + * @ns - system timestamp of the payload in nanoseconds + * @sof - USB Frame Number + * @length - length of the payload header + * @flags - payload header flags + * @buf - optional device-specific header data + * + * UVC metadata nodes fill buffers with possibly multiple instances of this + * struct. The first two fields are added by the driver, they can be used for + * clock synchronisation. The rest is an exact copy of a UVC payload header. + * Only complete objects with complete buffers are included. Therefore it's + * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large. 
+ */ +struct uvc_meta_buf { + __u64 ns; + __u16 sof; + __u8 length; + __u8 flags; + __u8 buf[]; +} __packed; + #endif diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index a692623e0236..cbbb750d87d1 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -67,8 +67,8 @@ /* User-class control IDs */ #define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900) -#define V4L2_CID_USER_BASE V4L2_CID_BASE -#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1) +#define V4L2_CID_USER_BASE V4L2_CID_BASE +#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1) #define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0) #define V4L2_CID_CONTRAST (V4L2_CID_BASE+1) #define V4L2_CID_SATURATION (V4L2_CID_BASE+2) @@ -102,7 +102,7 @@ enum v4l2_power_line_frequency { #define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25) #define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26) #define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27) -#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) +#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) #define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) #define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) #define V4L2_CID_COLORFX (V4L2_CID_BASE+31) @@ -194,11 +194,11 @@ enum v4l2_colorfx { /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ -#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) -#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1) +#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) +#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1) /* MPEG streams, specific to multiplexed streams */ -#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0) +#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_MPEG_BASE+0) enum v4l2_mpeg_stream_type { V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */ V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */ @@ -207,26 +207,26 @@ enum v4l2_mpeg_stream_type { V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */ V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */ }; -#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1) -#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2) -#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3) -#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4) -#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5) -#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6) -#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7) +#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_MPEG_BASE+1) +#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_MPEG_BASE+2) +#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_MPEG_BASE+3) +#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_MPEG_BASE+4) +#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_MPEG_BASE+5) +#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_MPEG_BASE+6) +#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_MPEG_BASE+7) enum v4l2_mpeg_stream_vbi_fmt { V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */ V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */ }; /* MPEG audio controls specific to multiplexed streams */ -#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100) +#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_MPEG_BASE+100) enum v4l2_mpeg_audio_sampling_freq { V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0, V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1, V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2, }; 
-#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101) +#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_MPEG_BASE+101) enum v4l2_mpeg_audio_encoding { V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0, V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1, @@ -234,7 +234,7 @@ enum v4l2_mpeg_audio_encoding { V4L2_MPEG_AUDIO_ENCODING_AAC = 3, V4L2_MPEG_AUDIO_ENCODING_AC3 = 4, }; -#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102) +#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_MPEG_BASE+102) enum v4l2_mpeg_audio_l1_bitrate { V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0, V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1, @@ -251,7 +251,7 @@ enum v4l2_mpeg_audio_l1_bitrate { V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12, V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13, }; -#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103) +#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_MPEG_BASE+103) enum v4l2_mpeg_audio_l2_bitrate { V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0, V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1, @@ -268,7 +268,7 @@ enum v4l2_mpeg_audio_l2_bitrate { V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12, V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13, }; -#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104) +#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_MPEG_BASE+104) enum v4l2_mpeg_audio_l3_bitrate { V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0, V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1, @@ -285,32 +285,32 @@ enum v4l2_mpeg_audio_l3_bitrate { V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12, V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13, }; -#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105) +#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_MPEG_BASE+105) enum v4l2_mpeg_audio_mode { V4L2_MPEG_AUDIO_MODE_STEREO = 0, V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1, V4L2_MPEG_AUDIO_MODE_DUAL = 2, V4L2_MPEG_AUDIO_MODE_MONO = 3, }; -#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106) +#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_MPEG_BASE+106) enum v4l2_mpeg_audio_mode_extension { V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3, }; -#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107) +#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_MPEG_BASE+107) enum v4l2_mpeg_audio_emphasis { V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0, V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1, V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2, }; -#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108) +#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_MPEG_BASE+108) enum v4l2_mpeg_audio_crc { V4L2_MPEG_AUDIO_CRC_NONE = 0, V4L2_MPEG_AUDIO_CRC_CRC16 = 1, }; -#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109) +#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_MPEG_BASE+109) #define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_MPEG_BASE+110) #define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_MPEG_BASE+111) enum v4l2_mpeg_audio_ac3_bitrate { @@ -346,33 +346,33 @@ enum v4l2_mpeg_audio_dec_playback { #define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_MPEG_BASE+113) /* MPEG video controls specific to multiplexed streams */ -#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200) +#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_MPEG_BASE+200) enum v4l2_mpeg_video_encoding { V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0, V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2, }; -#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201) +#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_MPEG_BASE+201) enum v4l2_mpeg_video_aspect { V4L2_MPEG_VIDEO_ASPECT_1x1 = 0, 
V4L2_MPEG_VIDEO_ASPECT_4x3 = 1, V4L2_MPEG_VIDEO_ASPECT_16x9 = 2, V4L2_MPEG_VIDEO_ASPECT_221x100 = 3, }; -#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202) -#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203) -#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204) -#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205) -#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206) +#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_MPEG_BASE+202) +#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_MPEG_BASE+203) +#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_MPEG_BASE+204) +#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_MPEG_BASE+205) +#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_MPEG_BASE+206) enum v4l2_mpeg_video_bitrate_mode { V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1, }; -#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207) -#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208) +#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_MPEG_BASE+207) +#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_MPEG_BASE+208) #define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_MPEG_BASE+209) -#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) -#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) +#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) +#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) #define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_MPEG_BASE+212) #define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_MPEG_BASE+213) #define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_MPEG_BASE+214) @@ -590,14 +590,14 @@ enum v4l2_vp8_golden_frame_sel { #define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511) /* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */ -#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) -#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) +#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1, }; -#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1) -#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2) +#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+1) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+2) enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type { V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, @@ -605,18 +605,18 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type { V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4, }; -#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+3) enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type { V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1, }; -#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE 
(V4L2_CID_MPEG_CX2341X_BASE+4) +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+4) enum v4l2_mpeg_cx2341x_video_temporal_filter_mode { V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1, }; -#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5) -#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6) +#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_MPEG_CX2341X_BASE+5) +#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_MPEG_CX2341X_BASE+6) enum v4l2_mpeg_cx2341x_video_median_filter_type { V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1, @@ -624,11 +624,11 @@ enum v4l2_mpeg_cx2341x_video_median_filter_type { V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4, }; -#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7) -#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+7) +#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+8) #define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_MPEG_CX2341X_BASE+9) -#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10) -#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11) +#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_MPEG_CX2341X_BASE+10) +#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_MPEG_CX2341X_BASE+11) /* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */ #define V4L2_CID_MPEG_MFC51_BASE (V4L2_CTRL_CLASS_MPEG | 0x1100) @@ -660,8 +660,8 @@ enum v4l2_mpeg_mfc51_video_force_frame_type { /* Camera class control IDs */ -#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900) -#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1) +#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900) +#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1) #define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1) enum v4l2_exposure_auto_type { diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h new file mode 100644 index 000000000000..7eae536ff1e6 --- /dev/null +++ b/include/uapi/linux/vbox_err.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright (C) 2017 Oracle Corporation */ + +#ifndef __UAPI_VBOX_ERR_H__ +#define __UAPI_VBOX_ERR_H__ + +#define VINF_SUCCESS 0 +#define VERR_GENERAL_FAILURE (-1) +#define VERR_INVALID_PARAMETER (-2) +#define VERR_INVALID_MAGIC (-3) +#define VERR_INVALID_HANDLE (-4) +#define VERR_LOCK_FAILED (-5) +#define VERR_INVALID_POINTER (-6) +#define VERR_IDT_FAILED (-7) +#define VERR_NO_MEMORY (-8) +#define VERR_ALREADY_LOADED (-9) +#define VERR_PERMISSION_DENIED (-10) +#define VERR_VERSION_MISMATCH (-11) +#define VERR_NOT_IMPLEMENTED (-12) +#define VERR_INVALID_FLAGS (-13) + +#define VERR_NOT_EQUAL (-18) +#define VERR_NOT_SYMLINK (-19) +#define VERR_NO_TMP_MEMORY (-20) +#define VERR_INVALID_FMODE (-21) +#define VERR_WRONG_ORDER (-22) +#define VERR_NO_TLS_FOR_SELF (-23) +#define VERR_FAILED_TO_SET_SELF_TLS (-24) +#define VERR_NO_CONT_MEMORY (-26) +#define VERR_NO_PAGE_MEMORY (-27) +#define VERR_THREAD_IS_DEAD (-29) 
+#define VERR_THREAD_NOT_WAITABLE (-30) +#define VERR_PAGE_TABLE_NOT_PRESENT (-31) +#define VERR_INVALID_CONTEXT (-32) +#define VERR_TIMER_BUSY (-33) +#define VERR_ADDRESS_CONFLICT (-34) +#define VERR_UNRESOLVED_ERROR (-35) +#define VERR_INVALID_FUNCTION (-36) +#define VERR_NOT_SUPPORTED (-37) +#define VERR_ACCESS_DENIED (-38) +#define VERR_INTERRUPTED (-39) +#define VERR_TIMEOUT (-40) +#define VERR_BUFFER_OVERFLOW (-41) +#define VERR_TOO_MUCH_DATA (-42) +#define VERR_MAX_THRDS_REACHED (-43) +#define VERR_MAX_PROCS_REACHED (-44) +#define VERR_SIGNAL_REFUSED (-45) +#define VERR_SIGNAL_PENDING (-46) +#define VERR_SIGNAL_INVALID (-47) +#define VERR_STATE_CHANGED (-48) +#define VERR_INVALID_UUID_FORMAT (-49) +#define VERR_PROCESS_NOT_FOUND (-50) +#define VERR_PROCESS_RUNNING (-51) +#define VERR_TRY_AGAIN (-52) +#define VERR_PARSE_ERROR (-53) +#define VERR_OUT_OF_RANGE (-54) +#define VERR_NUMBER_TOO_BIG (-55) +#define VERR_NO_DIGITS (-56) +#define VERR_NEGATIVE_UNSIGNED (-57) +#define VERR_NO_TRANSLATION (-58) + +#define VERR_NOT_FOUND (-78) +#define VERR_INVALID_STATE (-79) +#define VERR_OUT_OF_RESOURCES (-80) + +#define VERR_FILE_NOT_FOUND (-102) +#define VERR_PATH_NOT_FOUND (-103) +#define VERR_INVALID_NAME (-104) +#define VERR_ALREADY_EXISTS (-105) +#define VERR_TOO_MANY_OPEN_FILES (-106) +#define VERR_SEEK (-107) +#define VERR_NEGATIVE_SEEK (-108) +#define VERR_SEEK_ON_DEVICE (-109) +#define VERR_EOF (-110) +#define VERR_READ_ERROR (-111) +#define VERR_WRITE_ERROR (-112) +#define VERR_WRITE_PROTECT (-113) +#define VERR_SHARING_VIOLATION (-114) +#define VERR_FILE_LOCK_FAILED (-115) +#define VERR_FILE_LOCK_VIOLATION (-116) +#define VERR_CANT_CREATE (-117) +#define VERR_CANT_DELETE_DIRECTORY (-118) +#define VERR_NOT_SAME_DEVICE (-119) +#define VERR_FILENAME_TOO_LONG (-120) +#define VERR_MEDIA_NOT_PRESENT (-121) +#define VERR_MEDIA_NOT_RECOGNIZED (-122) +#define VERR_FILE_NOT_LOCKED (-123) +#define VERR_FILE_LOCK_LOST (-124) +#define VERR_DIR_NOT_EMPTY (-125) +#define VERR_NOT_A_DIRECTORY (-126) +#define VERR_IS_A_DIRECTORY (-127) +#define VERR_FILE_TOO_BIG (-128) + +#define VERR_NET_IO_ERROR (-400) +#define VERR_NET_OUT_OF_RESOURCES (-401) +#define VERR_NET_HOST_NOT_FOUND (-402) +#define VERR_NET_PATH_NOT_FOUND (-403) +#define VERR_NET_PRINT_ERROR (-404) +#define VERR_NET_NO_NETWORK (-405) +#define VERR_NET_NOT_UNIQUE_NAME (-406) + +#define VERR_NET_IN_PROGRESS (-436) +#define VERR_NET_ALREADY_IN_PROGRESS (-437) +#define VERR_NET_NOT_SOCKET (-438) +#define VERR_NET_DEST_ADDRESS_REQUIRED (-439) +#define VERR_NET_MSG_SIZE (-440) +#define VERR_NET_PROTOCOL_TYPE (-441) +#define VERR_NET_PROTOCOL_NOT_AVAILABLE (-442) +#define VERR_NET_PROTOCOL_NOT_SUPPORTED (-443) +#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED (-444) +#define VERR_NET_OPERATION_NOT_SUPPORTED (-445) +#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED (-446) +#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED (-447) +#define VERR_NET_ADDRESS_IN_USE (-448) +#define VERR_NET_ADDRESS_NOT_AVAILABLE (-449) +#define VERR_NET_DOWN (-450) +#define VERR_NET_UNREACHABLE (-451) +#define VERR_NET_CONNECTION_RESET (-452) +#define VERR_NET_CONNECTION_ABORTED (-453) +#define VERR_NET_CONNECTION_RESET_BY_PEER (-454) +#define VERR_NET_NO_BUFFER_SPACE (-455) +#define VERR_NET_ALREADY_CONNECTED (-456) +#define VERR_NET_NOT_CONNECTED (-457) +#define VERR_NET_SHUTDOWN (-458) +#define VERR_NET_TOO_MANY_REFERENCES (-459) +#define VERR_NET_CONNECTION_TIMED_OUT (-460) +#define VERR_NET_CONNECTION_REFUSED (-461) +#define VERR_NET_HOST_DOWN (-464) +#define 
VERR_NET_HOST_UNREACHABLE (-465) +#define VERR_NET_PROTOCOL_ERROR (-466) +#define VERR_NET_INCOMPLETE_TX_PACKET (-467) + +/* misc. unsorted codes */ +#define VERR_RESOURCE_BUSY (-138) +#define VERR_DISK_FULL (-152) +#define VERR_TOO_MANY_SYMLINKS (-156) +#define VERR_NO_MORE_FILES (-201) +#define VERR_INTERNAL_ERROR (-225) +#define VERR_INTERNAL_ERROR_2 (-226) +#define VERR_INTERNAL_ERROR_3 (-227) +#define VERR_INTERNAL_ERROR_4 (-228) +#define VERR_DEV_IO_ERROR (-250) +#define VERR_IO_BAD_LENGTH (-255) +#define VERR_BROKEN_PIPE (-301) +#define VERR_NO_DATA (-304) +#define VERR_SEM_DESTROYED (-363) +#define VERR_DEADLOCK (-365) +#define VERR_BAD_EXE_FORMAT (-608) +#define VINF_HGCM_ASYNC_EXECUTE (2903) + +#endif diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h new file mode 100644 index 000000000000..0e68024f36c7 --- /dev/null +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* + * Virtual Device for Guest <-> VMM/Host communication, type definitions + * which are also used for the vboxguest ioctl interface / by vboxsf + * + * Copyright (C) 2006-2016 Oracle Corporation + */ + +#ifndef __UAPI_VBOX_VMMDEV_TYPES_H__ +#define __UAPI_VBOX_VMMDEV_TYPES_H__ + +#include <asm/bitsperlong.h> +#include <linux/types.h> + +/* + * We cannot use linux' compiletime_assert here because it expects to be used + * inside a function only. Use a typedef to a char array with a negative size. + */ +#define VMMDEV_ASSERT_SIZE(type, size) \ + typedef char type ## _asrt_size[1 - 2*!!(sizeof(struct type) != (size))] + +/** enum vmmdev_request_type - VMMDev request types. */ +enum vmmdev_request_type { + VMMDEVREQ_INVALID_REQUEST = 0, + VMMDEVREQ_GET_MOUSE_STATUS = 1, + VMMDEVREQ_SET_MOUSE_STATUS = 2, + VMMDEVREQ_SET_POINTER_SHAPE = 3, + VMMDEVREQ_GET_HOST_VERSION = 4, + VMMDEVREQ_IDLE = 5, + VMMDEVREQ_GET_HOST_TIME = 10, + VMMDEVREQ_GET_HYPERVISOR_INFO = 20, + VMMDEVREQ_SET_HYPERVISOR_INFO = 21, + VMMDEVREQ_REGISTER_PATCH_MEMORY = 22, /* since version 3.0.6 */ + VMMDEVREQ_DEREGISTER_PATCH_MEMORY = 23, /* since version 3.0.6 */ + VMMDEVREQ_SET_POWER_STATUS = 30, + VMMDEVREQ_ACKNOWLEDGE_EVENTS = 41, + VMMDEVREQ_CTL_GUEST_FILTER_MASK = 42, + VMMDEVREQ_REPORT_GUEST_INFO = 50, + VMMDEVREQ_REPORT_GUEST_INFO2 = 58, /* since version 3.2.0 */ + VMMDEVREQ_REPORT_GUEST_STATUS = 59, /* since version 3.2.8 */ + VMMDEVREQ_REPORT_GUEST_USER_STATE = 74, /* since version 4.3 */ + /* Retrieve a display resize request sent by the host, deprecated. */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQ = 51, + VMMDEVREQ_VIDEMODE_SUPPORTED = 52, + VMMDEVREQ_GET_HEIGHT_REDUCTION = 53, + /** + * @VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2: + * Retrieve a display resize request sent by the host. + * + * Queries a display resize request sent from the host. If the + * event_ack member is sent to true and there is an unqueried request + * available for one of the virtual display then that request will + * be returned. If several displays have unqueried requests the lowest + * numbered display will be chosen first. Only the most recent unseen + * request for each display is remembered. + * If event_ack is set to false, the last host request queried with + * event_ack set is resent, or failing that the most recent received + * from the host. If no host request was ever received then all zeros + * are returned. 
+ */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2 = 54, + VMMDEVREQ_REPORT_GUEST_CAPABILITIES = 55, + VMMDEVREQ_SET_GUEST_CAPABILITIES = 56, + VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */ + VMMDEVREQ_HGCM_CONNECT = 60, + VMMDEVREQ_HGCM_DISCONNECT = 61, + VMMDEVREQ_HGCM_CALL32 = 62, + VMMDEVREQ_HGCM_CALL64 = 63, + VMMDEVREQ_HGCM_CANCEL = 64, + VMMDEVREQ_HGCM_CANCEL2 = 65, + VMMDEVREQ_VIDEO_ACCEL_ENABLE = 70, + VMMDEVREQ_VIDEO_ACCEL_FLUSH = 71, + VMMDEVREQ_VIDEO_SET_VISIBLE_REGION = 72, + VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ = 73, + VMMDEVREQ_QUERY_CREDENTIALS = 100, + VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT = 101, + VMMDEVREQ_REPORT_GUEST_STATS = 110, + VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ = 111, + VMMDEVREQ_GET_STATISTICS_CHANGE_REQ = 112, + VMMDEVREQ_CHANGE_MEMBALLOON = 113, + VMMDEVREQ_GET_VRDPCHANGE_REQ = 150, + VMMDEVREQ_LOG_STRING = 200, + VMMDEVREQ_GET_CPU_HOTPLUG_REQ = 210, + VMMDEVREQ_SET_CPU_HOTPLUG_STATUS = 211, + VMMDEVREQ_REGISTER_SHARED_MODULE = 212, + VMMDEVREQ_UNREGISTER_SHARED_MODULE = 213, + VMMDEVREQ_CHECK_SHARED_MODULES = 214, + VMMDEVREQ_GET_PAGE_SHARING_STATUS = 215, + VMMDEVREQ_DEBUG_IS_PAGE_SHARED = 216, + VMMDEVREQ_GET_SESSION_ID = 217, /* since version 3.2.8 */ + VMMDEVREQ_WRITE_COREDUMP = 218, + VMMDEVREQ_GUEST_HEARTBEAT = 219, + VMMDEVREQ_HEARTBEAT_CONFIGURE = 220, + /* Ensure the enum is a 32 bit data-type */ + VMMDEVREQ_SIZEHACK = 0x7fffffff +}; + +#if __BITS_PER_LONG == 64 +#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL64 +#else +#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32 +#endif + +/** HGCM service location types. */ +enum vmmdev_hgcm_service_location_type { + VMMDEV_HGCM_LOC_INVALID = 0, + VMMDEV_HGCM_LOC_LOCALHOST = 1, + VMMDEV_HGCM_LOC_LOCALHOST_EXISTING = 2, + /* Ensure the enum is a 32 bit data-type */ + VMMDEV_HGCM_LOC_SIZEHACK = 0x7fffffff +}; + +/** HGCM host service location. */ +struct vmmdev_hgcm_service_location_localhost { + /** Service name */ + char service_name[128]; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location_localhost, 128); + +/** HGCM service location. */ +struct vmmdev_hgcm_service_location { + /** Type of the location. */ + enum vmmdev_hgcm_service_location_type type; + + union { + struct vmmdev_hgcm_service_location_localhost localhost; + } u; +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_service_location, 128 + 4); + +/** HGCM function parameter type. */ +enum vmmdev_hgcm_function_parameter_type { + VMMDEV_HGCM_PARM_TYPE_INVALID = 0, + VMMDEV_HGCM_PARM_TYPE_32BIT = 1, + VMMDEV_HGCM_PARM_TYPE_64BIT = 2, + /** Deprecated Doesn't work, use PAGELIST. */ + VMMDEV_HGCM_PARM_TYPE_PHYSADDR = 3, + /** In and Out, user-memory */ + VMMDEV_HGCM_PARM_TYPE_LINADDR = 4, + /** In, user-memory (read; host<-guest) */ + VMMDEV_HGCM_PARM_TYPE_LINADDR_IN = 5, + /** Out, user-memory (write; host->guest) */ + VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT = 6, + /** In and Out, kernel-memory */ + VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL = 7, + /** In, kernel-memory (read; host<-guest) */ + VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN = 8, + /** Out, kernel-memory (write; host->guest) */ + VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT = 9, + /** Physical addresses of locked pages for a buffer. */ + VMMDEV_HGCM_PARM_TYPE_PAGELIST = 10, + /* Ensure the enum is a 32 bit data-type */ + VMMDEV_HGCM_PARM_TYPE_SIZEHACK = 0x7fffffff +}; + +/** HGCM function parameter, 32-bit client. 
*/ +struct vmmdev_hgcm_function_parameter32 { + enum vmmdev_hgcm_function_parameter_type type; + union { + __u32 value32; + __u64 value64; + struct { + __u32 size; + union { + __u32 phys_addr; + __u32 linear_addr; + } u; + } pointer; + struct { + /** Size of the buffer described by the page list. */ + __u32 size; + /** Relative to the request header. */ + __u32 offset; + } page_list; + } u; +} __packed; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter32, 4 + 8); + +/** HGCM function parameter, 64-bit client. */ +struct vmmdev_hgcm_function_parameter64 { + enum vmmdev_hgcm_function_parameter_type type; + union { + __u32 value32; + __u64 value64; + struct { + __u32 size; + union { + __u64 phys_addr; + __u64 linear_addr; + } u; + } __packed pointer; + struct { + /** Size of the buffer described by the page list. */ + __u32 size; + /** Relative to the request header. */ + __u32 offset; + } page_list; + } __packed u; +} __packed; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_function_parameter64, 4 + 12); + +#if __BITS_PER_LONG == 64 +#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter64 +#else +#define vmmdev_hgcm_function_parameter vmmdev_hgcm_function_parameter32 +#endif + +#define VMMDEV_HGCM_F_PARM_DIRECTION_NONE 0x00000000U +#define VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST 0x00000001U +#define VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U +#define VMMDEV_HGCM_F_PARM_DIRECTION_BOTH 0x00000003U + +/** + * struct vmmdev_hgcm_pagelist - VMMDEV_HGCM_PARM_TYPE_PAGELIST parameters + * point to this structure to actually describe the buffer. + */ +struct vmmdev_hgcm_pagelist { + __u32 flags; /** VMMDEV_HGCM_F_PARM_*. */ + __u16 offset_first_page; /** Data offset in the first page. */ + __u16 page_count; /** Number of pages. */ + __u64 pages[1]; /** Page addresses. */ +}; +VMMDEV_ASSERT_SIZE(vmmdev_hgcm_pagelist, 4 + 2 + 2 + 8); + +#endif diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h new file mode 100644 index 000000000000..612f0c7d3558 --- /dev/null +++ b/include/uapi/linux/vboxguest.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* + * VBoxGuest - VirtualBox Guest Additions Driver Interface. + * + * Copyright (C) 2006-2016 Oracle Corporation + */ + +#ifndef __UAPI_VBOXGUEST_H__ +#define __UAPI_VBOXGUEST_H__ + +#include <asm/bitsperlong.h> +#include <linux/ioctl.h> +#include <linux/vbox_err.h> +#include <linux/vbox_vmmdev_types.h> + +/* Version of vbg_ioctl_hdr structure. */ +#define VBG_IOCTL_HDR_VERSION 0x10001 +/* Default request type. Use this for non-VMMDev requests. */ +#define VBG_IOCTL_HDR_TYPE_DEFAULT 0 + +/** + * Common ioctl header. + * + * This is a mirror of vmmdev_request_header to prevent duplicating data and + * needing to verify things multiple times. + */ +struct vbg_ioctl_hdr { + /** IN: The request input size, and output size if size_out is zero. */ + __u32 size_in; + /** IN: Structure version (VBG_IOCTL_HDR_VERSION) */ + __u32 version; + /** IN: The VMMDev request type or VBG_IOCTL_HDR_TYPE_DEFAULT. */ + __u32 type; + /** + * OUT: The VBox status code of the operation, out direction only. + * This is a VINF_ or VERR_ value as defined in vbox_err.h. + */ + __s32 rc; + /** IN: Output size. Set to zero to use size_in as output size. */ + __u32 size_out; + /** Reserved, MBZ. */ + __u32 reserved; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_hdr, 24); + + +/* + * The VBoxGuest I/O control version. + * + * As usual, the high word contains the major version and changes to it + * signifies incompatible changes. 
+ * + * The lower word is the minor version number; it is increased when new + * functions are added or existing ones are changed in a backwards compatible manner. + */ +#define VBG_IOC_VERSION 0x00010000u + +/** + * VBG_IOCTL_DRIVER_VERSION_INFO data structure + * + * Note VBG_IOCTL_DRIVER_VERSION_INFO may switch the session to a backwards + * compatible interface version if uClientVersion indicates older client code. + */ +struct vbg_ioctl_driver_version_info { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Requested interface version (VBG_IOC_VERSION). */ + __u32 req_version; + /** + * Minimum interface version number (typically the + * major version part of VBG_IOC_VERSION). + */ + __u32 min_version; + /** Reserved, MBZ. */ + __u32 reserved1; + /** Reserved, MBZ. */ + __u32 reserved2; + } in; + struct { + /** Version for this session (typ. VBG_IOC_VERSION). */ + __u32 session_version; + /** Version of the IDC interface (VBG_IOC_VERSION). */ + __u32 driver_version; + /** The SVN revision of the driver, or 0. */ + __u32 driver_revision; + /** Reserved \#1 (zero until defined). */ + __u32 reserved1; + /** Reserved \#2 (zero until defined). */ + __u32 reserved2; + } out; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20); + +#define VBG_IOCTL_DRIVER_VERSION_INFO \ + _IOWR('V', 0, struct vbg_ioctl_driver_version_info) + + +/* IOCTL to perform a VMM Device request less than 1KB in size. */ +#define VBG_IOCTL_VMMDEV_REQUEST(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 2, s) + + +/* IOCTL to perform a VMM Device request larger than 1KB. */ +#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) + + +/** VBG_IOCTL_HGCM_CONNECT data structure. */ +struct vbg_ioctl_hgcm_connect { + struct vbg_ioctl_hdr hdr; + union { + struct { + struct vmmdev_hgcm_service_location loc; + } in; + struct { + __u32 client_id; + } out; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_connect, 24 + 132); + +#define VBG_IOCTL_HGCM_CONNECT \ + _IOWR('V', 4, struct vbg_ioctl_hgcm_connect) + + +/** VBG_IOCTL_HGCM_DISCONNECT data structure. */ +struct vbg_ioctl_hgcm_disconnect { + struct vbg_ioctl_hdr hdr; + union { + struct { + __u32 client_id; + } in; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_disconnect, 24 + 4); + +#define VBG_IOCTL_HGCM_DISCONNECT \ + _IOWR('V', 5, struct vbg_ioctl_hgcm_disconnect) + + +/** VBG_IOCTL_HGCM_CALL data structure. */ +struct vbg_ioctl_hgcm_call { + /** The header. */ + struct vbg_ioctl_hdr hdr; + /** Input: The id of the caller. */ + __u32 client_id; + /** Input: Function number. */ + __u32 function; + /** + * Input: How long to wait (milliseconds) for completion before + * cancelling the call. Set to -1 to wait indefinitely. + */ + __u32 timeout_ms; + /** Interruptible flag, ignored for userspace calls. */ + __u8 interruptible; + /** Explicit padding, MBZ. */ + __u8 reserved; + /** + * Input: How many parameters follow this structure. + * + * The parameters are either HGCMFunctionParameter64 or 32, + * depending on whether we're receiving a 64-bit or 32-bit request. + * + * The current maximum is 61 parameters (given a 1KB max request size, + * and a 64-bit parameter size of 16 bytes). 
+ */ + __u16 parm_count; + /* + * Parameters follow in form: + * struct hgcm_function_parameter<32|64> parms[parm_count] + */ +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_hgcm_call, 24 + 16); + +#define VBG_IOCTL_HGCM_CALL_32(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 6, s) +#define VBG_IOCTL_HGCM_CALL_64(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 7, s) +#if __BITS_PER_LONG == 64 +#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_64(s) +#else +#define VBG_IOCTL_HGCM_CALL(s) VBG_IOCTL_HGCM_CALL_32(s) +#endif + + +/** VBG_IOCTL_LOG data structure. */ +struct vbg_ioctl_log { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** + * The log message, this may be zero terminated. If it + * is not zero terminated then the length is determined + * from the input size. + */ + char msg[1]; + } in; + } u; +}; + +#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) + + +/** VBG_IOCTL_WAIT_FOR_EVENTS data structure. */ +struct vbg_ioctl_wait_for_events { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Timeout in milliseconds. */ + __u32 timeout_ms; + /** Events to wait for. */ + __u32 events; + } in; + struct { + /** Events that occurred. */ + __u32 events; + } out; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_wait_for_events, 24 + 8); + +#define VBG_IOCTL_WAIT_FOR_EVENTS \ + _IOWR('V', 10, struct vbg_ioctl_wait_for_events) + + +/* + * IOCTL to VBoxGuest to interrupt (cancel) any pending + * VBG_IOCTL_WAIT_FOR_EVENTS and return. + * + * Handled inside the vboxguest driver and not seen by the host at all. + * After calling this, VBG_IOCTL_WAIT_FOR_EVENTS should no longer be called in + * the same session. Any VBOXGUEST_IOCTL_WAITEVENT calls in the same session + * done after calling this will directly exit with -EINTR. + */ +#define VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS \ + _IOWR('V', 11, struct vbg_ioctl_hdr) + + +/** VBG_IOCTL_CHANGE_FILTER_MASK data structure. */ +struct vbg_ioctl_change_filter { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Flags to set. */ + __u32 or_mask; + /** Flags to remove. */ + __u32 not_mask; + } in; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8); + +/* IOCTL to VBoxGuest to control the event filter mask. */ +#define VBG_IOCTL_CHANGE_FILTER_MASK \ + _IOWR('V', 12, struct vbg_ioctl_change_filter) + + +/** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */ +struct vbg_ioctl_set_guest_caps { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 or_mask; + /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */ + __u32 not_mask; + } in; + struct { + /** Capabilities held by the session after the call. */ + __u32 session_caps; + /** Capabilities for all the sessions after the call. */ + __u32 global_caps; + } out; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_set_guest_caps, 24 + 8); + +#define VBG_IOCTL_CHANGE_GUEST_CAPABILITIES \ + _IOWR('V', 14, struct vbg_ioctl_set_guest_caps) + + +/** VBG_IOCTL_CHECK_BALLOON data structure. */ +struct vbg_ioctl_check_balloon { + /** The header. */ + struct vbg_ioctl_hdr hdr; + union { + struct { + /** The size of the balloon in chunks of 1MB. */ + __u32 balloon_chunks; + /** + * false = handled in R0, no further action required. + * true = allocate balloon memory in R3. + */ + __u8 handle_in_r3; + /** Explicit padding, MBZ. */ + __u8 padding[3]; + } out; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_check_balloon, 24 + 8); + +/* + * IOCTL to check memory ballooning. 
+ */ + * + * The guest kernel module will ask the host for the current size of the + * balloon and adjust the size. Or it will set handle_in_r3 = true and R3 is + * responsible for allocating memory and calling VBG_IOCTL_CHANGE_BALLOON. + */ +#define VBG_IOCTL_CHECK_BALLOON \ + _IOWR('V', 17, struct vbg_ioctl_check_balloon) + + +/** VBG_IOCTL_WRITE_CORE_DUMP data structure. */ +struct vbg_ioctl_write_coredump { + struct vbg_ioctl_hdr hdr; + union { + struct { + __u32 flags; /** Flags (reserved, MBZ). */ + } in; + } u; +}; +VMMDEV_ASSERT_SIZE(vbg_ioctl_write_coredump, 24 + 4); + +#define VBG_IOCTL_WRITE_CORE_DUMP \ + _IOWR('V', 19, struct vbg_ioctl_write_coredump) + +#endif diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index e3301dbd27d4..c74372163ed2 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -301,6 +301,16 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) +/* + * The MSIX mappable capability informs that the MSIX data of a BAR can be + * mmapped, which allows direct access to non-MSIX registers which happen to be + * within the same system page. + * + * Even though userspace gets direct access to the MSIX data, the existing + * VFIO_DEVICE_SET_IRQS interface must still be used for MSIX configuration. + */ +#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3 + /** * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9, * struct vfio_irq_info) @@ -503,6 +513,68 @@ struct vfio_pci_hot_reset { #define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13) +/** + * VFIO_DEVICE_QUERY_GFX_PLANE - _IOW(VFIO_TYPE, VFIO_BASE + 14, + * struct vfio_device_query_gfx_plane) + * + * Set the drm_plane_type and flags, then retrieve the gfx plane info. + * + * flags supported: + * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_DMABUF are set + * to ask if the mdev supports dma-buf. 0 on support, -EINVAL on no + * support for dma-buf. + * - VFIO_GFX_PLANE_TYPE_PROBE and VFIO_GFX_PLANE_TYPE_REGION are set + * to ask if the mdev supports region. 0 on support, -EINVAL on no + * support for region. + * - VFIO_GFX_PLANE_TYPE_DMABUF or VFIO_GFX_PLANE_TYPE_REGION is set + * with each call to query the plane info. + * - Others are invalid and return -EINVAL. + * + * Note: + * 1. Plane could be disabled by guest. In that case, success will be + * returned with zero-initialized drm_format, size, width and height + * fields. + * 2. x_hot/y_hot is set to 0xFFFFFFFF if no hotspot information is available. + * + * Return: 0 on success, -errno on other failure. 
+ */ +struct vfio_device_gfx_plane_info { + __u32 argsz; + __u32 flags; +#define VFIO_GFX_PLANE_TYPE_PROBE (1 << 0) +#define VFIO_GFX_PLANE_TYPE_DMABUF (1 << 1) +#define VFIO_GFX_PLANE_TYPE_REGION (1 << 2) + /* in */ + __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ + /* out */ + __u32 drm_format; /* drm format of plane */ + __u64 drm_format_mod; /* tiled mode */ + __u32 width; /* width of plane */ + __u32 height; /* height of plane */ + __u32 stride; /* stride of plane */ + __u32 size; /* size of plane in bytes, align on page*/ + __u32 x_pos; /* horizontal position of cursor plane */ + __u32 y_pos; /* vertical position of cursor plane*/ + __u32 x_hot; /* horizontal position of cursor hotspot */ + __u32 y_hot; /* vertical position of cursor hotspot */ + union { + __u32 region_index; /* region index */ + __u32 dmabuf_id; /* dma-buf id */ + }; +}; + +#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) + +/** + * VFIO_DEVICE_GET_GFX_DMABUF - _IOW(VFIO_TYPE, VFIO_BASE + 15, __u32) + * + * Return a new dma-buf file descriptor for an exposed guest framebuffer + * described by the provided dmabuf_id. The dmabuf_id is returned from VFIO_ + * DEVICE_QUERY_GFX_PLANE as a token of the exposed guest framebuffer. + */ + +#define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15) + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1c095b5a99c5..982718965180 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -107,14 +107,14 @@ enum v4l2_field { transmitted first */ }; #define V4L2_FIELD_HAS_TOP(field) \ - ((field) == V4L2_FIELD_TOP ||\ + ((field) == V4L2_FIELD_TOP ||\ (field) == V4L2_FIELD_INTERLACED ||\ (field) == V4L2_FIELD_INTERLACED_TB ||\ (field) == V4L2_FIELD_INTERLACED_BT ||\ (field) == V4L2_FIELD_SEQ_TB ||\ (field) == V4L2_FIELD_SEQ_BT) #define V4L2_FIELD_HAS_BOTTOM(field) \ - ((field) == V4L2_FIELD_BOTTOM ||\ + ((field) == V4L2_FIELD_BOTTOM ||\ (field) == V4L2_FIELD_INTERLACED ||\ (field) == V4L2_FIELD_INTERLACED_TB ||\ (field) == V4L2_FIELD_INTERLACED_BT ||\ @@ -467,12 +467,12 @@ struct v4l2_capability { * V I D E O I M A G E F O R M A T */ struct v4l2_pix_format { - __u32 width; + __u32 width; __u32 height; __u32 pixelformat; __u32 field; /* enum v4l2_field */ - __u32 bytesperline; /* for padding, zero if unused */ - __u32 sizeimage; + __u32 bytesperline; /* for padding, zero if unused */ + __u32 sizeimage; __u32 colorspace; /* enum v4l2_colorspace */ __u32 priv; /* private data, depends on pixelformat */ __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ @@ -669,6 +669,12 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ #define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ +/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ +#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ +#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */ +#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */ +#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */ + /* SDR formats - used only for Software Defined Radio devices */ #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ #define 
V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */ @@ -688,6 +694,7 @@ struct v4l2_pix_format { /* Meta-data formats */ #define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */ #define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */ +#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */ /* priv field value to indicates that subsequent fields are valid. */ #define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe @@ -1166,7 +1173,7 @@ typedef __u64 v4l2_std_id; V4L2_STD_NTSC_M_JP |\ V4L2_STD_NTSC_M_KR) /* Secam macros */ -#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\ +#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\ V4L2_STD_SECAM_K |\ V4L2_STD_SECAM_K1) /* All Secam Standards */ @@ -1247,7 +1254,7 @@ struct v4l2_standard { }; /* - * D V B T T I M I N G S + * D V B T T I M I N G S */ /** struct v4l2_bt_timings - BT.656/BT.1120 timing data @@ -1588,7 +1595,7 @@ struct v4l2_ext_controls { struct v4l2_ext_control *controls; }; -#define V4L2_CTRL_ID_MASK (0x0fffffff) +#define V4L2_CTRL_ID_MASK (0x0fffffff) #ifndef __KERNEL__ #define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL) #endif @@ -1660,11 +1667,11 @@ struct v4l2_querymenu { /* Control flags */ #define V4L2_CTRL_FLAG_DISABLED 0x0001 #define V4L2_CTRL_FLAG_GRABBED 0x0002 -#define V4L2_CTRL_FLAG_READ_ONLY 0x0004 -#define V4L2_CTRL_FLAG_UPDATE 0x0008 -#define V4L2_CTRL_FLAG_INACTIVE 0x0010 -#define V4L2_CTRL_FLAG_SLIDER 0x0020 -#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 +#define V4L2_CTRL_FLAG_READ_ONLY 0x0004 +#define V4L2_CTRL_FLAG_UPDATE 0x0008 +#define V4L2_CTRL_FLAG_INACTIVE 0x0010 +#define V4L2_CTRL_FLAG_SLIDER 0x0020 +#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 #define V4L2_CTRL_FLAG_VOLATILE 0x0080 #define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100 #define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200 @@ -1778,21 +1785,21 @@ struct v4l2_hw_freq_seek { */ struct v4l2_rds_data { - __u8 lsb; - __u8 msb; - __u8 block; + __u8 lsb; + __u8 msb; + __u8 block; } __attribute__ ((packed)); -#define V4L2_RDS_BLOCK_MSK 0x7 -#define V4L2_RDS_BLOCK_A 0 -#define V4L2_RDS_BLOCK_B 1 -#define V4L2_RDS_BLOCK_C 2 -#define V4L2_RDS_BLOCK_D 3 -#define V4L2_RDS_BLOCK_C_ALT 4 -#define V4L2_RDS_BLOCK_INVALID 7 +#define V4L2_RDS_BLOCK_MSK 0x7 +#define V4L2_RDS_BLOCK_A 0 +#define V4L2_RDS_BLOCK_B 1 +#define V4L2_RDS_BLOCK_C 2 +#define V4L2_RDS_BLOCK_D 3 +#define V4L2_RDS_BLOCK_C_ALT 4 +#define V4L2_RDS_BLOCK_INVALID 7 #define V4L2_RDS_BLOCK_CORRECTED 0x40 -#define V4L2_RDS_BLOCK_ERROR 0x80 +#define V4L2_RDS_BLOCK_ERROR 0x80 /* * A U D I O @@ -2348,8 +2355,8 @@ struct v4l2_create_buffers { #define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop) #define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression) #define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression) -#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) -#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) +#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id) +#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format) #define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio) #define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout) #define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */ @@ -2370,8 +2377,8 @@ struct v4l2_create_buffers { * Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined. * You must be root to use these ioctls. Never use these in applications! 
*/ -#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) -#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) +#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register) +#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register) #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) #define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index fc353b518288..5de6ed37695b 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -57,6 +57,8 @@ * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */ + #ifndef VIRTIO_NET_NO_LEGACY #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ #endif /* VIRTIO_NET_NO_LEGACY */ @@ -76,6 +78,17 @@ struct virtio_net_config { __u16 max_virtqueue_pairs; /* Default maximum transmit unit advice */ __u16 mtu; + /* + * speed, in units of 1Mb. All values 0 to INT_MAX are legal. + * Any other value stands for unknown. + */ + __u32 speed; + /* + * 0x00 - half duplex + * 0x01 - full duplex + * Any other value stands for unknown. + */ + __u8 duplex; } __attribute__((packed)); /* diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h index 49e8fd08855a..56376d3907d8 100644 --- a/include/uapi/misc/cxl.h +++ b/include/uapi/misc/cxl.h @@ -20,20 +20,22 @@ struct cxl_ioctl_start_work { __u64 work_element_descriptor; __u64 amr; __s16 num_interrupts; - __s16 reserved1; - __s32 reserved2; + __u16 tid; + __s32 reserved1; + __u64 reserved2; __u64 reserved3; __u64 reserved4; __u64 reserved5; - __u64 reserved6; }; #define CXL_START_WORK_AMR 0x0000000000000001ULL #define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL #define CXL_START_WORK_ERR_FF 0x0000000000000004ULL +#define CXL_START_WORK_TID 0x0000000000000008ULL #define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\ CXL_START_WORK_NUM_IRQS |\ - CXL_START_WORK_ERR_FF) + CXL_START_WORK_ERR_FF |\ + CXL_START_WORK_TID) /* Possible modes that an afu can be in */ diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h new file mode 100644 index 000000000000..4b0b0b756f3e --- /dev/null +++ b/include/uapi/misc/ocxl.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* Copyright 2017 IBM Corp. 
*/ +#ifndef _UAPI_MISC_OCXL_H +#define _UAPI_MISC_OCXL_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +enum ocxl_event_type { + OCXL_AFU_EVENT_XSL_FAULT_ERROR = 0, +}; + +#define OCXL_KERNEL_EVENT_FLAG_LAST 0x0001 /* This is the last event pending */ + +struct ocxl_kernel_event_header { + __u16 type; + __u16 flags; + __u32 reserved; +}; + +struct ocxl_kernel_event_xsl_fault_error { + __u64 addr; + __u64 dsisr; + __u64 count; + __u64 reserved; +}; + +struct ocxl_ioctl_attach { + __u64 amr; + __u64 reserved1; + __u64 reserved2; + __u64 reserved3; +}; + +struct ocxl_ioctl_irq_fd { + __u64 irq_offset; + __s32 eventfd; + __u32 reserved; +}; + +/* ioctl numbers */ +#define OCXL_MAGIC 0xCA +/* AFU devices */ +#define OCXL_IOCTL_ATTACH _IOW(OCXL_MAGIC, 0x10, struct ocxl_ioctl_attach) +#define OCXL_IOCTL_IRQ_ALLOC _IOR(OCXL_MAGIC, 0x11, __u64) +#define OCXL_IOCTL_IRQ_FREE _IOW(OCXL_MAGIC, 0x12, __u64) +#define OCXL_IOCTL_IRQ_SET_FD _IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd) + +#endif /* _UAPI_MISC_OCXL_H */ diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index 398a514ee446..db54115be044 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -82,6 +82,15 @@ struct bnxt_re_qp_resp { __u32 rsvd; }; +struct bnxt_re_srq_req { + __u64 srqva; + __u64 srq_handle; +}; + +struct bnxt_re_srq_resp { + __u32 srqid; +}; + enum bnxt_re_shpg_offt { BNXT_RE_BEG_RESV_OFFT = 0x00, BNXT_RE_AVID_OFFT = 0x10, diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 7e11bb8651b6..04d0e67b1312 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -402,13 +402,18 @@ struct ib_uverbs_create_cq { __u64 driver_data[0]; }; +enum ib_uverbs_ex_create_cq_flags { + IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, + IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, +}; + struct ib_uverbs_ex_create_cq { __u64 user_handle; __u32 cqe; __u32 comp_vector; __s32 comp_channel; __u32 comp_mask; - __u32 flags; + __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */ __u32 reserved; }; @@ -449,7 +454,7 @@ struct ib_uverbs_wc { __u32 vendor_err; __u32 byte_len; union { - __u32 imm_data; + __be32 imm_data; __u32 invalidate_rkey; } ex; __u32 qp_num; @@ -765,7 +770,7 @@ struct ib_uverbs_send_wr { __u32 opcode; __u32 send_flags; union { - __u32 imm_data; + __be32 imm_data; __u32 invalidate_rkey; } ex; union { diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h index 224b52b6279c..7f9c37346613 100644 --- a/include/uapi/rdma/mlx4-abi.h +++ b/include/uapi/rdma/mlx4-abi.h @@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp { }; struct mlx4_ib_create_qp_rss { - __u64 rx_hash_fields_mask; - __u8 rx_hash_function; + __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */ + __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */ __u8 reserved[7]; __u8 rx_hash_key[40]; __u32 comp_mask; @@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields { MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4, MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5, MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6, - MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7 + MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7, + MLX4_IB_RX_HASH_INNER = 1ULL << 31, }; #endif /* MLX4_ABI_USER_H */ diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index a33e0517d3fd..1111aa4e7c1e 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -41,6 +41,9 @@ enum { MLX5_QP_FLAG_SIGNATURE = 1 << 0, MLX5_QP_FLAG_SCATTER_CQE = 1 << 1, 
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, + MLX5_QP_FLAG_BFREG_INDEX = 1 << 3, + MLX5_QP_FLAG_TYPE_DCT = 1 << 4, + MLX5_QP_FLAG_TYPE_DCI = 1 << 5, }; enum { @@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp { __u8 cqe_version; __u8 cmds_supp_uhw; __u8 eth_min_inline; - __u8 reserved2; + __u8 clock_info_versions; __u64 hca_core_clock_offset; __u32 log_uar_size; __u32 num_uars_per_page; + __u32 num_dyn_bfregs; + __u32 reserved3; }; struct mlx5_ib_alloc_pd_resp { @@ -280,8 +285,11 @@ struct mlx5_ib_create_qp { __u32 rq_wqe_shift; __u32 flags; __u32 uidx; - __u32 reserved0; - __u64 sq_buf_addr; + __u32 bfreg_index; + union { + __u64 sq_buf_addr; + __u64 access_key; + }; }; /* RX Hash function flags */ @@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields { MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6, MLX5_RX_HASH_DST_PORT_UDP = 1 << 7, /* Save bits for future fields */ - MLX5_RX_HASH_INNER = 1 << 31 + MLX5_RX_HASH_INNER = (1UL << 31), }; struct mlx5_ib_create_qp_rss { @@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp { __u8 reserved[6]; }; +struct mlx5_ib_modify_qp_resp { + __u32 response_length; + __u32 dctn; +}; + struct mlx5_ib_create_wq_resp { __u32 response_length; __u32 reserved; @@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq { __u32 comp_mask; __u32 reserved; }; + +struct mlx5_ib_clock_info { + __u32 sign; + __u32 resv; + __u64 nsec; + __u64 cycles; + __u64 frac; + __u32 mult; + __u32 shift; + __u64 mask; + __u64 overflow_period; +}; + +enum mlx5_ib_mmap_cmd { + MLX5_IB_MMAP_REGULAR_PAGE = 0, + MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1, + MLX5_IB_MMAP_WC_PAGE = 2, + MLX5_IB_MMAP_NC_PAGE = 3, + /* 5 is chosen in order to be compatible with old versions of libmlx5 */ + MLX5_IB_MMAP_CORE_CLOCK = 5, + MLX5_IB_MMAP_ALLOC_WC = 6, + MLX5_IB_MMAP_CLOCK_INFO = 7, +}; + +enum { + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1, +}; + +/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */ +enum { + MLX5_IB_CLOCK_INFO_V1 = 0, +}; #endif /* MLX5_ABI_USER_H */ diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index cc002e316d09..4c77e2a7b07e 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -227,14 +227,16 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_UNSPEC, RDMA_NLDEV_CMD_GET, /* can dump */ - RDMA_NLDEV_CMD_SET, - RDMA_NLDEV_CMD_NEW, - RDMA_NLDEV_CMD_DEL, - RDMA_NLDEV_CMD_PORT_GET, /* can dump */ - RDMA_NLDEV_CMD_PORT_SET, - RDMA_NLDEV_CMD_PORT_NEW, - RDMA_NLDEV_CMD_PORT_DEL, + /* 2 - 4 are free to use */ + + RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */ + + /* 6 - 8 are free to use */ + + RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */ + + RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */ RDMA_NLDEV_NUM_OPS }; @@ -303,6 +305,51 @@ enum rdma_nldev_attr { RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */ + RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */ + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */ + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */ + + RDMA_NLDEV_ATTR_RES_QP, /* nested table */ + RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */ + /* + * Local QPN + */ + RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */ + /* + * Remote QPN, + * Applicable for RC and UC only IBTA 11.2.5.3 QUERY QUEUE PAIR + */ + RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */ + /* + * Receive Queue PSN, + * Applicable for RC and UC only 11.2.5.3 QUERY QUEUE PAIR + */ + RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */ + /* + * Send Queue PSN + */ + RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */ + RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */ + /* + * QP types 
as visible to RDMA/core, the reserved QPT + * are not exported through this interface. + */ + RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */ + RDMA_NLDEV_ATTR_RES_STATE, /* u8 */ + /* + * Process ID which created object, + * in case of kernel origin, PID won't exist. + */ + RDMA_NLDEV_ATTR_RES_PID, /* u32 */ + /* + * The name of process created following resource. + * It will exist only for kernel objects. + * For user created objects, the user is supposed + * to read /proc/PID/comm file. + */ + RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */ + RDMA_NLDEV_ATTR_MAX }; #endif /* _UAPI_RDMA_NETLINK_H */ diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h index aaa352f2f110..02ca0d0f1eb7 100644 --- a/include/uapi/rdma/vmw_pvrdma-abi.h +++ b/include/uapi/rdma/vmw_pvrdma-abi.h @@ -52,12 +52,14 @@ #define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */ #define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */ #define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */ -#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */ -#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */ +#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */ +#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */ #define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */ -#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */ -#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */ -#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */ +#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */ +#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */ +#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */ +#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */ +#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */ enum pvrdma_wr_opcode { PVRDMA_WR_RDMA_WRITE, diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h deleted file mode 100644 index 78957c9626f5..000000000000 --- a/include/video/exynos5433_decon.h +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (C) 2014 Samsung Electronics Co.Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundationr - */ - -#ifndef EXYNOS_REGS_DECON_H -#define EXYNOS_REGS_DECON_H - -/* Exynos543X DECON */ -#define DECON_VIDCON0 0x0000 -#define DECON_VIDOUTCON0 0x0010 -#define DECON_WINCONx(n) (0x0020 + ((n) * 4)) -#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4)) -#define DECON_SHADOWCON 0x00A0 -#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20)) -#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20)) -#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20)) -#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20)) -#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20)) -#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10)) -#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10)) -#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10)) -#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10)) -#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10)) -#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10)) -#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4)) -#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4)) -#define DECON_VIDINTCON0 0x0220 -#define DECON_VIDINTCON1 0x0224 -#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8)) -#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8)) -#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4)) -#define DECON_WINxMAP(n) (0x0270 + ((n) * 4)) -#define DECON_QOSLUT07_00 0x02C0 -#define DECON_QOSLUT15_08 0x02C4 -#define DECON_QOSCTRL 0x02C8 -#define 
DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4)) -#define DECON_BLENDCON 0x0310 -#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4)) -#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4)) -#define DECON_FRAMEFIFO_REG7 0x051C -#define DECON_FRAMEFIFO_REG8 0x0520 -#define DECON_FRAMEFIFO_STATUS 0x0524 -#define DECON_CMU 0x1404 -#define DECON_UPDATE 0x1410 -#define DECON_CRFMID 0x1414 -#define DECON_UPDATE_SCHEME 0x1438 -#define DECON_VIDCON1 0x2000 -#define DECON_VIDCON2 0x2004 -#define DECON_VIDCON3 0x2008 -#define DECON_VIDCON4 0x200C -#define DECON_VIDTCON2 0x2028 -#define DECON_FRAME_SIZE 0x2038 -#define DECON_LINECNT_OP_THRESHOLD 0x203C -#define DECON_TRIGCON 0x2040 -#define DECON_TRIGSKIP 0x2050 -#define DECON_CRCRDATA 0x20B0 -#define DECON_CRCCTRL 0x20B4 - -/* Exynos5430 DECON */ -#define DECON_VIDTCON0 0x2020 -#define DECON_VIDTCON1 0x2024 - -/* Exynos5433 DECON */ -#define DECON_VIDTCON00 0x2010 -#define DECON_VIDTCON01 0x2014 -#define DECON_VIDTCON10 0x2018 -#define DECON_VIDTCON11 0x201C - -/* Exynos543X DECON Internal */ -#define DECON_W013DSTREOCON 0x0320 -#define DECON_W233DSTREOCON 0x0324 -#define DECON_FRAMEFIFO_REG0 0x0500 -#define DECON_ENHANCER_CTRL 0x2100 - -/* Exynos543X DECON TV */ -#define DECON_VCLKCON0 0x0014 -#define DECON_VIDINTCON2 0x0228 -#define DECON_VIDINTCON3 0x022C - -/* VIDCON0 */ -#define VIDCON0_SWRESET (1 << 28) -#define VIDCON0_CLKVALUP (1 << 14) -#define VIDCON0_VLCKFREE (1 << 5) -#define VIDCON0_STOP_STATUS (1 << 2) -#define VIDCON0_ENVID (1 << 1) -#define VIDCON0_ENVID_F (1 << 0) - -/* VIDOUTCON0 */ -#define VIDOUT_INTERLACE_FIELD_F (1 << 29) -#define VIDOUT_INTERLACE_EN_F (1 << 28) -#define VIDOUT_LCD_ON (1 << 24) -#define VIDOUT_IF_F_MASK (0x3 << 20) -#define VIDOUT_RGB_IF (0x0 << 20) -#define VIDOUT_COMMAND_IF (0x2 << 20) - -/* WINCONx */ -#define WINCONx_HAWSWP_F (1 << 16) -#define WINCONx_WSWP_F (1 << 15) -#define WINCONx_BURSTLEN_MASK (0x3 << 10) -#define WINCONx_BURSTLEN_16WORD (0x0 << 10) -#define WINCONx_BURSTLEN_8WORD (0x1 << 10) -#define WINCONx_BURSTLEN_4WORD (0x2 << 10) -#define WINCONx_BLD_PIX_F (1 << 6) -#define WINCONx_BPPMODE_MASK (0xf << 2) -#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2) -#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2) -#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2) -#define WINCONx_BPPMODE_24BPP_888 (0xb << 2) -#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2) -#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2) -#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2) -#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2) -#define WINCONx_ALPHA_SEL_F (1 << 1) -#define WINCONx_ENWIN_F (1 << 0) - -/* SHADOWCON */ -#define SHADOWCON_PROTECT_MASK GENMASK(14, 10) -#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n))) - -/* VIDOSDxD */ -#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16) -#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8) -#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0) - -/* VIDINTCON0 */ -#define VIDINTCON0_FRAMEDONE (1 << 17) -#define VIDINTCON0_FRAMESEL_BP (0 << 15) -#define VIDINTCON0_FRAMESEL_VS (1 << 15) -#define VIDINTCON0_FRAMESEL_AC (2 << 15) -#define VIDINTCON0_FRAMESEL_FP (3 << 15) -#define VIDINTCON0_INTFRMEN (1 << 12) -#define VIDINTCON0_INTEN (1 << 0) - -/* VIDINTCON1 */ -#define VIDINTCON1_INTFRMDONEPEND (1 << 2) -#define VIDINTCON1_INTFRMPEND (1 << 1) -#define VIDINTCON1_INTFIFOPEND (1 << 0) - -/* DECON_CMU */ -#define CMU_CLKGAGE_MODE_SFR_F (1 << 1) -#define CMU_CLKGAGE_MODE_MEM_F (1 << 0) - -/* DECON_UPDATE */ -#define STANDALONE_UPDATE_F (1 << 0) - -/* DECON_VIDCON1 */ -#define VIDCON1_LINECNT_MASK (0x0fff << 16) 
-#define VIDCON1_I80_ACTIVE (1 << 15) -#define VIDCON1_VSTATUS_MASK (0x3 << 13) -#define VIDCON1_VSTATUS_VS (0 << 13) -#define VIDCON1_VSTATUS_BP (1 << 13) -#define VIDCON1_VSTATUS_AC (2 << 13) -#define VIDCON1_VSTATUS_FP (3 << 13) -#define VIDCON1_VCLK_MASK (0x3 << 9) -#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) -#define VIDCON1_VCLK_HOLD (0x0 << 9) -#define VIDCON1_VCLK_RUN (0x1 << 9) - - -/* DECON_VIDTCON00 */ -#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16) -#define VIDTCON00_VFPD_F(x) ((x) & 0xfff) - -/* DECON_VIDTCON01 */ -#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16) - -/* DECON_VIDTCON10 */ -#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16) -#define VIDTCON10_HFPD_F(x) ((x) & 0xfff) - -/* DECON_VIDTCON11 */ -#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16) - -/* DECON_VIDTCON2 */ -#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16) -#define VIDTCON2_HOZVAL(x) ((x) & 0xfff) - -/* TRIGCON */ -#define TRIGCON_TRIGEN_PER_F (1 << 31) -#define TRIGCON_TRIGEN_F (1 << 30) -#define TRIGCON_TE_AUTO_MASK (1 << 29) -#define TRIGCON_WB_SWTRIGCMD (1 << 28) -#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26) -#define TRIGCON_TRIGMODE_W4BUF (1 << 25) -#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21) -#define TRIGCON_TRIGMODE_W3BUF (1 << 20) -#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16) -#define TRIGCON_TRIGMODE_W2BUF (1 << 15) -#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11) -#define TRIGCON_TRIGMODE_W1BUF (1 << 10) -#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6) -#define TRIGCON_TRIGMODE_W0BUF (1 << 5) -#define TRIGCON_HWTRIGMASK (1 << 4) -#define TRIGCON_HWTRIGEN (1 << 3) -#define TRIGCON_HWTRIG_INV (1 << 2) -#define TRIGCON_SWTRIGCMD (1 << 1) -#define TRIGCON_SWTRIGEN (1 << 0) - -/* DECON_CRCCTRL */ -#define CRCCTRL_CRCCLKEN (0x1 << 2) -#define CRCCTRL_CRCSTART_F (0x1 << 1) -#define CRCCTRL_CRCEN (0x1 << 0) -#define CRCCTRL_MASK (0x7) - -#endif /* EXYNOS_REGS_DECON_H */ diff --git a/include/video/exynos7_decon.h b/include/video/exynos7_decon.h deleted file mode 100644 index a62b11b613f6..000000000000 --- a/include/video/exynos7_decon.h +++ /dev/null @@ -1,349 +0,0 @@ -/* include/video/exynos7_decon.h - * - * Copyright (c) 2014 Samsung Electronics Co., Ltd. - * Author: Ajay Kumar <ajaykumar.rs@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -/* VIDCON0 */ -#define VIDCON0 0x00 - -#define VIDCON0_SWRESET (1 << 28) -#define VIDCON0_DECON_STOP_STATUS (1 << 2) -#define VIDCON0_ENVID (1 << 1) -#define VIDCON0_ENVID_F (1 << 0) - -/* VIDOUTCON0 */ -#define VIDOUTCON0 0x4 - -#define VIDOUTCON0_DUAL_MASK (0x3 << 24) -#define VIDOUTCON0_DUAL_ON (0x3 << 24) -#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24) -#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24) -#define VIDOUTCON0_DUAL_OFF (0x0 << 24) -#define VIDOUTCON0_IF_SHIFT 23 -#define VIDOUTCON0_IF_MASK (0x1 << 23) -#define VIDOUTCON0_RGBIF (0x0 << 23) -#define VIDOUTCON0_I80IF (0x1 << 23) - -/* VIDCON3 */ -#define VIDCON3 0x8 - -/* VIDCON4 */ -#define VIDCON4 0xC -#define VIDCON4_FIFOCNT_START_EN (1 << 0) - -/* VCLKCON0 */ -#define VCLKCON0 0x10 -#define VCLKCON0_CLKVALUP (1 << 8) -#define VCLKCON0_VCLKFREE (1 << 0) - -/* VCLKCON */ -#define VCLKCON1 0x14 -#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0) -#define VCLKCON2 0x18 - -/* SHADOWCON */ -#define SHADOWCON 0x30 - -#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win))) - -/* WINCONx */ -#define WINCON(_win) (0x50 + ((_win) * 4)) - -#define WINCONx_BUFSTATUS (0x3 << 30) -#define WINCONx_BUFSEL_MASK (0x3 << 28) -#define WINCONx_BUFSEL_SHIFT 28 -#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18) -#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18) -#define WINCONx_BURSTLEN_16WORD (0x0 << 11) -#define WINCONx_BURSTLEN_8WORD (0x1 << 11) -#define WINCONx_BURSTLEN_MASK (0x1 << 11) -#define WINCONx_BURSTLEN_SHIFT 11 -#define WINCONx_BLD_PLANE (0 << 8) -#define WINCONx_BLD_PIX (1 << 8) -#define WINCONx_ALPHA_MUL (1 << 7) - -#define WINCONx_BPPMODE_MASK (0xf << 2) -#define WINCONx_BPPMODE_SHIFT 2 -#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2) -#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2) -#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2) -#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2) -#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2) -#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2) -#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2) -#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2) -#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2) -#define WINCONx_ALPHA_SEL (1 << 1) -#define WINCONx_ENWIN (1 << 0) - -#define WINCON1_ALPHA_MUL_F (1 << 7) -#define WINCON2_ALPHA_MUL_F (1 << 7) -#define WINCON3_ALPHA_MUL_F (1 << 7) -#define WINCON4_ALPHA_MUL_F (1 << 7) - -/* VIDOSDxH: The height for the OSD image(READ ONLY)*/ -#define VIDOSD_H(_x) (0x80 + ((_x) * 4)) - -/* Frame buffer start addresses: VIDWxxADD0n */ -#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10)) -#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10)) -#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10)) - -#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8)) -#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8)) -#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8)) -#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8)) -#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4)) -#define VIDW_BLKSIZE(win) (0x0200 + ((_win) * 4)) - -/* Interrupt controls register */ -#define VIDINTCON2 0x228 - -#define VIDINTCON1_INTEXTRA1_EN (1 << 1) -#define VIDINTCON1_INTEXTRA0_EN (1 << 0) - -/* Interrupt controls and status register */ -#define VIDINTCON3 0x22C - -#define VIDINTCON1_INTEXTRA1_PEND (1 << 1) -#define VIDINTCON1_INTEXTRA0_PEND (1 << 0) - -/* VIDOSDxA ~ VIDOSDxE */ -#define VIDOSD_BASE 0x230 - -#define OSD_STRIDE 0x20 - -#define VIDOSD_A(_win) (VIDOSD_BASE + \ - ((_win) * OSD_STRIDE) + 0x00) -#define VIDOSD_B(_win) (VIDOSD_BASE + \ - ((_win) * OSD_STRIDE) + 0x04) -#define VIDOSD_C(_win) (VIDOSD_BASE + \ - ((_win) * 
OSD_STRIDE) + 0x08) -#define VIDOSD_D(_win) (VIDOSD_BASE + \ - ((_win) * OSD_STRIDE) + 0x0C) -#define VIDOSD_E(_win) (VIDOSD_BASE + \ - ((_win) * OSD_STRIDE) + 0x10) - -#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13) -#define VIDOSDxA_TOPLEFT_X_SHIFT 13 -#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff -#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13) - -#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0) -#define VIDOSDxA_TOPLEFT_Y_SHIFT 0 -#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff -#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0) - -#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13) -#define VIDOSDxB_BOTRIGHT_X_SHIFT 13 -#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff -#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13) - -#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0) -#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0 -#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff -#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0) - -#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16) -#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8) -#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0) - -#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16) -#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8) -#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) >> 0) - -/* Window MAP (Color map) */ -#define WINxMAP(_win) (0x340 + ((_win) * 4)) - -#define WINxMAP_MAP (1 << 24) -#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0) -#define WINxMAP_MAP_COLOUR_SHIFT 0 -#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff -#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0) - -/* Window colour-key control registers */ -#define WKEYCON 0x370 - -#define WKEYCON0 0x00 -#define WKEYCON1 0x04 -#define WxKEYCON0_KEYBL_EN (1 << 26) -#define WxKEYCON0_KEYEN_F (1 << 25) -#define WxKEYCON0_DIRCON (1 << 24) -#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0) -#define WxKEYCON0_COMPKEY_SHIFT 0 -#define WxKEYCON0_COMPKEY_LIMIT 0xffffff -#define WxKEYCON0_COMPKEY(_x) ((_x) << 0) -#define WxKEYCON1_COLVAL_MASK (0xffffff << 0) -#define WxKEYCON1_COLVAL_SHIFT 0 -#define WxKEYCON1_COLVAL_LIMIT 0xffffff -#define WxKEYCON1_COLVAL(_x) ((_x) << 0) - -/* color key control register for hardware window 1 ~ 4. */ -#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8)) -/* color key value register for hardware window 1 ~ 4. 
*/ -#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8)) - -/* Window KEY Alpha value */ -#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4)) - -#define Wx_KEYALPHA_R_F_SHIFT 16 -#define Wx_KEYALPHA_G_F_SHIFT 8 -#define Wx_KEYALPHA_B_F_SHIFT 0 - -/* Blending equation */ -#define BLENDE(_win) (0x03C0 + ((_win) * 4)) -#define BLENDE_COEF_ZERO 0x0 -#define BLENDE_COEF_ONE 0x1 -#define BLENDE_COEF_ALPHA_A 0x2 -#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3 -#define BLENDE_COEF_ALPHA_B 0x4 -#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5 -#define BLENDE_COEF_ALPHA0 0x6 -#define BLENDE_COEF_A 0xA -#define BLENDE_COEF_ONE_MINUS_A 0xB -#define BLENDE_COEF_B 0xC -#define BLENDE_COEF_ONE_MINUS_B 0xD -#define BLENDE_Q_FUNC(_v) ((_v) << 18) -#define BLENDE_P_FUNC(_v) ((_v) << 12) -#define BLENDE_B_FUNC(_v) ((_v) << 6) -#define BLENDE_A_FUNC(_v) ((_v) << 0) - -/* Blending equation control */ -#define BLENDCON 0x3D8 -#define BLENDCON_NEW_MASK (1 << 0) -#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0) -#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0) - -/* Interrupt control register */ -#define VIDINTCON0 0x500 - -#define VIDINTCON0_WAKEUP_MASK (0x3f << 26) -#define VIDINTCON0_INTEXTRAEN (1 << 21) - -#define VIDINTCON0_FRAMESEL0_SHIFT 15 -#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15) -#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15) -#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15) -#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15) -#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15) - -#define VIDINTCON0_INT_FRAME (1 << 11) - -#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3) -#define VIDINTCON0_FIFOLEVEL_SHIFT 3 -#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3) -#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3) -#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3) -#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3) - -#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1) -#define VIDINTCON0_INT_FIFO (1 << 1) - -#define VIDINTCON0_INT_ENABLE (1 << 0) - -/* Interrupt controls and status register */ -#define VIDINTCON1 0x504 - -#define VIDINTCON1_INT_EXTRA (1 << 3) -#define VIDINTCON1_INT_I80 (1 << 2) -#define VIDINTCON1_INT_FRAME (1 << 1) -#define VIDINTCON1_INT_FIFO (1 << 0) - -/* VIDCON1 */ -#define VIDCON1(_x) (0x0600 + ((_x) * 0x50)) -#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff) -#define VIDCON1_VCLK_MASK (0x3 << 9) -#define VIDCON1_VCLK_HOLD (0x0 << 9) -#define VIDCON1_VCLK_RUN (0x1 << 9) -#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) -#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4) -#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4) -#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4) -#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4) -#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4) -#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4) -#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4) - -/* VIDTCON0 */ -#define VIDTCON0 0x610 - -#define VIDTCON0_VBPD_MASK (0xffff << 16) -#define VIDTCON0_VBPD_SHIFT 16 -#define VIDTCON0_VBPD_LIMIT 0xffff -#define VIDTCON0_VBPD(_x) ((_x) << 16) - -#define VIDTCON0_VFPD_MASK (0xffff << 0) -#define VIDTCON0_VFPD_SHIFT 0 -#define VIDTCON0_VFPD_LIMIT 0xffff -#define VIDTCON0_VFPD(_x) ((_x) << 0) - -/* VIDTCON1 */ -#define VIDTCON1 0x614 - -#define VIDTCON1_VSPW_MASK (0xffff << 16) -#define VIDTCON1_VSPW_SHIFT 16 -#define VIDTCON1_VSPW_LIMIT 0xffff -#define VIDTCON1_VSPW(_x) ((_x) << 16) - -/* VIDTCON2 */ -#define VIDTCON2 0x618 - -#define VIDTCON2_HBPD_MASK (0xffff << 16) -#define VIDTCON2_HBPD_SHIFT 16 -#define VIDTCON2_HBPD_LIMIT 0xffff -#define VIDTCON2_HBPD(_x) ((_x) << 16) - -#define VIDTCON2_HFPD_MASK (0xffff << 0) -#define 
VIDTCON2_HFPD_SHIFT 0 -#define VIDTCON2_HFPD_LIMIT 0xffff -#define VIDTCON2_HFPD(_x) ((_x) << 0) - -/* VIDTCON3 */ -#define VIDTCON3 0x61C - -#define VIDTCON3_HSPW_MASK (0xffff << 16) -#define VIDTCON3_HSPW_SHIFT 16 -#define VIDTCON3_HSPW_LIMIT 0xffff -#define VIDTCON3_HSPW(_x) ((_x) << 16) - -/* VIDTCON4 */ -#define VIDTCON4 0x620 - -#define VIDTCON4_LINEVAL_MASK (0xfff << 16) -#define VIDTCON4_LINEVAL_SHIFT 16 -#define VIDTCON4_LINEVAL_LIMIT 0xfff -#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16) - -#define VIDTCON4_HOZVAL_MASK (0xfff << 0) -#define VIDTCON4_HOZVAL_SHIFT 0 -#define VIDTCON4_HOZVAL_LIMIT 0xfff -#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0) - -/* LINECNT OP THRSHOLD*/ -#define LINECNT_OP_THRESHOLD 0x630 - -/* CRCCTRL */ -#define CRCCTRL 0x6C8 -#define CRCCTRL_CRCCLKEN (0x1 << 2) -#define CRCCTRL_CRCSTART_F (0x1 << 1) -#define CRCCTRL_CRCEN (0x1 << 0) - -/* DECON_CMU */ -#define DECON_CMU 0x704 - -#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3 -#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2) -#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1) -#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0) - -/* DECON_UPDATE */ -#define DECON_UPDATE 0x710 - -#define DECON_UPDATE_SLAVE_SYNC (1 << 4) -#define DECON_UPDATE_STANDALONE_F (1 << 0) diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index ce4c07688b13..abbad94e14a1 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -344,7 +344,7 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan); int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, unsigned int axi_id, unsigned int width, unsigned int height, unsigned int stride, - u32 format, unsigned long *eba); + u32 format, uint64_t modifier, unsigned long *eba); /* * IPU CMOS Sensor Interface (csi) functions |
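The vboxguest UAPI introduced above is driven entirely through requests prefixed by struct vbg_ioctl_hdr: userspace fills in size_in and version, issues the ioctl, and then checks both the ioctl return value and the VBox status code in hdr.rc. The following is an illustrative sketch only, not part of the patch; the /dev/vboxguest device node path, the 5-second timeout, the all-ones event mask, and the availability of the installed uapi headers are assumptions made for the example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vboxguest.h>

int main(void)
{
    struct vbg_ioctl_driver_version_info info;
    struct vbg_ioctl_wait_for_events wfe;
    int fd = open("/dev/vboxguest", O_RDWR);   /* assumed device node path */

    if (fd < 0)
        return 1;

    /* Version handshake: every request starts with a filled-in vbg_ioctl_hdr. */
    memset(&info, 0, sizeof(info));
    info.hdr.size_in = sizeof(info);
    info.hdr.version = VBG_IOCTL_HDR_VERSION;
    info.u.in.req_version = VBG_IOC_VERSION;
    info.u.in.min_version = VBG_IOC_VERSION & 0xffff0000u; /* major part only */
    if (ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info) < 0 || info.hdr.rc < 0) {
        close(fd);
        return 1;
    }
    printf("session version: 0x%x\n", info.u.out.session_version);

    /* Wait up to 5 seconds for any host event (example values). */
    memset(&wfe, 0, sizeof(wfe));
    wfe.hdr.size_in = sizeof(wfe);
    wfe.hdr.version = VBG_IOCTL_HDR_VERSION;
    wfe.u.in.timeout_ms = 5000;
    wfe.u.in.events = ~0u;
    if (ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &wfe) == 0 && wfe.hdr.rc >= 0)
        printf("events: 0x%x\n", wfe.u.out.events);

    close(fd);
    return 0;
}

Note the two error levels in this interface: the ioctl itself fails with -1/errno for malformed requests, while host-side failures come back as VERR_ codes from vbox_err.h in hdr.rc; a timeout in the wait above, for instance, would be expected to surface as a negative rc (such as VERR_TIMEOUT) rather than as an errno.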