author	Jakub Kicinski <kuba@kernel.org>	2022-06-16 20:13:52 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2022-06-16 20:13:52 -0700
commit	9cbc9911260f24cd3ce2bfb3bcdf4792366a5745 (patch)
tree	4600a657ac1e8977ff706bf398eb27de2dedab7f /include
parent	5dcb50c009c9f8ec1cfca6a81a05c0060a5bbf68 (diff)
parent	48a23ec6ff2b2a5effe8d3ae5f17fc6b7f35df65 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bio.h			1
-rw-r--r--	include/linux/cpu.h			3
-rw-r--r--	include/linux/crc-itu-t.h		2
-rw-r--r--	include/linux/mm_types.h		5
-rw-r--r--	include/linux/netfs.h			25
-rw-r--r--	include/linux/random.h			3
-rw-r--r--	include/linux/sunrpc/xdr.h		16
-rw-r--r--	include/linux/vdpa.h			5
-rw-r--r--	include/linux/vmalloc.h			1
-rw-r--r--	include/linux/workqueue.h		66
-rw-r--r--	include/linux/xarray.h			1
-rw-r--r--	include/net/inet_connection_sock.h	3
-rw-r--r--	include/net/inet_hashtables.h		68
-rw-r--r--	include/net/sock.h			14
-rw-r--r--	include/trace/events/workqueue.h	8
15 files changed, 103 insertions, 118 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1cf3738ef1ea..992ee987f273 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -403,7 +403,6 @@ enum {
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
unsigned int opf, gfp_t gfp_mask,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 54dc2f9a2d56..2c7477354744 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -65,6 +65,9 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
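
Like the other cpu_show_*() handlers declared alongside it, cpu_show_mmio_stale_data() backs a read-only attribute under /sys/devices/system/cpu/vulnerabilities/. A minimal userspace sketch of querying it; the exact file name (mmio_stale_data) is assumed from the existing attribute naming convention:

#include <stdio.h>

/* Print the MMIO Stale Data mitigation status reported by the kernel. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
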
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a4367051e192..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -4,7 +4,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*/
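
The fix is easy to sanity-check: 0x1021 has bits 12, 5 and 0 set, i.e. x^12 + x^5 + 1, with the leading x^16 term implicit in the 16-bit register width. A standalone bitwise sketch (the kernel's crc_itu_t() itself is table-driven; this version is for illustration only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bitwise CRC ITU-T V.41: poly 0x1021, init 0, no bit reflection. */
static uint16_t crc_itu_t_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	/* Standard check value: CRC("123456789") == 0x31c3. */
	printf("%04x\n", crc_itu_t_bitwise(0, (const uint8_t *)"123456789", 9));
	return 0;
}
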
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index b34ff2cdbc4f..c29ab4c0cd5c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -227,6 +227,7 @@ struct page {
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
* @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
* @mapping: The file this page belongs to, or refers to the anon_vma for
* anonymous memory.
* @index: Offset within the file, in units of pages. For anonymous memory,
@@ -255,10 +256,14 @@ struct folio {
unsigned long flags;
union {
struct list_head lru;
+ /* private: avoid cluttering the output */
struct {
void *__filler;
+ /* public: */
unsigned int mlock_count;
+ /* private: */
};
+ /* public: */
};
struct address_space *mapping;
pgoff_t index;
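
The union above overlays mlock_count onto the second word of the lru list_head: void *__filler pads past lru.next so the counter aliases lru.prev, while the /* private: */ and /* public: */ markers only control which members kernel-doc renders. A userspace sketch of the same aliasing, using stand-in types:

#include <assert.h>
#include <stddef.h>

/* Stand-ins for the kernel types, just to show the layout trick. */
struct list_head { struct list_head *next, *prev; };

struct item {
	union {
		struct list_head lru;
		struct {
			void *__filler;			/* occupies the lru.next slot */
			unsigned int mlock_count;	/* overlays lru.prev */
		};
	};
};

int main(void)
{
	/* The counter lands exactly where lru.prev lives. */
	assert(offsetof(struct item, mlock_count) ==
	       offsetof(struct item, lru.prev));
	return 0;
}
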
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 6dbb4c9ce50d..1773e5df8e65 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -206,7 +206,9 @@ struct netfs_io_request {
*/
struct netfs_request_ops {
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
int (*begin_cache_operation)(struct netfs_io_request *rreq);
+
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -214,7 +216,6 @@ struct netfs_request_ops {
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct folio *folio, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
- void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
/*
@@ -277,7 +278,8 @@ struct netfs_cache_ops {
struct readahead_control;
extern void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
-extern int netfs_write_begin(struct file *, struct address_space *,
+extern int netfs_write_begin(struct netfs_inode *,
+ struct file *, struct address_space *,
loff_t, unsigned int, struct folio **,
void **);
@@ -302,19 +304,17 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
/**
* netfs_inode_init - Initialise a netfslib inode context
- * @inode: The inode with which the context is associated
+ * @ctx: The netfs inode to initialise
* @ops: The netfs's operations list
*
* Initialise the netfs library context struct. This is expected to follow on
* directly from the VFS inode struct.
*/
-static inline void netfs_inode_init(struct inode *inode,
+static inline void netfs_inode_init(struct netfs_inode *ctx,
const struct netfs_request_ops *ops)
{
- struct netfs_inode *ctx = netfs_inode(inode);
-
ctx->ops = ops;
- ctx->remote_i_size = i_size_read(inode);
+ ctx->remote_i_size = i_size_read(&ctx->inode);
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
@@ -322,28 +322,25 @@ static inline void netfs_inode_init(struct inode *inode,
/**
* netfs_resize_file - Note that a file got resized
- * @inode: The inode being resized
+ * @ctx: The netfs inode being resized
* @new_i_size: The new file size
*
* Inform the netfs lib that a file got resized so that it can adjust its state.
*/
-static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
{
- struct netfs_inode *ctx = netfs_inode(inode);
-
ctx->remote_i_size = new_i_size;
}
/**
* netfs_i_cookie - Get the cache cookie from the inode
- * @inode: The inode to query
+ * @ctx: The netfs inode to query
*
* Get the caching cookie (if enabled) from the network filesystem's inode.
*/
-static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode)
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
{
#if IS_ENABLED(CONFIG_FSCACHE)
- struct netfs_inode *ctx = netfs_inode(inode);
return ctx->cache;
#else
return NULL;
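
Taken together, these hunks move netfs_write_begin(), netfs_inode_init(), netfs_resize_file() and netfs_i_cookie() from taking the VFS struct inode to taking the wrapping struct netfs_inode directly, dropping the netfs_inode() lookup on every call. A hedged sketch of a caller on the new API; myfs_inode, myfs_req_ops and myfs_setup_inode are hypothetical names:

#include <linux/netfs.h>

/* Hypothetical filesystem inode embedding the netfs context; the netfs
 * context in turn embeds the VFS inode, which is why netfs_inode_init()
 * can read the size via i_size_read(&ctx->inode) itself.
 */
struct myfs_inode {
	struct netfs_inode netfs;	/* conventionally placed first */
	unsigned long myfs_private;	/* ...filesystem state... */
};

static const struct netfs_request_ops myfs_req_ops = {
	/* .init_request, .issue_read, ... supplied by the filesystem */
};

static void myfs_setup_inode(struct myfs_inode *mi)
{
	/* Previously: netfs_inode_init(&mi->netfs.inode, &myfs_req_ops); */
	netfs_inode_init(&mi->netfs, &myfs_req_ops);
}
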
diff --git a/include/linux/random.h b/include/linux/random.h
index fae0c84027fd..20e389a14e5c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -13,7 +13,7 @@
struct notifier_block;
void add_device_randomness(const void *buf, size_t len);
-void add_bootloader_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
@@ -74,7 +74,6 @@ static inline unsigned long get_random_canary(void)
int __init random_init(const char *command_line);
bool rng_is_initialized(void);
-bool rng_has_arch_random(void);
int wait_for_random_bytes(void);
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 4417f667c757..5860f32e3958 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -243,7 +243,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
@@ -307,6 +307,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr)
}
/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+ if (unlikely(xdr->scratch.iov_len))
+ __xdr_commit_encode(xdr);
+}
+
+/**
* xdr_stream_remaining - Return the number of bytes remaining in the stream
* @xdr: pointer to struct xdr_stream
*
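
The rename plus the new inline wrapper is a classic fast-path/slow-path split: the one-word test on xdr->scratch.iov_len is inlined at every call site, and the out-of-line __xdr_commit_encode() copy runs only in the rare case where an encoded item straddled a page boundary. The shape of the pattern, as a self-contained userspace sketch with hypothetical names:

#include <stddef.h>
#include <string.h>

/* scratch_len is zero on the common path; it is only set when an item
 * had to be written to a temporary area because it crossed a boundary.
 */
struct stream {
	char  *dest;		/* where the straddling bytes belong */
	char   scratch[64];	/* temporary holding area */
	size_t scratch_len;	/* 0 almost always */
};

/* Out-of-line slow path: copy the held-back bytes into place. */
static void __stream_commit(struct stream *s)
{
	memcpy(s->dest, s->scratch, s->scratch_len);
	s->scratch_len = 0;
}

/* Inlined fast path: one load and a well-predicted branch per call. */
static inline void stream_commit(struct stream *s)
{
	if (s->scratch_len)	/* unlikely() in the kernel version */
		__stream_commit(s);
}

int main(void)
{
	char out[8] = { 0 };
	struct stream s = { .dest = out };

	memcpy(s.scratch, "xdr", 3);
	s.scratch_len = 3;
	stream_commit(&s);	/* slow path: copies the 3 bytes */
	stream_commit(&s);	/* fast path: nothing to do */
	return 0;
}
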
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 4700a88a28f6..7b4a13d3bd91 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -178,7 +178,8 @@ struct vdpa_map_file {
* for the device
* @vdev: vdpa device
Returns virtqueue align requirement
- * @get_vq_group: Get the group id for a specific virtqueue
+ * @get_vq_group: Get the group id for a specific
+ * virtqueue (optional)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns u32: group id for this virtqueue
@@ -243,7 +244,7 @@ struct vdpa_map_file {
* Returns the iova range supported by
* the device.
* @set_group_asid: Set address space identifier for a
- * virtqueue group
+ * virtqueue group (optional)
* @vdev: vdpa device
* @group: virtqueue group
* @asid: address space id for this group
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index b159c2789961..096d48aa3437 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -215,6 +215,7 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
+struct vmap_area *find_vmap_area(unsigned long addr);
static inline bool is_vm_area_hugepages(const void *addr)
{
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 7fee9b6cfede..62e75dd40d9a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
@@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
@@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work)
return queue_work(system_wq, work);
}
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+ __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
/**
* flush_scheduled_work - ensure that any scheduled work has run to completion.
*
* Forces execution of the kernel-global workqueue and blocks until its
* completion.
*
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
*
* One of the work items currently on the workqueue needs to acquire
* a lock held by your code or its caller.
@@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work)
* need to know that a particular work item isn't queued and isn't running.
* In such cases you should use cancel_delayed_work_sync() or
* cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
*/
-static inline void flush_scheduled_work(void)
-{
- flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work() \
+({ \
+ if (0) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(system_wq); \
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq) \
+({ \
+ struct workqueue_struct *_wq = (wq); \
+ \
+ if ((__builtin_constant_p(_wq == system_wq) && \
+ _wq == system_wq) || \
+ (__builtin_constant_p(_wq == system_highpri_wq) && \
+ _wq == system_highpri_wq) || \
+ (__builtin_constant_p(_wq == system_long_wq) && \
+ _wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_unbound_wq) && \
+ _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_wq) && \
+ _wq == system_freezable_wq) || \
+ (__builtin_constant_p(_wq == system_power_efficient_wq) && \
+ _wq == system_power_efficient_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+ _wq == system_freezable_power_efficient_wq)) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(_wq); \
+})
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
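
Two compiler features make the deprecation machinery above work: __compiletime_warning() (the kernel's wrapper for GCC's warning function attribute, also honored by newer Clang) emits a diagnostic whenever a call to the annotated function survives dead-code elimination, and __builtin_constant_p() restricts the check to arguments the compiler can prove at build time point at a system-wide workqueue; the dead `if (0)` call in flush_scheduled_work() deliberately keeps that warning disabled while in-tree users remain. A standalone sketch of the mechanism with hypothetical names; compile with gcc -O2 -c so provably-false branches drop out:

/* A build-time diagnostic attaches to any call that survives dead-code
 * elimination (__compiletime_warning() in the kernel wraps this attribute).
 */
extern void __warn_global_flush(void)
	__attribute__((warning("Please avoid flushing the global queue.")));

struct queue { int id; };

extern struct queue *system_queue;
void __do_flush(struct queue *q);

#define flush_queue(q)						\
({								\
	struct queue *_q = (q);					\
								\
	/* Only warn when _q is provably the global queue. */	\
	if (__builtin_constant_p(_q == system_queue) &&		\
	    _q == system_queue)					\
		__warn_global_flush();				\
	__do_flush(_q);						\
})

void caller(struct queue *private_q)
{
	flush_queue(private_q);		/* not provably global: no warning */
	flush_queue(system_queue);	/* warns at compile time under -O2 */
}
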
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 72feab5ea8d4..c29e11b2c073 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);
bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);
void xas_create_range(struct xa_state *);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 077cd730ce2f..85cd695e7fd1 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -25,7 +25,6 @@
#undef INET_CSK_CLEAR_TIMERS
struct inet_bind_bucket;
-struct inet_bind2_bucket;
struct tcp_congestion_ops;
/*
@@ -58,7 +57,6 @@ struct inet_connection_sock_af_ops {
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
- * @icsk_bind2_hash: Bind node in the bhash2 table
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
@@ -85,7 +83,6 @@ struct inet_connection_sock {
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
- struct inet_bind2_bucket *icsk_bind2_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a0887b70967b..ebfa3df6f8dc 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -90,32 +90,11 @@ struct inet_bind_bucket {
struct hlist_head owners;
};
-struct inet_bind2_bucket {
- possible_net_t ib_net;
- int l3mdev;
- unsigned short port;
- union {
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr v6_rcv_saddr;
-#endif
- __be32 rcv_saddr;
- };
- /* Node in the inet2_bind_hashbucket chain */
- struct hlist_node node;
- /* List of sockets hashed to this bucket */
- struct hlist_head owners;
-};
-
static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
return read_pnet(&ib->ib_net);
}
-static inline struct net *ib2_net(struct inet_bind2_bucket *ib)
-{
- return read_pnet(&ib->ib_net);
-}
-
#define inet_bind_bucket_for_each(tb, head) \
hlist_for_each_entry(tb, head, node)
@@ -124,15 +103,6 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
-/* This is synchronized using the inet_bind_hashbucket's spinlock.
- * Instead of having separate spinlocks, the inet_bind2_hashbucket can share
- * the inet_bind_hashbucket's given that in every case where the bhash2 table
- * is useful, a lookup in the bhash table also occurs.
- */
-struct inet_bind2_hashbucket {
- struct hlist_head chain;
-};
-
/* Sockets can be hashed in established or listening table.
* We must use different 'nulls' end-of-chain value for all hash buckets :
* A socket might transition from ESTABLISH to LISTEN state without
@@ -164,12 +134,6 @@ struct inet_hashinfo {
*/
struct kmem_cache *bind_bucket_cachep;
struct inet_bind_hashbucket *bhash;
- /* The 2nd binding table hashed by port and address.
- * This is used primarily for expediting the resolution of bind
- * conflicts.
- */
- struct kmem_cache *bind2_bucket_cachep;
- struct inet_bind2_hashbucket *bhash2;
unsigned int bhash_size;
/* The 2nd listener table hashed by local port and address */
@@ -229,36 +193,6 @@ inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
-static inline bool check_bind_bucket_match(struct inet_bind_bucket *tb,
- struct net *net,
- const unsigned short port,
- int l3mdev)
-{
- return net_eq(ib_net(tb), net) && tb->port == port &&
- tb->l3mdev == l3mdev;
-}
-
-struct inet_bind2_bucket *
-inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
- struct inet_bind2_hashbucket *head,
- const unsigned short port, int l3mdev,
- const struct sock *sk);
-
-void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
- struct inet_bind2_bucket *tb);
-
-struct inet_bind2_bucket *
-inet_bind2_bucket_find(struct inet_hashinfo *hinfo, struct net *net,
- const unsigned short port, int l3mdev,
- struct sock *sk,
- struct inet_bind2_hashbucket **head);
-
-bool check_bind2_bucket_match_nulladdr(struct inet_bind2_bucket *tb,
- struct net *net,
- const unsigned short port,
- int l3mdev,
- const struct sock *sk);
-
static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
const u32 bhash_size)
{
@@ -266,7 +200,7 @@ static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
}
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
- struct inet_bind2_bucket *tb2, const unsigned short snum);
+ const unsigned short snum);
/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);
diff --git a/include/net/sock.h b/include/net/sock.h
index 304a5e39d41e..5bed1ea7a722 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -348,7 +348,6 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
- * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -538,7 +537,6 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
- struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -819,16 +817,6 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
-static inline void __sk_del_bind2_node(struct sock *sk)
-{
- __hlist_del(&sk->sk_bind2_node);
-}
-
-static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
-{
- hlist_add_head(&sk->sk_bind2_node, list);
-}
-
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -846,8 +834,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
-#define sk_for_each_bound_bhash2(__sk, list) \
- hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 6154a2e72bce..262d52021c23 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -22,7 +22,7 @@ struct pool_workqueue;
*/
TRACE_EVENT(workqueue_queue_work,
- TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+ TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
struct work_struct *work),
TP_ARGS(req_cpu, pwq, work),
@@ -31,8 +31,8 @@ TRACE_EVENT(workqueue_queue_work,
__field( void *, work )
__field( void *, function)
__string( workqueue, pwq->wq->name)
- __field( unsigned int, req_cpu )
- __field( unsigned int, cpu )
+ __field( int, req_cpu )
+ __field( int, cpu )
),
TP_fast_assign(
@@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work,
__entry->cpu = pwq->pool->cpu;
),
- TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
+ TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
__entry->work, __entry->function, __get_str(workqueue),
__entry->req_cpu, __entry->cpu)
);