author      Vasily Averin     2022-05-12 20:23:08 -0700
committer   Andrew Morton     2022-05-13 07:20:18 -0700
commit      fe573327ffb1deba802c91dd1d4ff41dafa97a0e (patch)
tree        1bc1eaf3f2a2e4319a1dea4f5fb49f3fb498977b
parent      e7be8d1dd983156bbdd22c0319b71119a8fbb697 (diff)
tracing: incorrect gfp_t conversion
Fixes the following sparse warnings:
include/trace/events/*: sparse: cast to restricted gfp_t
include/trace/events/*: sparse: restricted gfp_t degrades to integer
The gfp_t type is bitwise and requires __force attributes for any casts.
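
For context, a minimal standalone sketch (not part of this patch) of how sparse's
__bitwise/__force annotations behave; __GFP_EXAMPLE and gfp_to_trace_value() are
hypothetical names for illustration, and the macro expansions mirror
include/linux/compiler_types.h:

    /* Under sparse (__CHECKER__) the annotations are real attributes;
     * for a normal compiler they expand to nothing.
     */
    #ifdef __CHECKER__
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned int __bitwise gfp_t;            /* as in include/linux/types.h */

    #define __GFP_EXAMPLE ((__force gfp_t)0x40u)      /* illustrative flag, not a real kernel flag */

    static inline unsigned long gfp_to_trace_value(gfp_t flags)
    {
            /*
             * Without __force, sparse complains about converting the
             * restricted (bitwise) gfp_t to a plain integer, as in the
             * warnings listed above; __force marks the cast as intentional.
             */
            return (__force unsigned long)flags;
    }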
Link: https://lkml.kernel.org/r/331d88fe-f4f7-657c-02a2-d977f15fbff6@openvz.org
Signed-off-by: Vasily Averin <vvs@openvz.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--   include/linux/gfp.h                |  2
-rw-r--r--   include/trace/events/btrfs.h       |  4
-rw-r--r--   include/trace/events/compaction.h  |  4
-rw-r--r--   include/trace/events/kmem.h        | 12
-rw-r--r--   include/trace/events/mmflags.h     | 78
-rw-r--r--   include/trace/events/vmscan.h      | 16
-rw-r--r--   mm/compaction.c                    |  2
7 files changed, 59 insertions, 59 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 2a08a3c4ba95..2d2ccae933c2 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -367,7 +367,7 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
                 return MIGRATE_UNMOVABLE;
 
         /* Group based on mobility */
-        return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+        return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index f068ff30d654..ed92c80331ea 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1344,13 +1344,13 @@ TRACE_EVENT(alloc_extent_state,
 
         TP_STRUCT__entry(
                 __field(const struct extent_state *, state)
-                __field(gfp_t, mask)
+                __field(unsigned long, mask)
                 __field(const void*, ip)
         ),
 
         TP_fast_assign(
                 __entry->state = state,
-                __entry->mask = mask,
+                __entry->mask = (__force unsigned long)mask,
                 __entry->ip = (const void *)IP
         ),
 
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c6d5d70dc7a5..3313eb83c117 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -162,13 +162,13 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
 
         TP_STRUCT__entry(
                 __field(int, order)
-                __field(gfp_t, gfp_mask)
+                __field(unsigned long, gfp_mask)
                 __field(int, prio)
         ),
 
         TP_fast_assign(
                 __entry->order = order;
-                __entry->gfp_mask = gfp_mask;
+                __entry->gfp_mask = (__force unsigned long)gfp_mask;
                 __entry->prio = prio;
         ),
 
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ddc8c944f417..71c141804222 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -24,7 +24,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
                 __field( const void *, ptr )
                 __field( size_t, bytes_req )
                 __field( size_t, bytes_alloc )
-                __field( gfp_t, gfp_flags )
+                __field( unsigned long, gfp_flags )
         ),
 
         TP_fast_assign(
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
                 __entry->ptr = ptr;
                 __entry->bytes_req = bytes_req;
                 __entry->bytes_alloc = bytes_alloc;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
         ),
 
         TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
                 __field( const void *, ptr )
                 __field( size_t, bytes_req )
                 __field( size_t, bytes_alloc )
-                __field( gfp_t, gfp_flags )
+                __field( unsigned long, gfp_flags )
                 __field( int, node )
         ),
 
@@ -84,7 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
                 __entry->ptr = ptr;
                 __entry->bytes_req = bytes_req;
                 __entry->bytes_alloc = bytes_alloc;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
                 __entry->node = node;
         ),
 
@@ -208,14 +208,14 @@ TRACE_EVENT(mm_page_alloc,
         TP_STRUCT__entry(
                 __field( unsigned long, pfn )
                 __field( unsigned int, order )
-                __field( gfp_t, gfp_flags )
+                __field( unsigned long, gfp_flags )
                 __field( int, migratetype )
         ),
 
         TP_fast_assign(
                 __entry->pfn = page ? page_to_pfn(page) : -1UL;
                 __entry->order = order;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
                 __entry->migratetype = migratetype;
         ),
 
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 6532119a6bf1..fcb1ffb2cc8d 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -14,48 +14,48 @@
  */
 
 #define __def_gfpflag_names \
-        {(unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
-        {(unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
-        {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
-        {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
-        {(unsigned long)GFP_USER, "GFP_USER"}, \
-        {(unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
-        {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
-        {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
-        {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
-        {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
-        {(unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
-        {(unsigned long)GFP_DMA, "GFP_DMA"}, \
-        {(unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
-        {(unsigned long)GFP_DMA32, "GFP_DMA32"}, \
-        {(unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
-        {(unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
-        {(unsigned long)__GFP_IO, "__GFP_IO"}, \
-        {(unsigned long)__GFP_FS, "__GFP_FS"}, \
-        {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
-        {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
-        {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
-        {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
-        {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \
-        {(unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
-        {(unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
-        {(unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
-        {(unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
-        {(unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
-        {(unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
-        {(unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
-        {(unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
-        {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
-        {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
-        {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
-        {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}, \
-        {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \
+        {(__force unsigned long)GFP_TRANSHUGE, "GFP_TRANSHUGE"}, \
+        {(__force unsigned long)GFP_TRANSHUGE_LIGHT, "GFP_TRANSHUGE_LIGHT"}, \
+        {(__force unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
+        {(__force unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+        {(__force unsigned long)GFP_USER, "GFP_USER"}, \
+        {(__force unsigned long)GFP_KERNEL_ACCOUNT, "GFP_KERNEL_ACCOUNT"}, \
+        {(__force unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
+        {(__force unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+        {(__force unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
+        {(__force unsigned long)GFP_NOIO, "GFP_NOIO"}, \
+        {(__force unsigned long)GFP_NOWAIT, "GFP_NOWAIT"}, \
+        {(__force unsigned long)GFP_DMA, "GFP_DMA"}, \
+        {(__force unsigned long)__GFP_HIGHMEM, "__GFP_HIGHMEM"}, \
+        {(__force unsigned long)GFP_DMA32, "GFP_DMA32"}, \
+        {(__force unsigned long)__GFP_HIGH, "__GFP_HIGH"}, \
+        {(__force unsigned long)__GFP_ATOMIC, "__GFP_ATOMIC"}, \
+        {(__force unsigned long)__GFP_IO, "__GFP_IO"}, \
+        {(__force unsigned long)__GFP_FS, "__GFP_FS"}, \
+        {(__force unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \
+        {(__force unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \
+        {(__force unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \
+        {(__force unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \
+        {(__force unsigned long)__GFP_COMP, "__GFP_COMP"}, \
+        {(__force unsigned long)__GFP_ZERO, "__GFP_ZERO"}, \
+        {(__force unsigned long)__GFP_NOMEMALLOC, "__GFP_NOMEMALLOC"}, \
+        {(__force unsigned long)__GFP_MEMALLOC, "__GFP_MEMALLOC"}, \
+        {(__force unsigned long)__GFP_HARDWALL, "__GFP_HARDWALL"}, \
+        {(__force unsigned long)__GFP_THISNODE, "__GFP_THISNODE"}, \
+        {(__force unsigned long)__GFP_RECLAIMABLE, "__GFP_RECLAIMABLE"}, \
+        {(__force unsigned long)__GFP_MOVABLE, "__GFP_MOVABLE"}, \
+        {(__force unsigned long)__GFP_ACCOUNT, "__GFP_ACCOUNT"}, \
+        {(__force unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
+        {(__force unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
+        {(__force unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"}, \
+        {(__force unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}, \
+        {(__force unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \
 
 #ifdef CONFIG_KASAN_HW_TAGS
 #define __def_gfpflag_names_kasan , \
-        {(unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \
-        {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
-        {(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
+        {(__force unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \
+        {(__force unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \
+        {(__force unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"}
 #else
 #define __def_gfpflag_names_kasan
 #endif
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index de136dbd623a..408c86244d64 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -96,14 +96,14 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
                 __field( int, nid )
                 __field( int, zid )
                 __field( int, order )
-                __field( gfp_t, gfp_flags )
+                __field( unsigned long, gfp_flags )
         ),
 
         TP_fast_assign(
                 __entry->nid = nid;
                 __entry->zid = zid;
                 __entry->order = order;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
         ),
 
         TP_printk("nid=%d order=%d gfp_flags=%s",
@@ -120,12 +120,12 @@ DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
 
         TP_STRUCT__entry(
                 __field( int, order )
-                __field( gfp_t, gfp_flags )
+                __field( unsigned long, gfp_flags )
         ),
 
         TP_fast_assign(
                 __entry->order = order;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
         ),
 
         TP_printk("order=%d gfp_flags=%s",
@@ -210,7 +210,7 @@ TRACE_EVENT(mm_shrink_slab_start,
                 __field(void *, shrink)
                 __field(int, nid)
                 __field(long, nr_objects_to_shrink)
-                __field(gfp_t, gfp_flags)
+                __field(unsigned long, gfp_flags)
                 __field(unsigned long, cache_items)
                 __field(unsigned long long, delta)
                 __field(unsigned long, total_scan)
@@ -222,7 +222,7 @@ TRACE_EVENT(mm_shrink_slab_start,
                 __entry->shrink = shr->scan_objects;
                 __entry->nid = sc->nid;
                 __entry->nr_objects_to_shrink = nr_objects_to_shrink;
-                __entry->gfp_flags = sc->gfp_mask;
+                __entry->gfp_flags = (__force unsigned long)sc->gfp_mask;
                 __entry->cache_items = cache_items;
                 __entry->delta = delta;
                 __entry->total_scan = total_scan;
@@ -446,13 +446,13 @@ TRACE_EVENT(mm_vmscan_node_reclaim_begin,
         TP_STRUCT__entry(
                 __field(int, nid)
                 __field(int, order)
-                __field(gfp_t, gfp_flags)
+                __field(unsigned long, gfp_flags)
         ),
 
         TP_fast_assign(
                 __entry->nid = nid;
                 __entry->order = order;
-                __entry->gfp_flags = gfp_flags;
+                __entry->gfp_flags = (__force unsigned long)gfp_flags;
         ),
 
         TP_printk("nid=%d order=%d gfp_flags=%s",
diff --git a/mm/compaction.c b/mm/compaction.c
index 65970107b789..0e0ef9e44530 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2569,7 +2569,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                 unsigned int alloc_flags, const struct alloc_context *ac,
                 enum compact_priority prio, struct page **capture)
 {
-        int may_perform_io = gfp_mask & __GFP_IO;
+        int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
         struct zoneref *z;
         struct zone *zone;
         enum compact_result rc = COMPACT_SKIPPED;