Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitops.h     | 10
-rw-r--r--  include/linux/jump_label.h | 27
-rw-r--r--  include/linux/perf_event.h |  8
3 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a083..3c1063acb2ab 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -22,8 +22,14 @@ extern unsigned long __sw_hweight64(__u64 w);
#include <asm/bitops.h>
#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_cont(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
static __inline__ int get_bitmask_order(unsigned int count)
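
To make the intent of the new helper concrete, here is a minimal userspace sketch, not kernel code: find_first_bit()/find_next_bit() below are simplified stand-ins for the kernel bitmap helpers, and the two macros are copied from the hunk above. The point is that for_each_set_bit_cont() resumes a scan at the current value of 'bit' instead of restarting from bit 0.

/*
 * Userspace sketch of for_each_set_bit() vs. for_each_set_bit_cont().
 * find_first_bit()/find_next_bit() are simplified stand-ins for the
 * kernel bitmap helpers; the macros mirror the ones in bitops.h above.
 */
#include <stdio.h>

static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long off)
{
	for (; off < size; off++)
		if (addr[off / (8 * sizeof(long))] &
		    (1UL << (off % (8 * sizeof(long)))))
			return off;
	return size;
}

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_set_bit_cont(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

int main(void)
{
	unsigned long mask[1] = { 0xb5 };	/* bits 0, 2, 4, 5, 7 set */
	unsigned long bit;

	/* First pass: stop after the first hit. */
	for_each_set_bit(bit, mask, 8) {
		printf("first set bit: %lu\n", bit);
		break;
	}

	/* Resume the scan just past the saved position. */
	bit++;
	for_each_set_bit_cont(bit, mask, 8)
		printf("next set bit: %lu\n", bit);

	return 0;
}
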
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 388b0d425b50..5ce8b140428f 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -14,6 +15,12 @@ struct jump_label_key {
#endif
};
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -51,8 +58,11 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
+extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
@@ -68,6 +78,10 @@ static __always_inline void jump_label_init(void)
{
}
+struct jump_label_key_deferred {
+ struct jump_label_key key;
+};
+
static __always_inline bool static_branch(struct jump_label_key *key)
{
if (unlikely(atomic_read(&key->enabled)))
@@ -85,6 +99,11 @@ static inline void jump_label_dec(struct jump_label_key *key)
atomic_dec(&key->enabled);
}
+static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+{
+ jump_label_dec(&key->key);
+}
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -102,6 +121,14 @@ static inline int jump_label_apply_nops(struct module *mod)
{
return 0;
}
+
+static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+ unsigned long rl)
+{
+}
#endif /* HAVE_JUMP_LABEL */
+#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
+#define jump_label_key_disabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+
#endif /* _LINUX_JUMP_LABEL_H */
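
The jump_label_key_deferred changes are easier to read with the !HAVE_JUMP_LABEL fallback in mind. The sketch below is a userspace model of that fallback only, under stated simplifications: the atomic_t is a plain int, and there is no code patching and no delayed work. The deferred key simply wraps a plain key, jump_label_dec_deferred() degenerates to an immediate decrement, and callers test the embedded key, which is exactly what perf_event.h does below with perf_sched_events.key.

/*
 * Userspace model of the !HAVE_JUMP_LABEL fallback shown above.
 * Not the real implementation: the real HAVE_JUMP_LABEL path patches
 * code and rate-limits the final decrement via delayed work.
 */
#include <stdio.h>

struct jump_label_key { int enabled; };		/* atomic_t modeled as int */

struct jump_label_key_deferred { struct jump_label_key key; };

static int static_branch(struct jump_label_key *key)
{
	return key->enabled > 0;	/* fallback: plain load, no patching */
}

static void jump_label_inc(struct jump_label_key *key) { key->enabled++; }
static void jump_label_dec(struct jump_label_key *key) { key->enabled--; }

static void jump_label_dec_deferred(struct jump_label_key_deferred *key)
{
	jump_label_dec(&key->key);	/* no rate limiting in the fallback */
}

int main(void)
{
	struct jump_label_key_deferred events = { { 0 } };

	jump_label_inc(&events.key);
	if (static_branch(&events.key))
		printf("fast path enabled\n");

	jump_label_dec_deferred(&events.key);
	if (!static_branch(&events.key))
		printf("fast path disabled\n");

	return 0;
}
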
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b1f89122bf6a..08855613ceb3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ enum perf_hw_id {
PERF_COUNT_HW_BUS_CYCLES = 6,
PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
PERF_COUNT_HW_MAX, /* non-ABI */
};
@@ -890,6 +891,7 @@ struct perf_event_context {
int nr_active;
int is_active;
int nr_stat;
+ int nr_freq;
int rotate_disable;
atomic_t refcount;
struct task_struct *task;
@@ -1063,12 +1065,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
}
}
-extern struct jump_label_key perf_sched_events;
+extern struct jump_label_key_deferred perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_in(prev, task);
}
@@ -1077,7 +1079,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
- if (static_branch(&perf_sched_events))
+ if (static_branch(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
}
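
For the new PERF_COUNT_HW_REF_CPU_CYCLES event, here is a sketch of how a userspace tool might request it through perf_event_open(2). It assumes installed headers that already define the new enum value and a kernel/PMU that exposes the event; error handling is kept minimal.

/*
 * Count reference CPU cycles for the calling task using the
 * PERF_COUNT_HW_REF_CPU_CYCLES event added by this patch.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_REF_CPU_CYCLES;	/* new in this patch */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Measure this task on any CPU; perf_event_open has no libc wrapper. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (volatile int i = 0; i < 1000000; i++)
		;	/* some work to measure */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("ref cycles: %lld\n", count);

	close(fd);
	return 0;
}
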