-rw-r--r--  arch/x86/kernel/kprobes/opt.c | 4
-rw-r--r--  include/linux/kprobes.h      | 1
-rw-r--r--  kernel/kprobes.c             | 2
3 files changed, 4 insertions, 3 deletions
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e57e07b0edb6..f406bfa9a8cd 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 		/* This function only handles jump-optimized kprobe */
 		if (kp && kprobe_optimized(kp)) {
 			op = container_of(kp, struct optimized_kprobe, kp);
-			/* If op->list is not empty, op is under optimizing */
-			if (list_empty(&op->list))
+			/* If op is optimized or under unoptimizing */
+			if (list_empty(&op->list) || optprobe_queued_unopt(op))
 				goto found;
 		}
 	}
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index a0b92be98984..ab39285f71a6 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,6 +378,7 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
 DEFINE_INSN_CACHE_OPS(optinsn);
 
 extern void wait_for_kprobe_optimizer(void);
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
 #else /* !CONFIG_OPTPROBES */
 static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1c18ecf9f98b..90b0fac6efa0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -662,7 +662,7 @@ void wait_for_kprobe_optimizer(void)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
+bool optprobe_queued_unopt(struct optimized_kprobe *op)
 {
 	struct optimized_kprobe *_op;
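For context (not part of the diff above): the helper that the kprobes.h hunk exports reports whether a probe is sitting on the optimizer's unoptimizing queue. A rough sketch of its shape in kernel/kprobes.c around this change, shown post-patch without the dropped static qualifier; consult the tree at the referenced index for the authoritative body:

bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	/*
	 * unoptimizing_list holds optprobes queued for the kprobe optimizer
	 * to revert; membership means op is "under unoptimizing".
	 */
	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

With the opt.c hunk, __recover_optprobed_insn() treats a probe as recoverable both when it is fully optimized (op->list empty) and when it is still optimized but already queued for unoptimization, since in either case the optimizer's jump may still be in place and the original bytes must be taken from the op's instruction buffer.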