author     David S. Miller  2018-12-20 10:53:28 -0800
committer  David S. Miller  2018-12-20 11:53:36 -0800
commit     2be09de7d6a06f58e768de1255a687c9aaa66606 (patch)
tree       298f9e04caf105873d987e807eccba27710a49cc /kernel
parent     44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (diff)
parent     1d51b4b1d3f2db0d6d144175e31a84e472fbd99a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of conflicts, but happily all were cases of overlapping
changes, parallel adds, and things of that nature.
Thanks to Stephen Rothwell, Saeed Mahameed, and others
for their guidance in these resolutions.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c                    | 21
-rw-r--r--  kernel/bpf/verifier.c                | 13
-rw-r--r--  kernel/dma/direct.c                  |  7
-rw-r--r--  kernel/trace/ftrace.c                |  1
-rw-r--r--  kernel/trace/trace_events_filter.c   |  5
-rw-r--r--  kernel/trace/trace_events_trigger.c  |  6
6 files changed, 40 insertions(+), 13 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5cdd8da0e7f2..38de580abcc2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -474,13 +474,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT	(PAGE_SIZE * 40000)
-
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit   __read_mostly;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -701,16 +699,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 static atomic_long_t bpf_jit_current;
 
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
 #if defined(MODULES_VADDR)
+	return MODULES_END - MODULES_VADDR;
+#else
+	return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
 static int __init bpf_jit_charge_init(void)
 {
 	/* Only used as heuristic here to derive limit. */
-	bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
-					    PAGE_SIZE), INT_MAX);
+	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+					    PAGE_SIZE), LONG_MAX);
 	return 0;
 }
 pure_initcall(bpf_jit_charge_init);
-#endif
 
 static int bpf_jit_charge_modmem(u32 pages)
 {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8b511a4fe84a..5b3c0a95244f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5381,9 +5381,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	}
 	new_sl->next = env->explored_states[insn_idx];
 	env->explored_states[insn_idx] = new_sl;
-	/* connect new state to parentage chain */
-	for (i = 0; i < BPF_REG_FP; i++)
-		cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+	/* connect new state to parentage chain. Current frame needs all
+	 * registers connected. Only r6 - r9 of the callers are alive (pushed
+	 * to the stack implicitly by JITs) so in callers' frames connect just
+	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+	 * the state of the call instruction (with WRITTEN set), and r0 comes
+	 * from callee with its full parentage chain, anyway.
+	 */
+	for (j = 0; j <= cur->curframe; j++)
+		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
 	/* clear write marks in current state: the writes we did are not writes
 	 * our child did, so they don't screen off its reads from us.
 	 * (There are no read marks in current state, because reads always mark
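A note on the kernel/bpf/core.c hunks above: instead of the fixed
PAGE_SIZE * 40000 default, the JIT memory limit is now derived at boot
as one quarter of the executable mapping region (module space where it
exists, otherwise vmalloc space), rounded up to a page and clamped to
LONG_MAX to match the new long type. Below is a minimal userspace
sketch of that arithmetic, assuming a 4 KiB page and a hypothetical
1 GiB executable region; neither value comes from this patch.

#include <stdio.h>

#define PAGE_SIZE	4096ULL

/* power-of-two round-up, as the kernel's round_up() behaves */
static unsigned long long round_up_pow2(unsigned long long x,
					unsigned long long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long long exec_limit = 1ULL << 30;	/* assumed 1 GiB region */
	long limit = (long)round_up_pow2(exec_limit >> 2, PAGE_SIZE);

	/* a quarter of the region, page aligned: 256 MiB here */
	printf("bpf_jit_limit = %ld bytes\n", limit);
	return 0;
}

With these assumed inputs the derived limit is 268435456 bytes; an
arch that overrides the weak bpf_jit_alloc_exec_limit() hook simply
feeds a different region size into the same formula.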
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 22a12ab5a5e9..375c77e8d52f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -309,7 +309,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
 
 	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
 
-	return mask >= phys_to_dma(dev, min_mask);
+	/*
+	 * This check needs to be against the actual bit mask value, so
+	 * use __phys_to_dma() here so that the SME encryption mask isn't
+	 * part of the check.
+	 */
+	return mask >= __phys_to_dma(dev, min_mask);
 }
 
 int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 77734451cb05..e23eb9fc77aa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
 		ftrace_shutdown(ops, 0);
 	ops->flags |= FTRACE_OPS_FL_DELETED;
+	ftrace_free_filter(ops);
 	mutex_unlock(&ftrace_lock);
 }
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 84a65173b1e9..5574e862de8d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
 		}
 	}
 
+	kfree(op_stack);
+	kfree(inverts);
 	return prog;
 out_free:
 	kfree(op_stack);
-	kfree(prog_stack);
 	kfree(inverts);
+	kfree(prog_stack);
 	return ERR_PTR(ret);
 }
 
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
 	err = process_preds(call, filter_string, *filterp, pe);
 	if (err && set_str)
 		append_filter_err(pe, *filterp);
+	create_filter_finish(pe);
 	return err;
 }
 
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2152d1e530cb..cd12ecb66eb9 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
 	/* The filter is for the 'trigger' event, not the triggered event */
 	ret = create_event_filter(file->event_call, filter_str,
 				  false, &filter);
-	if (ret)
-		goto out;
+	/*
+	 * If create_event_filter() fails, filter still needs to be freed.
+	 * Which the calling code will do with data->filter.
+	 */
 
 assign:
 	tmp = rcu_access_pointer(data->filter);
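A note on the kernel/dma/direct.c hunk above: on SME-enabled systems
phys_to_dma() ORs the memory-encryption bit into the address it
returns, so comparing that value against the device's DMA mask can
reject a device whose raw addressing reach is fine; __phys_to_dma()
leaves the encryption mask out. Below is a standalone sketch of the
failure mode, with stand-in helpers and an assumed bit position
(bit 47 is illustrative, not what any given CPU actually uses).

#include <stdio.h>
#include <stdint.h>

#define SME_BIT		(1ULL << 47)	/* assumed encryption-bit position */

/* stand-ins for the kernel helpers, for illustration only */
static uint64_t demo_phys_to_dma(uint64_t phys)   { return phys | SME_BIT; }
static uint64_t demo___phys_to_dma(uint64_t phys) { return phys; }

int main(void)
{
	uint64_t mask = 0xffffffffULL;		/* 32-bit capable device */
	uint64_t min_mask = 0xffffffULL;	/* lowest range it must reach */

	/* encrypted form: bit 47 set, so a 32-bit mask looks too small */
	printf("phys_to_dma check:   %s\n",
	       mask >= demo_phys_to_dma(min_mask) ? "supported" : "rejected");
	/* raw form: compares the device's actual addressing requirement */
	printf("__phys_to_dma check: %s\n",
	       mask >= demo___phys_to_dma(min_mask) ? "supported" : "rejected");
	return 0;
}

The first comparison fails only because of the artificial encryption
bit; the second reflects what the device can actually address, which
is the question dma_direct_supported() is trying to answer.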