| author | Vlad Buslov | 2019-09-13 18:28:40 +0300 |
|---|---|---|
| committer | David S. Miller | 2019-09-16 09:18:03 +0200 |
| commit | 4a5da47d5cb6aba3c26a5cc0dddfb2d577e851e9 (patch) | |
| tree | c6f58d1ec493b1cd695eb7135af7255f2fc9c20d /net/psample | |
| parent | 1158958a218bb55d1c358200d7f82808d11bf929 (diff) | |
net: sched: take reference to psample group in flow_action infra
With the recent patch set that removed the rtnl lock dependency from the cls
hardware offload API, the rtnl lock is only taken while reading action data
and can be released once the action-specific data has been parsed into an
intermediate representation. However, the sample action's psample group is
passed by pointer without first obtaining a reference to it, which makes it
possible to concurrently overwrite the action and deallocate the object
pointed to by psample_group after the rtnl lock has been released but before
the driver has finished using the pointer.
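
To make the window concrete, here is a minimal userspace C analogue of the
hazard (illustrative names only, not kernel code): a raw pointer is read
under a lock, the lock is dropped, and a concurrent writer frees the object
while the reader still holds the bare pointer.

```c
/* Hypothetical userspace analogue of the race -- illustrative names only,
 * not kernel APIs. Compile with: gcc -pthread -o race race.c */
#include <pthread.h>
#include <stdlib.h>

struct group { int refcount; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *current_group; /* plays the role of the action's psample_group */

/* "Parse" side: reads the pointer under the lock but takes no reference,
 * then drops the lock -- this is the window where the race lives. */
static struct group *parse_action(void)
{
	pthread_mutex_lock(&lock);
	struct group *g = current_group;	/* no reference taken! */
	pthread_mutex_unlock(&lock);
	return g;	/* may already be freed by the time the caller uses it */
}

/* Concurrent "replace" side: swaps in a new group and frees the old one,
 * invalidating any raw pointer handed out by parse_action(). */
static void replace_action(struct group *new_group)
{
	pthread_mutex_lock(&lock);
	struct group *old = current_group;
	current_group = new_group;
	pthread_mutex_unlock(&lock);
	free(old);
}

int main(void)
{
	current_group = calloc(1, sizeof(*current_group));

	struct group *g = parse_action();		/* raw pointer, no reference */
	replace_action(calloc(1, sizeof(*g)));	/* frees the object g points to */
	/* Any dereference of g past this point is a use-after-free. */
	(void)g;
	return 0;
}
```

Nothing stops replace_action() from freeing the object between
parse_action() returning and the pointer's last use; that is exactly the
window the patch below closes.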
To prevent this race condition, obtain a reference to the psample group while
it is in use by the flow_action infra. Extend the psample API with a
psample_group_take() function that increments the psample group's reference
counter. Extend struct tc_action_ops with a new get_psample_group() callback.
Implement the callback for the sample action using psample_group_take(), with
the already existing psample_group_put() as the destructor. Use it in
tc_setup_flow_action() to take a reference to the psample group pointed to by
entry->sample.psample_group, and release that reference in
tc_cleanup_flow_action().
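
A minimal userspace sketch of the resulting take/put discipline (hypothetical
helpers mirroring the semantics of psample_group_take() and
psample_group_put(), not the kernel implementation):

```c
/* Userspace sketch of the take/put pattern -- illustrative only.
 * group_take()/group_put() mirror the semantics of psample_group_take()
 * and psample_group_put(), not their actual implementation. */
#include <pthread.h>
#include <stdlib.h>

struct group { int refcount; };

static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

/* Increment the reference counter under the group lock. */
static void group_take(struct group *g)
{
	pthread_mutex_lock(&groups_lock);
	g->refcount++;
	pthread_mutex_unlock(&groups_lock);
}

/* Drop a reference; the last reference frees the object. */
static void group_put(struct group *g)
{
	pthread_mutex_lock(&groups_lock);
	int last = (--g->refcount == 0);
	pthread_mutex_unlock(&groups_lock);
	if (last)
		free(g);	/* safe: nobody else can hold a reference here */
}
```

The essential property is that the reference is taken while the pointer is
still protected by the lock, so a concurrent release can at worst drop the
count to one; the object stays alive until the consumer calls group_put().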
Disable bottom halves (bh) when taking psample_groups_lock. The lock is now
taken while holding the action's tcf_lock, which is used by the data path and
requires bh to be disabled, so psample_groups_lock must follow the same
discipline to preserve SOFTIRQ-irq-safety.
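
The nesting rule can be sketched in kernel-style C (schematic only; the lock
names are stand-ins for tcf_lock and psample_groups_lock, and this is not the
actual psample code):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* stands in for an action's tcf_lock */
static DEFINE_SPINLOCK(inner_lock);	/* stands in for psample_groups_lock  */

/* Softirq/data-path side: establishes the outer_lock -> inner_lock chain.
 * It already runs with bottom halves disabled. */
static void datapath_side(void)
{
	spin_lock(&outer_lock);
	spin_lock(&inner_lock);
	/* ... */
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}

/* Process-context side: must use the _bh variant. With a plain
 * spin_lock(), a softirq arriving on this CPU while inner_lock is held
 * could enter datapath_side() and spin on inner_lock forever -- the
 * SOFTIRQ-irq-safety inversion that lockdep would flag. */
static void control_side(void)
{
	spin_lock_bh(&inner_lock);
	/* ... */
	spin_unlock_bh(&inner_lock);
}
```

With the _bh variant, softirqs cannot run on this CPU while inner_lock is
held, so the inversion can never materialize.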
Fixes: 918190f50eb6 ("net: sched: flower: don't take rtnl lock for cls hw offloads API")
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/psample')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/psample/psample.c | 20 |

1 file changed, 14 insertions(+), 6 deletions(-)
```diff
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 66e4b61a350d..a6ceb0533b5b 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -73,7 +73,7 @@ static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 	int idx = 0;
 	int err;
 
-	spin_lock(&psample_groups_lock);
+	spin_lock_bh(&psample_groups_lock);
 	list_for_each_entry(group, &psample_groups_list, list) {
 		if (!net_eq(group->net, sock_net(msg->sk)))
 			continue;
@@ -89,7 +89,7 @@ static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
 		idx++;
 	}
 
-	spin_unlock(&psample_groups_lock);
+	spin_unlock_bh(&psample_groups_lock);
 	cb->args[0] = idx;
 	return msg->len;
 }
@@ -172,7 +172,7 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num)
 {
 	struct psample_group *group;
 
-	spin_lock(&psample_groups_lock);
+	spin_lock_bh(&psample_groups_lock);
 
 	group = psample_group_lookup(net, group_num);
 	if (!group) {
@@ -183,19 +183,27 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num)
 	group->refcount++;
 out:
-	spin_unlock(&psample_groups_lock);
+	spin_unlock_bh(&psample_groups_lock);
 	return group;
 }
 EXPORT_SYMBOL_GPL(psample_group_get);
 
+void psample_group_take(struct psample_group *group)
+{
+	spin_lock_bh(&psample_groups_lock);
+	group->refcount++;
+	spin_unlock_bh(&psample_groups_lock);
+}
+EXPORT_SYMBOL_GPL(psample_group_take);
+
 void psample_group_put(struct psample_group *group)
 {
-	spin_lock(&psample_groups_lock);
+	spin_lock_bh(&psample_groups_lock);
 	if (--group->refcount == 0)
 		psample_group_destroy(group);
-	spin_unlock(&psample_groups_lock);
+	spin_unlock_bh(&psample_groups_lock);
 }
 EXPORT_SYMBOL_GPL(psample_group_put);
```