Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/act_gact.c	2
-rw-r--r--	net/sched/act_sample.c	2
-rw-r--r--	net/sched/sch_cake.c	8
-rw-r--r--	net/sched/sch_netem.c	22
-rw-r--r--	net/sched/sch_pie.c	2
-rw-r--r--	net/sched/sch_sfb.c	2
6 files changed, 19 insertions, 19 deletions
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index abe1bcc5c797..62d682b96b88 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -25,7 +25,7 @@ static struct tc_action_ops act_gact_ops;
 static int gact_net_rand(struct tcf_gact *gact)
 {
 	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
-	if (prandom_u32() % gact->tcfg_pval)
+	if (prandom_u32_max(gact->tcfg_pval))
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
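Context for the act_gact hunk: prandom_u32() % n is biased whenever n does not divide 2^32, while prandom_u32_max() scales a 32-bit random into [0, n) with a multiply and shift. A minimal sketch of that idea (the real helper lives in include/linux/prandom.h; the name here is illustrative, not the kernel's):

/* Sketch only: multiplying a uniform 32-bit value by n and keeping
 * the top 32 bits yields a (near-)uniform result in [0, n) without
 * the modulo bias of prandom_u32() % n. */
static inline u32 random_below_sketch(u32 n)
{
	return (u32)(((u64)prandom_u32() * n) >> 32);
}

For gact this preserves behaviour: the expression evaluates to zero, selecting tcfg_paction, with probability roughly 1/tcfg_pval.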
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 5ba36f70e3a1..7a25477f5d99 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -168,7 +168,7 @@ static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
 	psample_group = rcu_dereference_bh(s->psample_group);
 
 	/* randomly sample packets according to rate */
-	if (psample_group && (prandom_u32() % s->rate == 0)) {
+	if (psample_group && (prandom_u32_max(s->rate) == 0)) {
 		if (!skb_at_tc_ingress(skb)) {
 			md.in_ifindex = skb->skb_iif;
 			md.out_ifindex = skb->dev->ifindex;
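The act_sample conversion keeps the sampling semantics. A hedged illustration (the helper name is mine, not the kernel's; assumes rate > 0):

/* prandom_u32_max(rate) is (near-)uniform over 0..rate-1, so testing
 * for the single value 0 samples about one packet in every rate. */
static bool should_sample_sketch(u32 rate)
{
	return prandom_u32_max(rate) == 0;
}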
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 55c6879d2c7e..817cd0695b35 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -573,7 +573,7 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
 
 	/* Simple BLUE implementation. Lack of ECN is deliberate. */
 	if (vars->p_drop)
-		drop |= (prandom_u32() < vars->p_drop);
+		drop |= (get_random_u32() < vars->p_drop);
 
 	/* Overload the drop_next field as an activity timeout */
 	if (!vars->count)
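On the BLUE hunk: vars->p_drop is a probability in fixed point scaled to the full u32 range, so the comparison form survives the generator swap unchanged. Illustrative helper (not kernel code):

/* A uniform u32 falls below p_drop with probability p_drop / 2^32;
 * e.g. p_drop == 0x80000000 drops about half the packets. Only the
 * generator behind the comparison changed. */
static bool blue_should_drop_sketch(u32 p_drop)
{
	return get_random_u32() < p_drop;
}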
@@ -2092,11 +2092,11 @@ retry:
 
 		WARN_ON(host_load > CAKE_QUEUES);
 
-		/* The shifted prandom_u32() is a way to apply dithering to
-		 * avoid accumulating roundoff errors
+		/* The get_random_u16() is a way to apply dithering to avoid
+		 * accumulating roundoff errors
 		 */
 		flow->deficit += (b->flow_quantum * quantum_div[host_load] +
-				  (prandom_u32() >> 16)) >> 16;
+				  get_random_u16()) >> 16;
 		list_move_tail(&flow->flowchain, &b->old_flows);
 
 		goto retry;
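The dithering comment is worth unpacking: the deficit term is a 16.16 fixed-point quantity, and adding a uniform 16-bit value before the final >> 16 performs stochastic rounding, rounding up with probability equal to the discarded fractional part, so repeated increments stay unbiased on average. The old prandom_u32() >> 16 and the new get_random_u16() both supply a uniform 16-bit value. A sketch under those assumptions (not the kernel code):

/* x is 16.16 fixed point; the noise makes truncation round up with
 * probability equal to x's fractional part. */
static u32 dither_16_16_sketch(u32 x)
{
	return (x + get_random_u16()) >> 16;
}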
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 18f4273a835b..fb00ac40ecb7 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -171,7 +171,7 @@ static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 static void init_crandom(struct crndstate *state, unsigned long rho)
 {
 	state->rho = rho;
-	state->last = prandom_u32();
+	state->last = get_random_u32();
 }
 
 /* get_crandom - correlated random number generator
@@ -184,9 +184,9 @@ static u32 get_crandom(struct crndstate *state)
 	unsigned long answer;
 
 	if (!state || state->rho == 0)	/* no correlation */
-		return prandom_u32();
+		return get_random_u32();
 
-	value = prandom_u32();
+	value = get_random_u32();
 	rho = (u64)state->rho + 1;
 	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
 	state->last = answer;
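get_crandom()'s blend, written out as a sketch (it mirrors the arithmetic above; the helper name is mine): with rho in [1, 2^32], each output is a fixed-point weighted average of fresh randomness and the previous output, so small rho decorrelates the stream and rho near 2^32 makes it sticky.

static u32 crandom_blend_sketch(u32 fresh, u32 last, u64 rho)
{
	/* answer = (fresh * (2^32 - rho) + last * rho) / 2^32,
	 * a 32.32 fixed-point weighted average */
	return (u32)(((u64)fresh * ((1ull << 32) - rho) +
		      (u64)last * rho) >> 32);
}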
@@ -200,7 +200,7 @@ static u32 get_crandom(struct crndstate *state)
 static bool loss_4state(struct netem_sched_data *q)
 {
 	struct clgstate *clg = &q->clg;
-	u32 rnd = prandom_u32();
+	u32 rnd = get_random_u32();
 
 	/*
 	 * Makes a comparison between rnd and the transition
@@ -268,15 +268,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
 
 	switch (clg->state) {
 	case GOOD_STATE:
-		if (prandom_u32() < clg->a1)
+		if (get_random_u32() < clg->a1)
 			clg->state = BAD_STATE;
-		if (prandom_u32() < clg->a4)
+		if (get_random_u32() < clg->a4)
 			return true;
 		break;
 	case BAD_STATE:
-		if (prandom_u32() < clg->a2)
+		if (get_random_u32() < clg->a2)
 			clg->state = GOOD_STATE;
-		if (prandom_u32() > clg->a3)
+		if (get_random_u32() > clg->a3)
 			return true;
 	}
 
@@ -513,8 +513,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 			goto finish_segs;
 		}
 
-		skb->data[prandom_u32() % skb_headlen(skb)] ^=
-			1<<(prandom_u32() % 8);
+		skb->data[prandom_u32_max(skb_headlen(skb))] ^=
+			1<<prandom_u32_max(8);
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit)) {
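The corruption one-liner in the hunk above, decomposed for readability (illustrative, not the kernel's literal code): pick one byte uniformly from the skb's linear area, then flip one of its eight bits, also chosen uniformly.

unsigned int byte = prandom_u32_max(skb_headlen(skb));	/* which byte */
unsigned int bit = prandom_u32_max(8);			/* which bit  */
skb->data[byte] ^= 1U << bit;				/* flip it    */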
@@ -632,7 +632,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
 
 	if (!q->slot_dist)
 		next_delay = q->slot_config.min_delay +
-				(prandom_u32() *
+				(get_random_u32() *
 				 (q->slot_config.max_delay -
 				  q->slot_config.min_delay) >> 32);
 	else
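The slot hunk uses the same multiply-shift trick to map a uniform u32 into a range. A sketch, assuming max_delay >= min_delay (casts added here for clarity):

/* For uniform u32 r and span = max - min, (r * span) >> 32 is
 * (near-)uniform over [0, span), so next_delay lands in
 * [min_delay, max_delay). */
u64 span = q->slot_config.max_delay - q->slot_config.min_delay;
s64 next_delay = q->slot_config.min_delay +
		 (s64)(((u64)get_random_u32() * span) >> 32);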
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 974038ba6c7b..265c238047a4 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -72,7 +72,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
 	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
 		return true;
 
-	prandom_bytes(&rnd, 8);
+	get_random_bytes(&rnd, 8);
 	if ((rnd >> BITS_PER_BYTE) < local_prob) {
 		vars->accu_prob = 0;
 		return true;
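prandom_bytes() and get_random_bytes() take the same (buffer, length) arguments, so the pie change is a drop-in swap of the entropy source. The shape of the early-drop test, as a sketch, assuming local_prob is fixed point with MAX_PROB standing for probability 1.0:

/* rnd is uniform over 64 bits; shifting out the low byte leaves a
 * uniform 56-bit value, so the branch is taken with probability
 * about local_prob / 2^56. */
u64 rnd;

get_random_bytes(&rnd, 8);
if ((rnd >> BITS_PER_BYTE) < local_prob)
	return true;	/* drop early */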
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index e2389fa3cff8..0366a1a029a9 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -379,7 +379,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		goto enqueue;
 	}
 
-	r = prandom_u32() & SFB_MAX_PROB;
+	r = get_random_u16() & SFB_MAX_PROB;
 
 	if (unlikely(r < p_min)) {
 		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
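Why 16 random bits suffice in sfb: SFB probabilities are 16-bit fixed point (SFB_MAX_PROB is 0xFFFF in net/sched/sch_sfb.c), so the old prandom_u32() call discarded its upper half anyway. Illustrative form:

u32 r = get_random_u16() & SFB_MAX_PROB;	/* uniform over 0..0xFFFF */
bool hit = (r < p_min);	/* true with probability p_min / (SFB_MAX_PROB + 1) */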