mirror of
https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git
synced 2025-09-20 15:25:12 +10:00
netem: use seeded PRNG for correlated loss events
Use prandom_u32_state() instead of get_random_u32() to generate the correlated loss events of netem. Signed-off-by: François Michel <francois.michel@uclouvain.be> Reviewed-by: Simon Horman <horms@kernel.org> Acked-by: Stephen Hemminger <stephen@networkplumber.org> Link: https://lore.kernel.org/r/20230815092348.1449179-4-francois.michel@uclouvain.be Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
9c87b2aecc
commit
3cad70bc74
@@ -184,15 +184,16 @@ static void init_crandom(struct crndstate *state, unsigned long rho)
|
|||||||
* Next number depends on last value.
|
* Next number depends on last value.
|
||||||
* rho is scaled to avoid floating point.
|
* rho is scaled to avoid floating point.
|
||||||
*/
|
*/
|
||||||
static u32 get_crandom(struct crndstate *state)
|
static u32 get_crandom(struct crndstate *state, struct prng *p)
|
||||||
{
|
{
|
||||||
u64 value, rho;
|
u64 value, rho;
|
||||||
unsigned long answer;
|
unsigned long answer;
|
||||||
|
struct rnd_state *s = &p->prng_state;
|
||||||
|
|
||||||
if (!state || state->rho == 0) /* no correlation */
|
if (!state || state->rho == 0) /* no correlation */
|
||||||
return get_random_u32();
|
return prandom_u32_state(s);
|
||||||
|
|
||||||
value = get_random_u32();
|
value = prandom_u32_state(s);
|
||||||
rho = (u64)state->rho + 1;
|
rho = (u64)state->rho + 1;
|
||||||
answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
|
answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
|
||||||
state->last = answer;
|
state->last = answer;
|
||||||
@@ -295,7 +296,7 @@ static bool loss_event(struct netem_sched_data *q)
|
|||||||
switch (q->loss_model) {
|
switch (q->loss_model) {
|
||||||
case CLG_RANDOM:
|
case CLG_RANDOM:
|
||||||
/* Random packet drop 0 => none, ~0 => all */
|
/* Random packet drop 0 => none, ~0 => all */
|
||||||
return q->loss && q->loss >= get_crandom(&q->loss_cor);
|
return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
|
||||||
|
|
||||||
case CLG_4_STATES:
|
case CLG_4_STATES:
|
||||||
/* 4state loss model algorithm (used also for GI model)
|
/* 4state loss model algorithm (used also for GI model)
|
||||||
@@ -324,6 +325,7 @@ static bool loss_event(struct netem_sched_data *q)
|
|||||||
*/
|
*/
|
||||||
static s64 tabledist(s64 mu, s32 sigma,
|
static s64 tabledist(s64 mu, s32 sigma,
|
||||||
struct crndstate *state,
|
struct crndstate *state,
|
||||||
|
struct prng *prng,
|
||||||
const struct disttable *dist)
|
const struct disttable *dist)
|
||||||
{
|
{
|
||||||
s64 x;
|
s64 x;
|
||||||
@@ -333,7 +335,7 @@ static s64 tabledist(s64 mu, s32 sigma,
|
|||||||
if (sigma == 0)
|
if (sigma == 0)
|
||||||
return mu;
|
return mu;
|
||||||
|
|
||||||
rnd = get_crandom(state);
|
rnd = get_crandom(state, prng);
|
||||||
|
|
||||||
/* default uniform distribution */
|
/* default uniform distribution */
|
||||||
if (dist == NULL)
|
if (dist == NULL)
|
||||||
@@ -455,7 +457,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||||||
skb->prev = NULL;
|
skb->prev = NULL;
|
||||||
|
|
||||||
/* Random duplication */
|
/* Random duplication */
|
||||||
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
|
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng))
|
||||||
++count;
|
++count;
|
||||||
|
|
||||||
/* Drop packet? */
|
/* Drop packet? */
|
||||||
@@ -498,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||||||
* If packet is going to be hardware checksummed, then
|
* If packet is going to be hardware checksummed, then
|
||||||
* do it now in software before we mangle it.
|
* do it now in software before we mangle it.
|
||||||
*/
|
*/
|
||||||
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
|
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) {
|
||||||
if (skb_is_gso(skb)) {
|
if (skb_is_gso(skb)) {
|
||||||
skb = netem_segment(skb, sch, to_free);
|
skb = netem_segment(skb, sch, to_free);
|
||||||
if (!skb)
|
if (!skb)
|
||||||
@@ -536,12 +538,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
|||||||
cb = netem_skb_cb(skb);
|
cb = netem_skb_cb(skb);
|
||||||
if (q->gap == 0 || /* not doing reordering */
|
if (q->gap == 0 || /* not doing reordering */
|
||||||
q->counter < q->gap - 1 || /* inside last reordering gap */
|
q->counter < q->gap - 1 || /* inside last reordering gap */
|
||||||
q->reorder < get_crandom(&q->reorder_cor)) {
|
q->reorder < get_crandom(&q->reorder_cor, &q->prng)) {
|
||||||
u64 now;
|
u64 now;
|
||||||
s64 delay;
|
s64 delay;
|
||||||
|
|
||||||
delay = tabledist(q->latency, q->jitter,
|
delay = tabledist(q->latency, q->jitter,
|
||||||
&q->delay_cor, q->delay_dist);
|
&q->delay_cor, &q->prng, q->delay_dist);
|
||||||
|
|
||||||
now = ktime_get_ns();
|
now = ktime_get_ns();
|
||||||
|
|
||||||
@@ -645,7 +647,7 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
|
|||||||
else
|
else
|
||||||
next_delay = tabledist(q->slot_config.dist_delay,
|
next_delay = tabledist(q->slot_config.dist_delay,
|
||||||
(s32)(q->slot_config.dist_jitter),
|
(s32)(q->slot_config.dist_jitter),
|
||||||
NULL, q->slot_dist);
|
NULL, &q->prng, q->slot_dist);
|
||||||
|
|
||||||
q->slot.slot_next = now + next_delay;
|
q->slot.slot_next = now + next_delay;
|
||||||
q->slot.packets_left = q->slot_config.max_packets;
|
q->slot.packets_left = q->slot_config.max_packets;
|
||||||
|
Loading…
Reference in New Issue
Block a user