mqprio: Correct stats in mqprio_dump_class_stats().
Introduction of lockless subqueues broke the class statistics.
Before the change stats were accumulated in `bstats' and `qstats'
on the stack which was then copied to struct gnet_dump.
After the change the `bstats' and `qstats' are initialized to 0
and never updated, yet still fed to gnet_dump. The code updates
the global qdisc->cpu_bstats and qdisc->cpu_qstats instead,
clobbering them. Most likely a copy-paste error from the code in
mqprio_dump().
__gnet_stats_copy_basic() and __gnet_stats_copy_queue() accumulate
the values for per-CPU case but for global stats they overwrite
the value, so only stats from the last loop iteration / tc end up
in sch->[bq]stats.
Use the on-stack [bq]stats variables again and add the stats manually
in the global case.
Fixes: ce679e8df7 ("net: sched: add support for TCQ_F_NOLOCK subqueues to sch_mqprio")
Cc: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/all/20211007175000.2334713-2-bigeasy@linutronix.de/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
bccf56c4cb
commit
1413269086
1 changed file with 19 additions and 13 deletions
|
@ -529,22 +529,28 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
|
||||||
for (i = tc.offset; i < tc.offset + tc.count; i++) {
|
for (i = tc.offset; i < tc.offset + tc.count; i++) {
|
||||||
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
|
struct netdev_queue *q = netdev_get_tx_queue(dev, i);
|
||||||
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
|
struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
|
||||||
struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
|
|
||||||
struct gnet_stats_queue __percpu *cpu_qstats = NULL;
|
|
||||||
|
|
||||||
spin_lock_bh(qdisc_lock(qdisc));
|
spin_lock_bh(qdisc_lock(qdisc));
|
||||||
if (qdisc_is_percpu_stats(qdisc)) {
|
|
||||||
cpu_bstats = qdisc->cpu_bstats;
|
|
||||||
cpu_qstats = qdisc->cpu_qstats;
|
|
||||||
}
|
|
||||||
|
|
||||||
qlen = qdisc_qlen_sum(qdisc);
|
if (qdisc_is_percpu_stats(qdisc)) {
|
||||||
__gnet_stats_copy_basic(NULL, &sch->bstats,
|
qlen = qdisc_qlen_sum(qdisc);
|
||||||
cpu_bstats, &qdisc->bstats);
|
|
||||||
__gnet_stats_copy_queue(&sch->qstats,
|
__gnet_stats_copy_basic(NULL, &bstats,
|
||||||
cpu_qstats,
|
qdisc->cpu_bstats,
|
||||||
&qdisc->qstats,
|
&qdisc->bstats);
|
||||||
qlen);
|
__gnet_stats_copy_queue(&qstats,
|
||||||
|
qdisc->cpu_qstats,
|
||||||
|
&qdisc->qstats,
|
||||||
|
qlen);
|
||||||
|
} else {
|
||||||
|
qlen += qdisc->q.qlen;
|
||||||
|
bstats.bytes += qdisc->bstats.bytes;
|
||||||
|
bstats.packets += qdisc->bstats.packets;
|
||||||
|
qstats.backlog += qdisc->qstats.backlog;
|
||||||
|
qstats.drops += qdisc->qstats.drops;
|
||||||
|
qstats.requeues += qdisc->qstats.requeues;
|
||||||
|
qstats.overlimits += qdisc->qstats.overlimits;
|
||||||
|
}
|
||||||
spin_unlock_bh(qdisc_lock(qdisc));
|
spin_unlock_bh(qdisc_lock(qdisc));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue