-rw-r--r--  include/linux/netdevice.h |  1
-rw-r--r--  include/net/sch_generic.h | 14
-rw-r--r--  net/sched/sch_atm.c       | 12
-rw-r--r--  net/sched/sch_cbq.c       | 23
-rw-r--r--  net/sched/sch_dsmark.c    |  8
-rw-r--r--  net/sched/sch_hfsc.c      |  8
-rw-r--r--  net/sched/sch_htb.c       | 18
-rw-r--r--  net/sched/sch_netem.c     |  3
-rw-r--r--  net/sched/sch_prio.c      |  8
-rw-r--r--  net/sched/sch_red.c       |  2
-rw-r--r--  net/sched/sch_sfq.c       |  2
-rw-r--r--  net/sched/sch_tbf.c       |  3
12 files changed, 68 insertions, 34 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ee583f642a9..abbf5d52ec8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -64,6 +64,7 @@ struct wireless_dev;
#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue;
(TC use only - dev_queue_xmit
returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c5bb1306505..f15b045a85e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -343,6 +343,18 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
return qdisc_skb_cb(skb)->pkt_len;
}
+#ifdef CONFIG_NET_CLS_ACT
+/* additional qdisc xmit flags */
+enum net_xmit_qdisc_t {
+ __NET_XMIT_STOLEN = 0x00010000,
+};
+
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
+
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
@@ -355,7 +367,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
qdisc_skb_cb(skb)->pkt_len = skb->len;
- return qdisc_enqueue(skb, sch);
+ return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
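
The hunk above defines the whole mechanism that the per-qdisc changes below then apply: a classifier action that steals or re-queues a packet returns NET_XMIT_SUCCESS with the private __NET_XMIT_STOLEN bit set, net_xmit_drop_count() turns that return code into 0 or 1 drops for the parent's counters, and qdisc_enqueue_root() strips the private bit with NET_XMIT_MASK before the value reaches the core. The following is a minimal standalone user-space sketch of that flag arithmetic, not kernel code: the constants are copied from this patch, while struct qstats, child_enqueue_stolen() and parent_enqueue() are hypothetical stand-ins for a child/parent qdisc pair.

/*
 * Standalone sketch (compiles as ordinary C): models how __NET_XMIT_STOLEN
 * travels from a child enqueue, through a parent's drop accounting, to the
 * masked value the core would see.
 */
#include <stdio.h>

#define NET_XMIT_SUCCESS  0
#define NET_XMIT_DROP     1
#define NET_XMIT_MASK     0xFFFF        /* strips qdisc-private flag bits */
#define __NET_XMIT_STOLEN 0x00010000    /* packet consumed, but not a drop */

#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)

struct qstats { unsigned int drops; };  /* stand-in for the real qstats */

/* Hypothetical child qdisc: a tc action stole the packet
 * (as atm_tc_enqueue()/dsmark_enqueue() do after this patch). */
static int child_enqueue_stolen(void)
{
	return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
}

/* Hypothetical parent qdisc: only real failures count as drops. */
static int parent_enqueue(struct qstats *qs)
{
	int ret = child_enqueue_stolen();

	if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
		qs->drops++;            /* skipped when the packet was stolen */
	return ret;
}

int main(void)
{
	struct qstats qs = { 0 };
	int ret = parent_enqueue(&qs);

	/* qdisc_enqueue_root() masks the private flag before the core sees it */
	printf("raw ret=0x%x, to core=%d, drops=%u\n",
	       (unsigned)ret, ret & NET_XMIT_MASK, qs.drops);
	return 0;
}

Running this prints "raw ret=0x10000, to core=0, drops=0": the stolen packet neither inflates the drop counters nor leaks the private bit past NET_XMIT_MASK. The per-qdisc hunks that follow are all instances of this same pattern.
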
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5..27dd773481b 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
kfree_skb(skb);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
kfree_skb(skb);
goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, flow->q);
if (ret != 0) {
drop: __maybe_unused
- sch->qstats.drops++;
- if (flow)
- flow->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ if (flow)
+ flow->qstats.drops++;
+ }
return ret;
}
sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
if (!ret) {
sch->q.qlen++;
sch->qstats.requeues++;
- } else {
+ } else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
p->link.qstats.drops++;
}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a68..765ae565900 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
case TC_ACT_RECLASSIFY:
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
- sch->qstats.drops++;
- cbq_mark_toplevel(q, cl);
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cbq_mark_toplevel(q, cl);
+ cl->qstats.drops++;
+ }
return ret;
}
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
cbq_activate_class(cl);
return 0;
}
- sch->qstats.drops++;
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return ret;
}
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
q->rx_class = NULL;
if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+ int ret;
cbq_mark_toplevel(q, cl);
q->rx_class = cl;
cl->q->__parent = sch;
- if (qdisc_enqueue(skb, cl->q) == 0) {
+ ret = qdisc_enqueue(skb, cl->q);
+ if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
sch->bstats.packets++;
sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
cbq_activate_class(cl);
return 0;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return 0;
}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e..7170275d9f9 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
kfree_skb(skb);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, p->q);
if (err != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err))
+ sch->qstats.drops++;
return err;
}
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
err = p->q->ops->requeue(skb, p->q);
if (err != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err))
+ sch->qstats.drops++;
return err;
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba..5cf9ae71611 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1166,7 +1166,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, cl->qdisc);
if (unlikely(err != NET_XMIT_SUCCESS)) {
- cl->qstats.drops++;
- sch->qstats.drops++;
+ if (net_xmit_drop_count(err)) {
+ cl->qstats.drops++;
+ sch->qstats.drops++;
+ }
return err;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f..538d79b489a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -221,7 +221,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -572,9 +572,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
kfree_skb(skb);
return ret;
#endif
- } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
+ } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return NET_XMIT_DROP;
} else {
cl->bstats.packets +=
@@ -615,10 +617,12 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
kfree_skb(skb);
return ret;
#endif
- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+ } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
+ if (net_xmit_drop_count(ret)) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ }
return NET_XMIT_DROP;
} else
htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a5908570067..6cd6f2bc749 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++;
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- } else
+ } else if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
+ }
pr_debug("netem: enqueue ret %d\n", ret);
return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb09..adb1a52b77d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -45,7 +45,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
switch (err) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
}
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return ret;
}
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
sch->qstats.requeues++;
return 0;
}
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return NET_XMIT_DROP;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bb..5da05839e22 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
sch->q.qlen++;
- } else {
+ } else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
sch->qstats.drops++;
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da66656..3a456e1b829 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
switch (result) {
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
- *qerr = NET_XMIT_SUCCESS;
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return 0;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f763..7d3b7ff3bf0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != 0) {
- sch->qstats.drops++;
+ if (net_xmit_drop_count(ret))
+ sch->qstats.drops++;
return ret;
}