From 3015f3d2a3cd9614294025849d3ed89fd2f3a7f5 Mon Sep 17 00:00:00 2001
From: Paolo Valente
Date: Mon, 5 Nov 2012 20:29:24 +0000
Subject: pkt_sched: enable QFQ to support TSO/GSO

If the max packet size for some class (configured through tc) is
violated by the actual size of the packets of that class, then QFQ
would not schedule classes correctly, and the data structures
implementing the bucket lists may get corrupted. This problem occurs
with TSO/GSO even if the max packet size is set to the MTU, and is,
e.g., the cause of the failure reported in [1]. Two patches have been
proposed to solve this problem in [2]; one of them is a preliminary
version of this patch.

This patch addresses the above issues by:
1) setting QFQ parameters to proper values for supporting TSO/GSO (in
   particular, setting the maximum possible packet size to 64KB),
2) automatically increasing the max packet size for a class, lmax,
   when a packet with a larger size than the current value of lmax
   arrives.

The drawback of the first point is that the maximum weight for a class
is now limited to 4096, which is equal to 1/16 of the maximum weight
sum.

Finally, this patch also forcibly caps the timestamps of a class if
they are too high to be stored in the bucket list. This capping, taken
from QFQ+ [3], handles the infrequent case described in the comment to
the function qfq_slot_insert.

[1] http://marc.info/?l=linux-netdev&m=134968777902077&w=2
[2] http://marc.info/?l=linux-netdev&m=135096573507936&w=2
[3] http://marc.info/?l=linux-netdev&m=134902691421670&w=2

Signed-off-by: Paolo Valente
Tested-by: Cong Wang
Acked-by: Stephen Hemminger
Signed-off-by: David S. Miller
---
 net/sched/sch_qfq.c | 109 +++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 79 insertions(+), 30 deletions(-)
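Note (not part of the patch): the new limits can be sanity-checked in user
space. The sketch below simply mirrors the updated macros from the first hunk
below and prints the derived values: a 64KB maximum packet size, a maximum
class weight of 4096, and a maximum weight sum of 65536, i.e. the 1/16 ratio
mentioned above.

	#include <stdio.h>

	/* values introduced by this patch (see the first hunk below) */
	#define QFQ_MAX_WSHIFT	12
	#define QFQ_MAX_WEIGHT	(1 << QFQ_MAX_WSHIFT)	/* 4096 */
	#define QFQ_MAX_WSUM	(16 * QFQ_MAX_WEIGHT)	/* 65536 */
	#define QFQ_MTU_SHIFT	16			/* 64KB max packet size */

	int main(void)
	{
		printf("max packet size: %lu bytes\n", 1UL << QFQ_MTU_SHIFT);
		printf("max class weight: %d\n", QFQ_MAX_WEIGHT);
		printf("max weight sum: %d (weight limit is 1/%d of it)\n",
		       QFQ_MAX_WSUM, QFQ_MAX_WSUM / QFQ_MAX_WEIGHT);
		return 0;
	}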
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f0dd83cff90..9687fa1c227 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -84,18 +84,19 @@
  * grp->index is the index of the group; and grp->slot_shift
  * is the shift for the corresponding (scaled) sigma_i.
  */
-#define QFQ_MAX_INDEX		19
-#define QFQ_MAX_WSHIFT		16
+#define QFQ_MAX_INDEX		24
+#define QFQ_MAX_WSHIFT		12
 
 #define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT)
-#define QFQ_MAX_WSUM		(2*QFQ_MAX_WEIGHT)
+#define QFQ_MAX_WSUM		(16*QFQ_MAX_WEIGHT)
 
 #define FRAC_BITS		30	/* fixed point arithmetic */
 #define ONE_FP			(1UL << FRAC_BITS)
 #define IWSUM			(ONE_FP/QFQ_MAX_WSUM)
 
-#define QFQ_MTU_SHIFT		11
+#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 #define QFQ_MIN_SLOT_SHIFT	(FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
+#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
 
 /*
  * Possible group states.  These values are used as indexes for the bitmaps
@@ -231,6 +232,32 @@ static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
 	q->wsum += delta_w;
 }
 
+static void qfq_update_reactivate_class(struct qfq_sched *q,
+					struct qfq_class *cl,
+					u32 inv_w, u32 lmax, int delta_w)
+{
+	bool need_reactivation = false;
+	int i = qfq_calc_index(inv_w, lmax);
+
+	if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+		/*
+		 * shift cl->F back, to not charge the
+		 * class for the not-yet-served head
+		 * packet
+		 */
+		cl->F = cl->S;
+		/* remove class from its slot in the old group */
+		qfq_deactivate_class(q, cl);
+		need_reactivation = true;
+	}
+
+	qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+	if (need_reactivation) /* activate in new group */
+		qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+}
+
+
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			    struct nlattr **tca, unsigned long *arg)
 {
@@ -238,7 +265,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	struct qfq_class *cl = (struct qfq_class *)*arg;
 	struct nlattr *tb[TCA_QFQ_MAX + 1];
 	u32 weight, lmax, inv_w;
-	int i, err;
+	int err;
 	int delta_w;
 
 	if (tca[TCA_OPTIONS] == NULL) {
@@ -270,16 +297,14 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tb[TCA_QFQ_LMAX]) {
 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
-		if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
+		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
 			pr_notice("qfq: invalid max length %u\n", lmax);
 			return -EINVAL;
 		}
 	} else
-		lmax = 1UL << QFQ_MTU_SHIFT;
+		lmax = psched_mtu(qdisc_dev(sch));
 
 	if (cl != NULL) {
-		bool need_reactivation = false;
-
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
 						    qdisc_root_sleeping_lock(sch),
@@ -291,24 +316,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		if (lmax == cl->lmax && inv_w == cl->inv_w)
 			return 0; /* nothing to update */
 
-		i = qfq_calc_index(inv_w, lmax);
 		sch_tree_lock(sch);
-		if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
-			/*
-			 * shift cl->F back, to not charge the
-			 * class for the not-yet-served head
-			 * packet
-			 */
-			cl->F = cl->S;
-			/* remove class from its slot in the old group */
-			qfq_deactivate_class(q, cl);
-			need_reactivation = true;
-		}
-
-		qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
-
-		if (need_reactivation) /* activate in new group */
-			qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+		qfq_update_reactivate_class(q, cl, inv_w, lmax, delta_w);
 		sch_tree_unlock(sch);
 
 		return 0;
@@ -663,15 +672,48 @@ static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
 
 /*
- * XXX we should make sure that slot becomes less than 32.
- * This is guaranteed by the input values.
- * roundedS is always cl->S rounded on grp->slot_shift bits.
+ * If the weight and lmax (max_pkt_size) of the classes do not change,
+ * then QFQ guarantees that the slot index is never higher than
+ * 2 + ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM)
+ *
+ * When the weight of a class is increased or the lmax of the class is
+ * decreased while the class is backlogged, the above bound may instead
+ * be violated, and the computed slot index may not fit in the bucket
+ * list. To handle this infrequent case, the following function caps the
+ * timestamps of the class, i.e., it shifts them back so that the class
+ * is inserted at most in the last usable slot, QFQ_MAX_SLOTS - 2.
  */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
 			    u64 roundedS)
 {
 	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
-	unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
+	unsigned int i; /* slot index in the bucket list */
+
+	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
+		u64 deltaS = roundedS - grp->S -
+			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
+		cl->S -= deltaS;
+		cl->F -= deltaS;
+		slot = QFQ_MAX_SLOTS - 2;
+	}
+
+	i = (grp->front + slot) % QFQ_MAX_SLOTS;
 
 	hlist_add_head(&cl->next, &grp->slots[i]);
 	__set_bit(slot, &grp->full_slots);
@@ -892,6 +934,13 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
+	if (unlikely(cl->lmax < qdisc_pkt_len(skb))) {
+		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
+			 cl->lmax, qdisc_pkt_len(skb), cl->common.classid);
+		qfq_update_reactivate_class(q, cl, cl->inv_w,
+					    qdisc_pkt_len(skb), 0);
+	}
+
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
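For reference, the timestamp-capping arithmetic added to qfq_slot_insert() can
be exercised outside the kernel. The sketch below is not part of the patch: it
re-implements only the capped slot computation in plain C with stdint types,
reusing the names from the patch (roundedS, slot_shift, QFQ_MAX_SLOTS) to show
how an out-of-range class is forced into the last usable slot.

	#include <stdio.h>
	#include <stdint.h>

	#define QFQ_MAX_SLOTS	32

	/* mimic the capped slot computation from qfq_slot_insert() */
	static unsigned int capped_slot(uint64_t roundedS, uint64_t grp_S,
					int slot_shift,
					uint64_t *cl_S, uint64_t *cl_F)
	{
		uint64_t slot = (roundedS - grp_S) >> slot_shift;

		if (slot > QFQ_MAX_SLOTS - 2) {
			/* shift the class timestamps back so that the class
			 * lands in the last usable slot, QFQ_MAX_SLOTS - 2 */
			uint64_t deltaS = roundedS - grp_S -
				((uint64_t)(QFQ_MAX_SLOTS - 2) << slot_shift);
			*cl_S -= deltaS;
			*cl_F -= deltaS;
			slot = QFQ_MAX_SLOTS - 2;
		}
		return (unsigned int)slot;
	}

	int main(void)
	{
		uint64_t S = 40960, F = 41984;	/* example start/finish timestamps */

		/* group start time 0, slot_shift 10: the raw slot would be 40 */
		unsigned int slot = capped_slot(S, 0, 10, &S, &F);

		printf("slot = %u, S = %llu, F = %llu\n",
		       slot, (unsigned long long)S, (unsigned long long)F);
		return 0;
	}

With these example numbers the raw slot index (40) exceeds QFQ_MAX_SLOTS - 2,
so both timestamps are shifted back by the same amount and the class ends up
in slot 30, which is exactly the behavior the new unlikely() branch implements.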