Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_police.c  |  8
-rw-r--r--  net/sched/sch_api.c     | 11
-rw-r--r--  net/sched/sch_generic.c |  8
-rw-r--r--  net/sched/sch_htb.c     | 42
-rw-r--r--  net/sched/sch_tbf.c     |  8
5 files changed, 40 insertions(+), 37 deletions(-)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 823463adbd2..189e3c5b3d0 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -231,14 +231,14 @@ override:
}
if (R_tab) {
police->rate_present = true;
- psched_ratecfg_precompute(&police->rate, R_tab->rate.rate);
+ psched_ratecfg_precompute(&police->rate, &R_tab->rate);
qdisc_put_rtab(R_tab);
} else {
police->rate_present = false;
}
if (P_tab) {
police->peak_present = true;
- psched_ratecfg_precompute(&police->peak, P_tab->rate.rate);
+ psched_ratecfg_precompute(&police->peak, &P_tab->rate);
qdisc_put_rtab(P_tab);
} else {
police->peak_present = false;
@@ -376,9 +376,9 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
};
if (police->rate_present)
- opt.rate.rate = psched_ratecfg_getrate(&police->rate);
+ psched_ratecfg_getrate(&opt.rate, &police->rate);
if (police->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&police->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &police->peak);
if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
goto nla_put_failure;
if (police->tcfp_result &&
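
The act_police hunks above widen the ratecfg interface from a bare u32 rate to the whole struct tc_ratespec, so the per-packet overhead configured by tc is carried into the precomputed rate instead of being dropped. A trimmed sketch of the new contract (illustration only; struct bodies are cut down to the fields this patch touches):

    /* Illustration: only the fields the patch consumes are shown. */
    struct tc_ratespec {
            unsigned int   rate;       /* bytes per second */
            short          overhead;   /* per-packet framing overhead, bytes */
            /* cell_log, mpu, ... elided */
    };

    struct psched_ratecfg {
            unsigned long long rate_bps;  /* rate << 3, bits per second */
            unsigned int       mult;      /* see the calibration sketch below */
            int                shift;
            short              overhead;  /* new: copied from the ratespec */
    };

    /* old: void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);
     * new: the full spec crosses the boundary, so overhead survives */
    void psched_ratecfg_precompute(struct psched_ratecfg *r,
                                   const struct tc_ratespec *conf);
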
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2b935e7cfe7..281c1bded1f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -291,17 +291,18 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
{
struct qdisc_rate_table *rtab;
+ if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+ nla_len(tab) != TC_RTAB_SIZE)
+ return NULL;
+
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
- if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
+ if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
+ !memcmp(&rtab->data, nla_data(tab), 1024)) {
rtab->refcnt++;
return rtab;
}
}
- if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
- nla_len(tab) != TC_RTAB_SIZE)
- return NULL;
-
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
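
Two things change in qdisc_get_rtab(): the netlink attribute is validated before anything reads nla_data(tab), and the cache lookup now matches on the 1024-byte table body as well as the tc_ratespec, so two configs whose specs collide but whose tables differ (for example, built by tc with different overhead folded in) no longer share one entry. A userspace-style sketch of the resulting sharing rule, reusing the trimmed tc_ratespec above:

    #include <string.h>

    struct rtab_sk {                         /* stand-in for qdisc_rate_table */
            struct tc_ratespec rate;
            unsigned int data[256];          /* 256 * 4 = 1024 bytes */
            struct rtab_sk *next;
            int refcnt;
    };

    static struct rtab_sk *rtab_lookup(struct rtab_sk *list,
                                       const struct tc_ratespec *r,
                                       const void *tab_data)
    {
            struct rtab_sk *rtab;

            for (rtab = list; rtab; rtab = rtab->next) {
                    /* reuse only when spec AND table contents both match */
                    if (!memcmp(&rtab->rate, r, sizeof(*r)) &&
                        !memcmp(rtab->data, tab_data, sizeof(rtab->data))) {
                            rtab->refcnt++;
                            return rtab;
                    }
            }
            return NULL;    /* caller allocates and links a fresh table */
    }
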
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index eac7e0ee23c..20224086cc2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -898,14 +898,16 @@ void dev_shutdown(struct net_device *dev)
WARN_ON(timer_pending(&dev->watchdog_timer));
}
-void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf)
{
u64 factor;
u64 mult;
int shift;
- r->rate_bps = (u64)rate << 3;
- r->shift = 0;
+ memset(r, 0, sizeof(*r));
+ r->overhead = conf->overhead;
+ r->rate_bps = (u64)conf->rate << 3;
r->mult = 1;
/*
* Calibrate mult, shift so that token counting is accurate
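
The memset() replaces the field-by-field reset so the newly added overhead member starts out clean, and the calibration the comment refers to picks mult and shift such that converting a packet length to nanoseconds costs one multiply plus one shift. A userspace approximation of that idea (the loop bounds are assumptions, not the kernel's exact code):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct ratecfg {
            uint64_t rate_bps;   /* bits per second */
            uint32_t mult;
            int      shift;
    };

    /* Choose mult/shift so (len * mult) >> shift approximates
     * len * 8 * NSEC_PER_SEC / rate_bps, the transmit time of len
     * bytes in ns, with no per-packet division. Assumes rate > 0,
     * which qdisc_get_rtab() above now enforces. */
    static void ratecfg_precompute(struct ratecfg *r, uint64_t rate_bytes)
    {
            int shift;

            r->rate_bps = rate_bytes << 3;
            r->mult = 1;
            r->shift = 0;
            for (shift = 0; shift < 16; shift++) {
                    uint64_t mult = (NSEC_PER_SEC << shift) * 8 / r->rate_bps;

                    if (mult > UINT32_MAX)
                            break;
                    r->mult = (uint32_t)mult;
                    r->shift = shift;
            }
    }

    /* packet length (bytes) -> transmit time (ns) at the configured rate */
    static uint64_t l2t_ns(const struct ratecfg *r, unsigned int len)
    {
            return ((uint64_t)len * r->mult) >> r->shift;
    }

At 1 Gbit/s this settles on shift = 15, mult = 262144, and a 1500-byte packet costs (1500 * 262144) >> 15 = 12000 ns, i.e. 12 us on the wire.
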
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 79b1876b6cd..adaedd79389 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -109,7 +109,7 @@ struct htb_class {
} un;
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
- psched_time_t pq_key;
+ s64 pq_key;
int prio_activity; /* for which prios are we active */
enum htb_cmode cmode; /* current mode of the class */
@@ -121,10 +121,10 @@ struct htb_class {
/* token bucket parameters */
struct psched_ratecfg rate;
struct psched_ratecfg ceil;
- s64 buffer, cbuffer; /* token bucket depth/rate */
- psched_tdiff_t mbuffer; /* max wait time */
- s64 tokens, ctokens; /* current number of tokens */
- psched_time_t t_c; /* checkpoint time */
+ s64 buffer, cbuffer; /* token bucket depth/rate */
+ s64 mbuffer; /* max wait time */
+ s64 tokens, ctokens; /* current number of tokens */
+ s64 t_c; /* checkpoint time */
};
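
The fields above move from psched ticks to plain s64 nanoseconds; tick conversions survive only at the netlink boundary (the PSCHED_TICKS2NS/PSCHED_NS2TICKS calls further down). For reference, a sketch of those two conversions, assuming the then-current PSCHED_SHIFT of 6, i.e. one tick = 64 ns:

    #include <stdint.h>

    #define PSCHED_SHIFT 6   /* assumed: 1 psched tick = 64 ns */

    static inline int64_t ticks2ns(int64_t ticks)  /* cf. PSCHED_TICKS2NS */
    {
            return ticks << PSCHED_SHIFT;
    }

    static inline int64_t ns2ticks(int64_t ns)     /* cf. PSCHED_NS2TICKS */
    {
            return ns >> PSCHED_SHIFT;
    }

So a buffer of 15625 ticks from userspace becomes exactly 1000000 ns (1 ms) of credit.
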
@@ -141,15 +141,15 @@ struct htb_sched {
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
- psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];
+ s64 near_ev_cache[TC_HTB_MAXDEPTH];
int defcls; /* class where unclassified flows go to */
/* filters for qdisc itself */
struct tcf_proto *filter_list;
- int rate2quantum; /* quant = rate / rate2quantum */
- psched_time_t now; /* cached dequeue time */
+ int rate2quantum; /* quant = rate / rate2quantum */
+ s64 now; /* cached dequeue time */
struct qdisc_watchdog watchdog;
/* non shaped skbs; let them go directly thru */
@@ -664,8 +664,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
* next pending event (0 for no event in pq, q->now for too many events).
* Note: Applied are events which have cl->pq_key <= q->now.
*/
-static psched_time_t htb_do_events(struct htb_sched *q, int level,
- unsigned long start)
+static s64 htb_do_events(struct htb_sched *q, int level,
+ unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
* 1 to simplify things when jiffy is going to be incremented
@@ -857,7 +857,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct sk_buff *skb;
struct htb_sched *q = qdisc_priv(sch);
int level;
- psched_time_t next_event;
+ s64 next_event;
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
@@ -880,7 +880,7 @@ ok:
for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
/* common case optimization - skip event handler quickly */
int m;
- psched_time_t event;
+ s64 event;
if (q->now >= q->near_ev_cache[level]) {
event = htb_do_events(q, level, start_at);
@@ -1090,9 +1090,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
memset(&opt, 0, sizeof(opt));
- opt.rate.rate = psched_ratecfg_getrate(&cl->rate);
+ psched_ratecfg_getrate(&opt.rate, &cl->rate);
opt.buffer = PSCHED_NS2TICKS(cl->buffer);
- opt.ceil.rate = psched_ratecfg_getrate(&cl->ceil);
+ psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
opt.quantum = cl->quantum;
opt.prio = cl->prio;
@@ -1117,8 +1117,8 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
- cl->xstats.tokens = cl->tokens;
- cl->xstats.ctokens = cl->ctokens;
+ cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
+ cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
@@ -1200,7 +1200,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
- parent->t_c = psched_get_time();
+ parent->t_c = ktime_to_ns(ktime_get());
parent->cmode = HTB_CAN_SEND;
}
@@ -1417,8 +1417,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* set class to be in HTB_CAN_SEND state */
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
- cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC; /* 1min */
- cl->t_c = psched_get_time();
+ cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
+ cl->t_c = ktime_to_ns(ktime_get());
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
@@ -1459,8 +1459,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->prio = TC_HTB_NUMPRIO - 1;
}
- psched_ratecfg_precompute(&cl->rate, hopt->rate.rate);
- psched_ratecfg_precompute(&cl->ceil, hopt->ceil.rate);
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
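
With every timestamp and token count in signed 64-bit nanoseconds, HTB's charge path reduces to plain arithmetic. A hedged sketch of that accounting (names are illustrative; in the kernel this lives in htb_accnt_tokens() and htb_charge_class()):

    #include <stdint.h>

    struct htb_tokens {              /* trimmed stand-in for struct htb_class */
            int64_t tokens;          /* current credit, ns of transmit time */
            int64_t buffer;          /* bucket depth (burst), ns */
            int64_t mbuffer;         /* max idle credit, 60 * NSEC_PER_SEC */
            int64_t t_c;             /* checkpoint timestamp, ns */
    };

    static int64_t min_s64(int64_t a, int64_t b)
    {
            return a < b ? a : b;
    }

    /* pkt_ns is the packet's cost at the class rate, e.g. from the
     * l2t_ns() sketch above; now_ns corresponds to ktime_get() in ns. */
    static void charge_sketch(struct htb_tokens *cl, int64_t now_ns,
                              int64_t pkt_ns)
    {
            int64_t diff = min_s64(now_ns - cl->t_c, cl->mbuffer);

            cl->tokens = min_s64(cl->tokens + diff, cl->buffer) - pkt_ns;
            cl->t_c = now_ns;
    }
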
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c8388f3c342..e478d316602 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -298,9 +298,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->tokens = q->buffer;
q->ptokens = q->mtu;
- psched_ratecfg_precompute(&q->rate, rtab->rate.rate);
+ psched_ratecfg_precompute(&q->rate, &rtab->rate);
if (ptab) {
- psched_ratecfg_precompute(&q->peak, ptab->rate.rate);
+ psched_ratecfg_precompute(&q->peak, &ptab->rate);
q->peak_present = true;
} else {
q->peak_present = false;
@@ -350,9 +350,9 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
goto nla_put_failure;
opt.limit = q->limit;
- opt.rate.rate = psched_ratecfg_getrate(&q->rate);
+ psched_ratecfg_getrate(&opt.rate, &q->rate);
if (q->peak_present)
- opt.peakrate.rate = psched_ratecfg_getrate(&q->peak);
+ psched_ratecfg_getrate(&opt.peakrate, &q->peak);
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
opt.mtu = PSCHED_NS2TICKS(q->mtu);
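
The dump side mirrors the configure side: psched_ratecfg_getrate() now fills a whole tc_ratespec rather than returning a bare u32, so overhead round-trips through netlink and tc can print it back. A plausible shape for the helper, reusing the trimmed structs from the first sketch:

    #include <string.h>

    /* Rebuild the byte rate from the precomputed bits-per-second value
     * and carry overhead along; remaining fields are zeroed. */
    static void ratecfg_getrate(struct tc_ratespec *res,
                                const struct psched_ratecfg *r)
    {
            memset(res, 0, sizeof(*res));
            res->rate = r->rate_bps >> 3;   /* bits/s back to bytes/s */
            res->overhead = r->overhead;
    }
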