author     Priyanka Jain <Priyanka.Jain@freescale.com>   2012-05-17 09:35:11 +0530
committer  Steven Rostedt <rostedt@rostedt.homelinux.com>   2012-10-26 14:42:50 -0400
commit     f93560c2eebf395e3e18b2200a2746b7df9bb29d (patch)
tree       0f6d7e5b97b19d59a351061f3c0e205d91d4019c
parent     338a46a8c428eb6e3f7d71c7d7fdaa8226bf39a8 (diff)
net, RT: Remove preemption disabling in netif_rx()
1) enqueue_to_backlog() (called from netif_rx()) should be bound to a
   particular CPU. This can be achieved by disabling migration; there is
   no need to disable preemption.

2) Fixes the crash "BUG: scheduling while atomic: ksoftirqd" on RT.
   If preemption is disabled, enqueue_to_backlog() is called in atomic
   context, and if the backlog exceeds its count, kfree_skb() is called.
   On RT, kfree_skb() might get scheduled out, so it expects a
   non-atomic context.

3) When CONFIG_PREEMPT_RT_FULL is not defined, migrate_enable() and
   migrate_disable() map to preempt_enable() and preempt_disable(), so
   there is no functional change for non-RT.

- Replace preempt_enable() and preempt_disable() with migrate_enable()
  and migrate_disable() respectively.
- Replace get_cpu() and put_cpu() with get_cpu_light() and
  put_cpu_light() respectively.

Signed-off-by: Priyanka Jain <Priyanka.Jain@freescale.com>
Acked-by: Rajan Srivastava <Rajan.Srivastava@freescale.com>
Cc: <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
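For context on point (3) above, the following is a minimal, illustrative sketch of how the RT patchset of that era wired up these helpers; the exact macro bodies and header locations (linux/preempt.h, linux/smp.h) may differ between -rt releases, so treat this as an assumption, not the definitive upstream code:

    /* Illustrative only -- approximate -rt definitions, not verbatim
     * upstream macro bodies. */
    #ifdef CONFIG_PREEMPT_RT_FULL
    /* On RT: pin the task to the current CPU via migrate_disable(),
     * but leave preemption enabled so callees may sleep. */
    # define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()	migrate_enable()
    #else
    /* Non-RT: migration control collapses to the preempt primitives,
     * so netif_rx() behaves exactly as it did before this patch. */
    # define migrate_disable()	preempt_disable()
    # define migrate_enable()	preempt_enable()
    # define get_cpu_light()	get_cpu()
    # define put_cpu_light()	put_cpu()
    #endif

The design point is that enqueue_to_backlog() only needs the task to stay on one CPU (for correct per-CPU backlog access); it does not need an atomic context. Disabling migration instead of preemption keeps the CPU binding while still allowing kfree_skb() to sleep on RT.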
-rw-r--r--  net/core/dev.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 875cf67112f1..338608df7116 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2881,7 +2881,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- preempt_disable();
+ migrate_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -2891,13 +2891,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- preempt_enable();
+ migrate_enable();
}
#else
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
- put_cpu();
+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
+ put_cpu_light();
}
#endif
return ret;