author     Trond Myklebust <Trond.Myklebust@netapp.com>    2012-05-21 10:12:39 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2012-05-21 10:12:39 -0400
commit     b3f87b98aa3dc22cc58f970140113b270015cddb (patch)
tree       c4aec5996567ad96310f61e9244e799f41bf2625 /net
parent     041245c88a29273788e8eff1353bc6e1f56c61df (diff)
parent     1afeaf5c29aa07db25760d2fbed5c08a3aec3498 (diff)
Merge branch 'bugfixes' into nfs-for-next
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 2
-rw-r--r--  net/bluetooth/hci_core.c | 27
-rw-r--r--  net/bluetooth/hci_event.c | 3
-rw-r--r--  net/bluetooth/mgmt.c | 2
-rw-r--r--  net/bridge/br_forward.c | 1
-rw-r--r--  net/bridge/br_netfilter.c | 8
-rw-r--r--  net/core/dev.c | 36
-rw-r--r--  net/core/drop_monitor.c | 88
-rw-r--r--  net/core/pktgen.c | 10
-rw-r--r--  net/ieee802154/6lowpan.c | 40
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/tcp.c | 9
-rw-r--r--  net/ipv4/tcp_input.c | 13
-rw-r--r--  net/ipv4/udp_diag.c | 9
-rw-r--r--  net/l2tp/l2tp_ip.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/iface.c | 4
-rw-r--r--  net/mac80211/mlme.c | 2
-rw-r--r--  net/mac80211/tx.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 11
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 56
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c | 38
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c | 5
-rw-r--r--  net/netfilter/xt_CT.c | 2
-rw-r--r--  net/openvswitch/datapath.c | 27
-rw-r--r--  net/openvswitch/flow.c | 3
-rw-r--r--  net/sched/sch_netem.c | 6
-rw-r--r--  net/sctp/output.c | 4
-rw-r--r--  net/sctp/transport.c | 17
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 7
-rw-r--r--  net/sunrpc/clnt.c | 16
-rw-r--r--  net/sunrpc/rpc_pipe.c | 8
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 2
-rw-r--r--  net/sunrpc/xprt.c | 7
40 files changed, 307 insertions(+), 186 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9988d4abb37..9757c193c86 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
- skb_set_dev(skb, vlan_dev_priv(dev)->real_dev);
+ skb->dev = vlan_dev_priv(dev)->real_dev;
len = skb->len;
if (netpoll_tx_running(dev))
return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 92a857e3786..edfd61addce 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
return NULL;
}
-static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
- return 1;
+ return true;
/* Debug keys are insecure so don't store them persistently */
if (key_type == HCI_LK_DEBUG_COMBINATION)
- return 0;
+ return false;
/* Changed combination key and there's no previous one */
if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
- return 0;
+ return false;
/* Security mode 3 case */
if (!conn)
- return 1;
+ return true;
/* Neither local nor remote side had no-bonding as requirement */
if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
- return 1;
+ return true;
/* Local side had dedicated bonding as requirement */
if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
- return 1;
+ return true;
/* Remote side had dedicated bonding as requirement */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
- return 1;
+ return true;
/* If none of the above criteria match, then don't store the key
* persistently */
- return 0;
+ return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
@@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
struct link_key *key, *old_key;
- u8 old_key_type, persistent;
+ u8 old_key_type;
+ bool persistent;
old_key = hci_find_link_key(hdev, bdaddr);
if (old_key) {
@@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
mgmt_new_link_key(hdev, key, persistent);
- if (!persistent) {
- list_del(&key->list);
- kfree(key);
- }
+ if (conn)
+ conn->flush_key = !persistent;
return 0;
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b37531094c4..6c065254afc 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
}
if (ev->status == 0) {
+ if (conn->type == ACL_LINK && conn->flush_key)
+ hci_remove_link_key(hdev, &conn->dst);
hci_proto_disconn_cfm(conn, ev->reason);
hci_conn_del(conn);
}
@@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
case HCI_OP_USER_PASSKEY_NEG_REPLY:
hci_cc_user_passkey_neg_reply(hdev, skb);
+ break;
case HCI_OP_LE_SET_SCAN_PARAM:
hci_cc_le_set_scan_param(hdev, skb);
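
The two Bluetooth hunks above replace the immediate kfree() of a non-persistent link key with a flush_key flag on the connection that is honoured when the disconnect completes. A minimal userspace sketch of that deferred-cleanup pattern follows; struct conn, struct key and the helper names are illustrative stand-ins, not the hci_* APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct key { int id; };

struct conn {
        bool flush_key;
        struct key *key;
};

static void add_link_key(struct conn *c, struct key *k, bool persistent)
{
        c->key = k;
        /* Defer the decision: only record whether to flush later. */
        c->flush_key = !persistent;
}

static void disconn_complete(struct conn *c)
{
        if (c->flush_key && c->key) {
                printf("removing non-persistent key %d\n", c->key->id);
                free(c->key);
                c->key = NULL;
        }
}

int main(void)
{
        struct conn c = { 0 };
        struct key *k = malloc(sizeof(*k));

        k->id = 1;
        add_link_key(&c, k, false);   /* e.g. a debug combination key */
        disconn_complete(&c);         /* the key is flushed only here */
        return 0;
}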
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4ef275c6967..4bb03b11112 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
return 0;
}
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent)
+int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent)
{
struct mgmt_ev_new_link_key ev;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 61f65344e71..a2098e3de50 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
kfree_skb(skb);
} else {
skb_push(skb, ETH_HLEN);
+ br_drop_fake_rtable(skb);
dev_queue_xmit(skb);
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index dec4f381713..d7f49b63ab0 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM | DST_NOPEER;
+ rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
rt->dst.ops = &fake_dst_ops;
}
@@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- struct rtable *rt = skb_rtable(skb);
-
- if (rt && rt == bridge_parent_rtable(in))
- skb_dst_drop(skb);
-
+ br_drop_fake_rtable(skb);
return NF_ACCEPT;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 9bb8f87c4cd..99e1d759f41 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1617,10 +1617,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
return NET_RX_DROP;
}
skb->skb_iif = 0;
- skb_set_dev(skb, dev);
+ skb->dev = dev;
+ skb_dst_drop(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1869,36 +1873,6 @@ void netif_device_attach(struct net_device *dev)
}
EXPORT_SYMBOL(netif_device_attach);
-/**
- * skb_dev_set -- assign a new device to a buffer
- * @skb: buffer for the new device
- * @dev: network device
- *
- * If an skb is owned by a device already, we have to reset
- * all data private to the namespace a device belongs to
- * before assigning it a new device.
- */
-#ifdef CONFIG_NET_NS
-void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
-{
- skb_dst_drop(skb);
- if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
- secpath_reset(skb);
- nf_reset(skb);
- skb_init_secmark(skb);
- skb->mark = 0;
- skb->priority = 0;
- skb->nf_trace = 0;
- skb->ipvs_property = 0;
-#ifdef CONFIG_NET_SCHED
- skb->tc_index = 0;
-#endif
- }
- skb->dev = dev;
-}
-EXPORT_SYMBOL(skb_set_dev);
-#endif /* CONFIG_NET_NS */
-
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
static const netdev_features_t null_features = 0;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 5c3c81a609e..a7cad741df0 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused);
* netlink alerts
*/
static int trace_state = TRACE_OFF;
-static DEFINE_SPINLOCK(trace_state_lock);
+static DEFINE_MUTEX(trace_state_mutex);
struct per_cpu_dm_data {
struct work_struct dm_alert_work;
- struct sk_buff *skb;
+ struct sk_buff __rcu *skb;
atomic_t dm_hit_count;
struct timer_list send_timer;
+ int cpu;
};
struct dm_hw_stat_delta {
@@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
size_t al;
struct net_dm_alert_msg *msg;
struct nlattr *nla;
+ struct sk_buff *skb;
+ struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);
al += sizeof(struct nlattr);
- data->skb = genlmsg_new(al, GFP_KERNEL);
- genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family,
- 0, NET_DM_CMD_ALERT);
- nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg));
- msg = nla_data(nla);
- memset(msg, 0, al);
- atomic_set(&data->dm_hit_count, dm_hit_limit);
+ skb = genlmsg_new(al, GFP_KERNEL);
+
+ if (skb) {
+ genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+ 0, NET_DM_CMD_ALERT);
+ nla = nla_reserve(skb, NLA_UNSPEC,
+ sizeof(struct net_dm_alert_msg));
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+ } else
+ schedule_work_on(data->cpu, &data->dm_alert_work);
+
+ /*
+ * Don't need to lock this, since we are guaranteed to only
+ * run this on a single cpu at a time.
+ * Note also that we only update data->skb if the old and new skb
+ * pointers don't match. This ensures that we don't continually call
+ * synchronize_rcu if we repeatedly fail to alloc a new netlink message.
+ */
+ if (skb != oskb) {
+ rcu_assign_pointer(data->skb, skb);
+
+ synchronize_rcu();
+
+ atomic_set(&data->dm_hit_count, dm_hit_limit);
+ }
+
}
static void send_dm_alert(struct work_struct *unused)
{
struct sk_buff *skb;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ WARN_ON_ONCE(data->cpu != smp_processor_id());
/*
* Grab the skb we're about to send
*/
- skb = data->skb;
+ skb = rcu_dereference_protected(data->skb, 1);
/*
* Replace it with a new one
@@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused)
/*
* Ship it!
*/
- genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ if (skb)
+ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+ put_cpu_var(dm_cpu_data);
}
/*
@@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused)
*/
static void sched_send_work(unsigned long unused)
{
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+ schedule_work_on(smp_processor_id(), &data->dm_alert_work);
- schedule_work(&data->dm_alert_work);
+ put_cpu_var(dm_cpu_data);
}
static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -134,8 +163,15 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
struct nlmsghdr *nlh;
struct nlattr *nla;
int i;
- struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data);
+ struct sk_buff *dskb;
+ struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+
+
+ rcu_read_lock();
+ dskb = rcu_dereference(data->skb);
+ if (!dskb)
+ goto out;
if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
/*
@@ -144,7 +180,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
goto out;
}
- nlh = (struct nlmsghdr *)data->skb->data;
+ nlh = (struct nlmsghdr *)dskb->data;
nla = genlmsg_data(nlmsg_data(nlh));
msg = nla_data(nla);
for (i = 0; i < msg->entries; i++) {
@@ -158,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
/*
* We need to create a new entry
*/
- __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point));
+ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
msg->points[msg->entries].count = 1;
@@ -170,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
}
out:
+ rcu_read_unlock();
+ put_cpu_var(dm_cpu_data);
return;
}
@@ -214,7 +252,7 @@ static int set_all_monitor_traces(int state)
struct dm_hw_stat_delta *new_stat = NULL;
struct dm_hw_stat_delta *temp;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
if (state == trace_state) {
rc = -EAGAIN;
@@ -253,7 +291,7 @@ static int set_all_monitor_traces(int state)
rc = -EINPROGRESS;
out_unlock:
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
return rc;
}
@@ -296,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block,
new_stat->dev = dev;
new_stat->last_rx = jiffies;
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_add_rcu(&new_stat->list, &hw_stats_list);
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
case NETDEV_UNREGISTER:
- spin_lock(&trace_state_lock);
+ mutex_lock(&trace_state_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
new_stat->dev = NULL;
@@ -312,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
}
}
}
- spin_unlock(&trace_state_lock);
+ mutex_unlock(&trace_state_mutex);
break;
}
out:
@@ -368,13 +406,15 @@ static int __init init_net_drop_monitor(void)
for_each_present_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
- reset_per_cpu_data(data);
+ data->cpu = cpu;
INIT_WORK(&data->dm_alert_work, send_dm_alert);
init_timer(&data->send_timer);
data->send_timer.data = cpu;
data->send_timer.function = sched_send_work;
+ reset_per_cpu_data(data);
}
+
goto out;
out_unreg:
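
The drop_monitor changes above publish a freshly allocated netlink skb with rcu_assign_pointer() and wait for in-flight readers via synchronize_rcu() before re-arming the hit counter. The sketch below is a simplified, runnable userspace analogue of that publish-then-drain idea using C11 atomics and an explicit reader count; it is not the kernel RCU API, and all names (struct dm_data, publish_new_buf, ...) are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct dm_buf { int entries; };

struct dm_data {
        _Atomic(struct dm_buf *) buf;   /* published buffer, RCU-like */
        atomic_int readers;             /* readers currently inside   */
        atomic_int hit_count;           /* budget, like dm_hit_count  */
};

static struct dm_buf *reader_enter(struct dm_data *d)
{
        atomic_fetch_add(&d->readers, 1);
        return atomic_load(&d->buf);    /* like rcu_dereference()     */
}

static void reader_exit(struct dm_data *d)
{
        atomic_fetch_sub(&d->readers, 1);
}

static void publish_new_buf(struct dm_data *d, struct dm_buf *fresh)
{
        atomic_store(&d->buf, fresh);   /* like rcu_assign_pointer()  */
        while (atomic_load(&d->readers))
                ;                       /* crude synchronize_rcu() stand-in */
        atomic_store(&d->hit_count, 64);
}

int main(void)
{
        struct dm_data d = { .readers = 0, .hit_count = 64 };
        struct dm_buf first = { 0 }, second = { 0 };
        struct dm_buf *b;

        atomic_store(&d.buf, &first);
        b = reader_enter(&d);
        b->entries++;
        reader_exit(&d);

        publish_new_buf(&d, &second);
        printf("old entries %d, budget %d\n",
               first.entries, atomic_load(&d.hit_count));
        return 0;
}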
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd50..77a59980b57 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused,
{
struct net_device *dev = ptr;
- if (!net_eq(dev_net(dev), &init_net))
+ if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting)
return NOTIFY_DONE;
/* It is OK that we do not hold the group lock right now,
@@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
+ struct list_head list;
/* Stop all interfaces & threads */
pktgen_exiting = true;
- list_for_each_safe(q, n, &pktgen_threads) {
+ mutex_lock(&pktgen_thread_lock);
+ list_splice(&list, &pktgen_threads);
+ mutex_unlock(&pktgen_thread_lock);
+
+ list_for_each_safe(q, n, &list) {
t = list_entry(q, struct pktgen_thread, th_list);
+ list_del(&t->th_list);
kthread_stop(t->tsk);
kfree(t);
}
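
pg_cleanup() above now detaches the whole thread list while holding pktgen_thread_lock and only then stops and frees each thread with the lock dropped. A minimal userspace sketch of that detach-under-lock, free-outside-lock pattern, with illustrative names (struct worker, workers_lock) rather than pktgen APIs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
        int id;
        struct worker *next;
};

static struct worker *workers;          /* shared list head */
static pthread_mutex_t workers_lock = PTHREAD_MUTEX_INITIALIZER;

static void cleanup_workers(void)
{
        struct worker *batch, *w;

        /* Detach the list under the lock... */
        pthread_mutex_lock(&workers_lock);
        batch = workers;
        workers = NULL;
        pthread_mutex_unlock(&workers_lock);

        /* ...then walk and free it with the lock dropped. */
        while ((w = batch)) {
                batch = w->next;
                printf("stopping worker %d\n", w->id);
                free(w);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct worker *w = malloc(sizeof(*w));

                w->id = i;
                pthread_mutex_lock(&workers_lock);
                w->next = workers;
                workers = w;
                pthread_mutex_unlock(&workers_lock);
        }
        cleanup_workers();
        return 0;
}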
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 36851588536..840821b90bc 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev)
free_netdev(dev);
}
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static u16 lowpan_get_pan_id(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static u16 lowpan_get_short_addr(const struct net_device *dev)
+{
+ struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+ return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
@@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
+static struct ieee802154_mlme_ops lowpan_mlme = {
+ .get_pan_id = lowpan_get_pan_id,
+ .get_phy = lowpan_get_phy,
+ .get_short_addr = lowpan_get_short_addr,
+};
+
static void lowpan_setup(struct net_device *dev)
{
pr_debug("(%s)\n", __func__);
@@ -1070,6 +1094,7 @@ static void lowpan_setup(struct net_device *dev)
dev->netdev_ops = &lowpan_netdev_ops;
dev->header_ops = &lowpan_header_ops;
+ dev->ml_priv = &lowpan_mlme;
dev->destructor = lowpan_dev_free;
}
@@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+ spin_lock_init(&flist_lock);
+
register_netdevice(dev);
return 0;
@@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
- struct lowpan_dev_record *entry;
- struct lowpan_dev_record *tmp;
+ struct lowpan_dev_record *entry, *tmp;
+ struct lowpan_fragment *frame, *tframe;
ASSERT_RTNL();
+ spin_lock(&flist_lock);
+ list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+ del_timer(&frame->timer);
+ list_del(&frame->list);
+ dev_kfree_skb(frame->skb);
+ kfree(frame);
+ }
+ spin_unlock(&flist_lock);
+
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index bce36f1a37b..30b88d7b4bd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
continue;
+ if (fi->fib_dead)
+ continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
fib_alias_accessed(fa);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8d25a1c557e..8f8db724bfa 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto rtattr_failure;
if (icsk == NULL) {
- r->idiag_rqueue = r->idiag_wqueue = 0;
+ handler->idiag_get_info(sk, r, NULL);
goto out;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8bb6adeb62c..1272a88c2a6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3243,7 +3243,7 @@ void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
- int max_share, cnt;
+ int max_rshare, max_wshare, cnt;
unsigned int i;
unsigned long jiffy = jiffies;
@@ -3303,15 +3303,16 @@ void __init tcp_init(void)
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
- max_share = min(4UL*1024*1024, limit);
+ max_wshare = min(4UL*1024*1024, limit);
+ max_rshare = min(6UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
- sysctl_tcp_wmem[2] = max(64*1024, max_share);
+ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
- sysctl_tcp_rmem[2] = max(87380, max_share);
+ sysctl_tcp_rmem[2] = max(87380, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
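
The tcp_init() hunk above splits the old max_share cap into separate write (4 MB) and read (6 MB) caps, both still bounded by 1/128 of buffer memory. A small worked example of the resulting defaults, assuming a hypothetical 1,048,576 free 4 KiB pages (the page count and shift are assumptions for illustration only):

#include <stdio.h>

int main(void)
{
        unsigned long pages = 1048576UL;        /* assumed nr_free_buffer_pages() */
        unsigned long page_shift = 12;          /* 4 KiB pages                    */
        unsigned long limit = pages << (page_shift - 7);  /* 1/128 of that memory */

        unsigned long max_wshare = limit < 4UL*1024*1024 ? limit : 4UL*1024*1024;
        unsigned long max_rshare = limit < 6UL*1024*1024 ? limit : 6UL*1024*1024;

        unsigned long wmem_max = max_wshare > 64UL*1024 ? max_wshare : 64UL*1024;
        unsigned long rmem_max = max_rshare > 87380UL ? max_rshare : 87380UL;

        printf("limit=%lu wmem[2]=%lu rmem[2]=%lu\n", limit, wmem_max, rmem_max);
        return 0;
}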
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ff36406537..257b61789ee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
-int sysctl_tcp_adv_win_scale __read_mostly = 2;
+int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
int sysctl_tcp_stdurg __read_mostly;
@@ -495,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
+ tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
@@ -2868,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk)
/* Do not moderate cwnd if it's already undone in cwr or recovery. */
if (tp->undo_marker) {
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
- else /* PRR */
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
+ /* PRR algorithm. */
tp->snd_cwnd = tp->snd_ssthresh;
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a949f19deb..a7f86a3cd50 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
return udp_dump_one(&udp_table, in_skb, nlh, req);
}
+static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ void *info)
+{
+ r->idiag_rqueue = sk_rmem_alloc_get(sk);
+ r->idiag_wqueue = sk_wmem_alloc_get(sk);
+}
+
static const struct inet_diag_handler udp_diag_handler = {
.dump = udp_diag_dump,
.dump_one = udp_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDP,
};
@@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *
static const struct inet_diag_handler udplite_diag_handler = {
.dump = udplite_diag_dump,
.dump_one = udplite_diag_dump_one,
+ .idiag_get_info = udp_diag_get_info,
.idiag_type = IPPROTO_UDPLITE,
};
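
Together, the inet_diag and udp_diag hunks above make inet_sk_diag_fill() always defer to the handler's idiag_get_info() callback and give UDP/UDP-Lite one that reports the socket's receive and send memory. A tiny sketch of that callback shape, using stand-in types rather than the real inet_diag structures:

#include <stdio.h>

struct diag_msg { unsigned rqueue, wqueue; };
struct sock { unsigned rmem, wmem; };

struct diag_handler {
        void (*get_info)(struct sock *sk, struct diag_msg *r);
};

static void udp_get_info(struct sock *sk, struct diag_msg *r)
{
        r->rqueue = sk->rmem;
        r->wqueue = sk->wmem;
}

static const struct diag_handler udp_handler = { .get_info = udp_get_info };

int main(void)
{
        struct sock sk = { .rmem = 4096, .wmem = 1024 };
        struct diag_msg r = { 0 };

        udp_handler.get_info(&sk, &r);
        printf("rqueue=%u wqueue=%u\n", r.rqueue, r.wqueue);
        return 0;
}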
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 585d93ecee2..6274f0be82b 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -442,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
daddr = lip->l2tp_addr.s_addr;
} else {
+ rc = -EDESTADDRREQ;
if (sk->sk_state != TCP_ESTABLISHED)
- return -EDESTADDRREQ;
+ goto out;
daddr = inet->inet_daddr;
connected = 1;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d9798a307f2..db8fae51714 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
/* IBSS code */
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 401c01f0731..c20051b7ffc 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/* free all potentially still buffered bcast frames */
local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
skb_queue_purge(&sdata->u.ap.ps_bc_buf);
+ } else if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ ieee80211_mgd_stop(sdata);
}
if (going_down)
@@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_rmc_free(sdata);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_mgd_teardown(sdata);
flushed = sta_info_flush(local, sdata);
WARN_ON(flushed);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f76da5b3f5c..20c680bfc3a 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3497,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata)
+void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 782a60198df..e76facc69e9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = rcu_dereference(sdata->u.vlan.sta);
if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
return TX_DROP;
- } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+ tx->sdata->control_port_protocol == tx->skb->protocol) {
tx->sta = sta_info_get_bss(sdata, hdr->addr1);
}
if (!tx->sta)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2555816e778..00bdb1d9d69 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1924,6 +1924,7 @@ protocol_fail:
control_fail:
ip_vs_estimator_net_cleanup(net);
estimator_fail:
+ net->ipvs = NULL;
return -ENOMEM;
}
@@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
ip_vs_control_net_cleanup(net);
ip_vs_estimator_net_cleanup(net);
IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+ net->ipvs = NULL;
}
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
@@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void)
goto cleanup_dev;
}
+ ret = ip_vs_register_nl_ioctl();
+ if (ret < 0) {
+ pr_err("can't register netlink/ioctl.\n");
+ goto cleanup_hooks;
+ }
+
pr_info("ipvs loaded.\n");
return ret;
+cleanup_hooks:
+ nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
cleanup_dev:
unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
@@ -2012,6 +2022,7 @@ exit:
static void __exit ip_vs_cleanup(void)
{
+ ip_vs_unregister_nl_ioctl();
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
unregister_pernet_device(&ipvs_core_dev_ops);
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index b3afe189af6..f5589987fc8 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
return 0;
}
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
#else
int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
#endif
@@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
free_percpu(ipvs->tot_stats.cpustats);
}
-int __init ip_vs_control_init(void)
+int __init ip_vs_register_nl_ioctl(void)
{
- int idx;
int ret;
- EnterFunction(2);
-
- /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
- INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
- }
-
- smp_wmb(); /* Do we really need it now ? */
-
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
@@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void)
pr_err("cannot register Generic Netlink interface.\n");
goto err_genl;
}
-
- ret = register_netdevice_notifier(&ip_vs_dst_notifier);
- if (ret < 0)
- goto err_notf;
-
- LeaveFunction(2);
return 0;
-err_notf:
- ip_vs_genl_unregister();
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}
+void ip_vs_unregister_nl_ioctl(void)
+{
+ ip_vs_genl_unregister();
+ nf_unregister_sockopt(&ip_vs_sockopts);
+}
+
+int __init ip_vs_control_init(void)
+{
+ int idx;
+ int ret;
+
+ EnterFunction(2);
+
+ /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
+ INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
+ }
+
+ smp_wmb(); /* Do we really need it now ? */
+
+ ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+ if (ret < 0)
+ return ret;
+
+ LeaveFunction(2);
+ return 0;
+}
+
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_netdevice_notifier(&ip_vs_dst_notifier);
- ip_vs_genl_unregister();
- nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 538d74ee4f6..e39f693dd3e 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
struct ip_vs_app *app;
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
if (!app)
return -ENOMEM;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 0f16283fd05..caa43704e55 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index eec797f8cce..548bf37aa29 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
+ if (!ipvs)
+ return -ENOENT;
+
if (!net_eq(net, &init_net)) {
ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
sizeof(vs_vars_table),
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index f843a883325..ed835e67a07 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
-#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
- defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
- defined(CONFIG_IP_VS_PROTO_ESP)
/*
* register an ipvs protocols netns related data
*/
@@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
ipvs->proto_data_table[hash] = pd;
atomic_set(&pd->appcnt, 0); /* Init app counter */
- if (pp->init_netns != NULL)
- pp->init_netns(net, pd);
+ if (pp->init_netns != NULL) {
+ int ret = pp->init_netns(net, pd);
+ if (ret) {
+ /* unlink and free proto data */
+ ipvs->proto_data_table[hash] = pd->next;
+ kfree(pd);
+ return ret;
+ }
+ }
return 0;
}
-#endif
/*
* unregister an ipvs protocol
@@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
*/
int __net_init ip_vs_protocol_net_init(struct net *net)
{
+ int i, ret;
+ static struct ip_vs_protocol *protos[] = {
#ifdef CONFIG_IP_VS_PROTO_TCP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+ &ip_vs_protocol_tcp,
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+ &ip_vs_protocol_udp,
#endif
#ifdef CONFIG_IP_VS_PROTO_SCTP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+ &ip_vs_protocol_sctp,
#endif
#ifdef CONFIG_IP_VS_PROTO_AH
- register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+ &ip_vs_protocol_ah,
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
- register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+ &ip_vs_protocol_esp,
#endif
+ };
+
+ for (i = 0; i < ARRAY_SIZE(protos); i++) {
+ ret = register_ip_vs_proto_netns(net, protos[i]);
+ if (ret < 0)
+ goto cleanup;
+ }
return 0;
+
+cleanup:
+ ip_vs_protocol_net_cleanup(net);
+ return ret;
}
void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
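
ip_vs_protocol_net_init() above is rewritten as a loop over a table of protocols so that a failed init_netns() can be unwound instead of ignored. A compact, runnable sketch of that register-array-with-unwind pattern; the protocol names and init/exit stubs below are placeholders, not the IPVS ones:

#include <stdio.h>

struct proto {
        const char *name;
        int (*init)(void);
        void (*exit)(void);
};

static int ok_init(void)    { return 0; }
static int fail_init(void)  { return -1; }
static void noop_exit(void) { }

static const struct proto protos[] = {
        { "tcp",  ok_init,   noop_exit },
        { "udp",  ok_init,   noop_exit },
        { "sctp", fail_init, noop_exit },   /* simulate an -ENOMEM */
};

static int protos_init(void)
{
        int i, ret;

        for (i = 0; i < (int)(sizeof(protos) / sizeof(protos[0])); i++) {
                ret = protos[i].init();
                if (ret < 0)
                        goto cleanup;
        }
        return 0;

cleanup:
        /* Tear down only what was already set up. */
        while (--i >= 0)
                protos[i].exit();
        return ret;
}

int main(void)
{
        printf("protos_init() = %d\n", protos_init());
        return 0;
}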
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1fbf7a2816f..9f3fb751c49 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -1090,7 +1090,7 @@ out:
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->sctp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
sizeof(sctp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index ef8641f7af8..cd609cc6272 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
* timeouts is netns related now.
* ---------------------------------------------
*/
-static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->tcp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
sizeof(tcp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
pd->tcp_state_table = tcp_states;
+ return 0;
}
static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f4b7262896b..2fedb2dcb3d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction,
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
}
-static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
spin_lock_init(&ipvs->udp_app_lock);
pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
sizeof(udp_timeouts));
+ if (!pd->timeout_table)
+ return -ENOMEM;
+ return 0;
}
static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 59530e93fa5..3746d8b9a47 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
- if (info->timeout) {
+ if (info->timeout[0]) {
typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
struct nf_conn_timeout *timeout_ext;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631ea95..777716bc80f 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -421,6 +421,19 @@ static int validate_sample(const struct nlattr *attr,
return validate_actions(actions, key, depth + 1);
}
+static int validate_tp_port(const struct sw_flow_key *flow_key)
+{
+ if (flow_key->eth.type == htons(ETH_P_IP)) {
+ if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst)
+ return 0;
+ } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
+ if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int validate_set(const struct nlattr *a,
const struct sw_flow_key *flow_key)
{
@@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a,
if (flow_key->ip.proto != IPPROTO_TCP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
-
- break;
+ return validate_tp_port(flow_key);
case OVS_KEY_ATTR_UDP:
if (flow_key->ip.proto != IPPROTO_UDP)
return -EINVAL;
- if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst)
- return -EINVAL;
- break;
+ return validate_tp_port(flow_key);
default:
return -EINVAL;
@@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
OVS_VPORT_CMD_NEW);
if (IS_ERR(reply)) {
- err = PTR_ERR(reply);
netlink_set_err(init_net.genl_sock, 0,
- ovs_dp_vport_multicast_group.id, err);
- return 0;
+ ovs_dp_vport_multicast_group.id, PTR_ERR(reply));
+ goto exit_unlock;
}
genl_notify(reply, genl_info_net(info), info->snd_pid,
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef..2a11ec2383e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
u8 tcp_flags = 0;
if (flow->key.eth.type == htons(ETH_P_IP) &&
- flow->key.ip.proto == IPPROTO_TCP) {
+ flow->key.ip.proto == IPPROTO_TCP &&
+ likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5da548fa7ae..ebd22966f74 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
(skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_help(skb))) {
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
+ skb_checksum_help(skb)))
+ return qdisc_drop(skb, sch);
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f4..8fc4dcd294a 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
*/
skb_set_owner_w(nskb, sk);
- /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
- if (!dst || (dst->obsolete > 1)) {
- dst_release(dst);
+ if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
sctp_assoc_sync_pmtu(asoc);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 3889330b7b0..b026ba0c699 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-/* this is a complete rip-off from __sk_dst_check
- * the cookie is always 0 since this is how it's used in the
- * pmtu code
- */
-static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
-{
- struct dst_entry *dst = t->dst;
-
- if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
- dst_release(t->dst);
- t->dst = NULL;
- return NULL;
- }
-
- return dst;
-}
-
void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index ca8cad8251c..782bfe1b646 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
{
struct gss_api_mech *pos = NULL;
- int i = 0;
+ int j, i = 0;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
- array_ptr[i] = pos->gm_pfs->pseudoflavor;
- i++;
+ for (j=0; j < pos->gm_pf_num; j++) {
+ array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
+ }
}
spin_unlock(&registered_mechs_lock);
return i;
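
gss_mech_list_pseudoflavors() above now reports every pseudoflavor of every registered mechanism instead of only the first per mechanism. A small sketch of that nested enumeration; the mechanism table below is made up for the example:

#include <stdio.h>

struct mech {
        const char *name;
        int num_pf;
        int pfs[4];
};

static const struct mech mechs[] = {
        { "krb5",  3, { 390003, 390004, 390005 } },
        { "spkm3", 2, { 390009, 390010 } },
};

int main(void)
{
        int out[16];
        int i = 0;

        for (unsigned m = 0; m < sizeof(mechs) / sizeof(mechs[0]); m++)
                for (int j = 0; j < mechs[m].num_pf; j++)
                        out[i++] = mechs[m].pfs[j];

        for (int k = 0; k < i; k++)
                printf("%d\n", out[k]);
        return i ? 0 : 1;
}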
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d127bd74752..25302c80246 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -279,6 +279,14 @@ void rpc_clients_notifier_unregister(void)
return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}
+static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
+{
+ clnt->cl_nodelen = strlen(nodename);
+ if (clnt->cl_nodelen > UNX_MAXNODENAME)
+ clnt->cl_nodelen = UNX_MAXNODENAME;
+ memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+}
+
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
{
const struct rpc_program *program = args->program;
@@ -359,10 +367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
}
/* save the nodename */
- clnt->cl_nodelen = strlen(init_utsname()->nodename);
- if (clnt->cl_nodelen > UNX_MAXNODENAME)
- clnt->cl_nodelen = UNX_MAXNODENAME;
- memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen);
+ rpc_clnt_set_nodename(clnt, utsname()->nodename);
rpc_register_client(clnt);
return clnt;
@@ -521,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
if (err != 0)
goto out_no_path;
+ rpc_clnt_set_nodename(new, utsname()->nodename);
if (new->cl_auth)
atomic_inc(&new->cl_auth->au_count);
atomic_inc(&clnt->cl_count);
@@ -1282,6 +1288,8 @@ call_reserveresult(struct rpc_task *task)
}
switch (status) {
+ case -ENOMEM:
+ rpc_delay(task, HZ >> 2);
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
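
The clnt.c hunks above factor the nodename copy into rpc_clnt_set_nodename() and also call it when cloning a client. A minimal sketch of the bounded copy it performs, with an assumed MAXNODENAME and a stand-in struct clnt:

#include <stdio.h>
#include <string.h>

#define MAXNODENAME 32

struct clnt {
        char nodename[MAXNODENAME + 1];
        size_t nodelen;
};

static void clnt_set_nodename(struct clnt *c, const char *nodename)
{
        /* Clamp the source length to the fixed field size before copying. */
        c->nodelen = strlen(nodename);
        if (c->nodelen > MAXNODENAME)
                c->nodelen = MAXNODENAME;
        memcpy(c->nodename, nodename, c->nodelen);
        c->nodename[c->nodelen] = '\0';
}

int main(void)
{
        struct clnt c;

        clnt_set_nodename(&c, "build-host.example.com");
        printf("%s (%zu)\n", c.nodename, c.nodelen);
        return 0;
}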
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f955d7252e4..88945d0f759 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/**
* rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
* @msg: message to queue
*
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- * release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
/**
* rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
*/
int rpc_remove_client_dir(struct dentry *dentry)
{
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 78ac39fd9fe..3c0653439f3 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -394,6 +394,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
@@ -521,6 +522,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 0cbcd1ab49a..d7ccd7923ea 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
list_del(&req->rq_list);
goto out_init_req;
}
- req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+ req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
if (!IS_ERR(req))
goto out_init_req;
switch (PTR_ERR(req)) {
case -ENOMEM:
- rpc_delay(task, HZ >> 2);
dprintk("RPC: dynamic allocation of request slot "
"failed! Retrying\n");
+ task->tk_status = -ENOMEM;
break;
case -EAGAIN:
rpc_sleep_on(&xprt->backlog, task, NULL);
dprintk("RPC: waiting for request slot\n");
+ default:
+ task->tk_status = -EAGAIN;
}
- task->tk_status = -EAGAIN;
return;
out_init_req:
task->tk_status = 0;