Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 103
-rw-r--r--  net/8021q/vlan.h | 17
-rw-r--r--  net/8021q/vlan_core.c | 37
-rw-r--r--  net/8021q/vlan_dev.c | 57
-rw-r--r--  net/8021q/vlan_netlink.c | 7
-rw-r--r--  net/8021q/vlanproc.c | 13
-rw-r--r--  net/9p/client.c | 5
-rw-r--r--  net/9p/trans_fd.c | 4
-rw-r--r--  net/9p/trans_virtio.c | 1
-rw-r--r--  net/appletalk/ddp.c | 132
-rw-r--r--  net/atm/ioctl.c | 177
-rw-r--r--  net/atm/pvc.c | 3
-rw-r--r--  net/atm/svc.c | 9
-rw-r--r--  net/ax25/af_ax25.c | 12
-rw-r--r--  net/bluetooth/af_bluetooth.c | 5
-rw-r--r--  net/bluetooth/bnep/sock.c | 3
-rw-r--r--  net/bluetooth/cmtp/sock.c | 3
-rw-r--r--  net/bluetooth/hci_conn.c | 1
-rw-r--r--  net/bluetooth/hci_sock.c | 3
-rw-r--r--  net/bluetooth/hidp/sock.c | 3
-rw-r--r--  net/bluetooth/l2cap.c | 38
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 3
-rw-r--r--  net/bluetooth/sco.c | 3
-rw-r--r--  net/bridge/br_fdb.c | 4
-rw-r--r--  net/bridge/br_if.c | 10
-rw-r--r--  net/bridge/br_ioctl.c | 4
-rw-r--r--  net/bridge/br_sysfs_br.c | 6
-rw-r--r--  net/bridge/netfilter/ebt_stp.c | 4
-rw-r--r--  net/can/af_can.c | 18
-rw-r--r--  net/can/bcm.c | 22
-rw-r--r--  net/can/raw.c | 1
-rw-r--r--  net/compat.c | 8
-rw-r--r--  net/core/Makefile | 1
-rw-r--r--  net/core/datagram.c | 10
-rw-r--r--  net/core/dev.c | 428
-rw-r--r--  net/core/drop_monitor.c | 2
-rw-r--r--  net/core/fib_rules.c | 107
-rw-r--r--  net/core/link_watch.c | 94
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/net-sysfs.c | 9
-rw-r--r--  net/core/net_namespace.c | 272
-rw-r--r--  net/core/pktgen.c | 51
-rw-r--r--  net/core/rtnetlink.c | 90
-rw-r--r--  net/core/skb_dma_map.c | 65
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/core/sock.c | 17
-rw-r--r--  net/core/sysctl_net_core.c | 2
-rw-r--r--  net/dcb/dcbnl.c | 6
-rw-r--r--  net/dccp/ipv4.c | 6
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/dccp/minisocks.c | 2
-rw-r--r--  net/decnet/af_decnet.c | 11
-rw-r--r--  net/decnet/dn_dev.c | 53
-rw-r--r--  net/decnet/dn_fib.c | 10
-rw-r--r--  net/decnet/dn_route.c | 10
-rw-r--r--  net/decnet/dn_rules.c | 22
-rw-r--r--  net/decnet/dn_table.c | 7
-rw-r--r--  net/decnet/sysctl_net_decnet.c | 7
-rw-r--r--  net/econet/af_econet.c | 5
-rw-r--r--  net/ethernet/eth.c | 7
-rw-r--r--  net/ieee802154/Makefile | 4
-rw-r--r--  net/ieee802154/af_ieee802154.c | 4
-rw-r--r--  net/ieee802154/ieee802154.h | 53
-rw-r--r--  net/ieee802154/netlink.c | 613
-rw-r--r--  net/ieee802154/nl-mac.c | 617
-rw-r--r--  net/ieee802154/nl-phy.c | 344
-rw-r--r--  net/ieee802154/nl_policy.c | 2
-rw-r--r--  net/ieee802154/wpan-class.c | 75
-rw-r--r--  net/ipv4/af_inet.c | 10
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/devinet.c | 158
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 29
-rw-r--r--  net/ipv4/fib_rules.c | 14
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv4/icmp.c | 9
-rw-r--r--  net/ipv4/igmp.c | 50
-rw-r--r--  net/ipv4/inet_connection_sock.c | 6
-rw-r--r--  net/ipv4/inet_hashtables.c | 3
-rw-r--r--  net/ipv4/inet_lro.c | 36
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 45
-rw-r--r--  net/ipv4/inetpeer.c | 5
-rw-r--r--  net/ipv4/ip_fragment.c | 13
-rw-r--r--  net/ipv4/ip_gre.c | 56
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/ipconfig.c | 13
-rw-r--r--  net/ipv4/ipip.c | 58
-rw-r--r--  net/ipv4/ipmr.c | 4
-rw-r--r--  net/ipv4/netfilter.c | 8
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 34
-rw-r--r--  net/ipv4/raw.c | 24
-rw-r--r--  net/ipv4/route.c | 107
-rw-r--r--  net/ipv4/syncookies.c | 5
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 12
-rw-r--r--  net/ipv4/tcp.c | 293
-rw-r--r--  net/ipv4/tcp_htcp.c | 10
-rw-r--r--  net/ipv4/tcp_input.c | 85
-rw-r--r--  net/ipv4/tcp_ipv4.c | 116
-rw-r--r--  net/ipv4/tcp_lp.c | 4
-rw-r--r--  net/ipv4/tcp_minisocks.c | 77
-rw-r--r--  net/ipv4/tcp_output.c | 307
-rw-r--r--  net/ipv4/tcp_probe.c | 4
-rw-r--r--  net/ipv4/tcp_veno.c | 5
-rw-r--r--  net/ipv4/tcp_yeah.c | 4
-rw-r--r--  net/ipv4/udp.c | 345
-rw-r--r--  net/ipv4/udplite.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 236
-rw-r--r--  net/ipv6/af_inet6.c | 22
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/anycast.c | 35
-rw-r--r--  net/ipv6/datagram.c | 14
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/fib6_rules.c | 24
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 17
-rw-r--r--  net/ipv6/ip6_tunnel.c | 38
-rw-r--r--  net/ipv6/mcast.c | 51
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 2
-rw-r--r--  net/ipv6/raw.c | 24
-rw-r--r--  net/ipv6/reassembly.c | 17
-rw-r--r--  net/ipv6/sit.c | 69
-rw-r--r--  net/ipv6/syncookies.c | 5
-rw-r--r--  net/ipv6/tcp_ipv6.c | 104
-rw-r--r--  net/ipv6/udp.c | 223
-rw-r--r--  net/ipv6/udplite.c | 1
-rw-r--r--  net/ipx/af_ipx.c | 59
-rw-r--r--  net/irda/af_irda.c | 338
-rw-r--r--  net/irda/ircomm/ircomm_tty_attach.c | 1
-rw-r--r--  net/irda/irlan/irlan_common.c | 1
-rw-r--r--  net/irda/irlan/irlan_eth.c | 1
-rw-r--r--  net/irda/irnet/irnet_irda.c | 5
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 1
-rw-r--r--  net/iucv/af_iucv.c | 3
-rw-r--r--  net/iucv/iucv.c | 16
-rw-r--r--  net/key/af_key.c | 30
-rw-r--r--  net/llc/af_llc.c | 7
-rw-r--r--  net/mac80211/Kconfig | 13
-rw-r--r--  net/mac80211/Makefile | 2
-rw-r--r--  net/mac80211/agg-rx.c | 18
-rw-r--r--  net/mac80211/agg-tx.c | 141
-rw-r--r--  net/mac80211/cfg.c | 61
-rw-r--r--  net/mac80211/debugfs.c | 75
-rw-r--r--  net/mac80211/debugfs.h | 2
-rw-r--r--  net/mac80211/debugfs_key.c | 44
-rw-r--r--  net/mac80211/debugfs_netdev.c | 176
-rw-r--r--  net/mac80211/debugfs_sta.c | 67
-rw-r--r--  net/mac80211/driver-ops.h | 5
-rw-r--r--  net/mac80211/driver-trace.h | 9
-rw-r--r--  net/mac80211/ht.c | 12
-rw-r--r--  net/mac80211/ibss.c | 19
-rw-r--r--  net/mac80211/ieee80211_i.h | 186
-rw-r--r--  net/mac80211/iface.c | 16
-rw-r--r--  net/mac80211/key.h | 12
-rw-r--r--  net/mac80211/main.c | 350
-rw-r--r--  net/mac80211/mesh.c | 149
-rw-r--r--  net/mac80211/mesh.h | 30
-rw-r--r--  net/mac80211/mesh_hwmp.c | 415
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 21
-rw-r--r--  net/mac80211/mesh_plink.c | 58
-rw-r--r--  net/mac80211/mlme.c | 34
-rw-r--r--  net/mac80211/rate.c | 19
-rw-r--r--  net/mac80211/rate.h | 9
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c | 1
-rw-r--r--  net/mac80211/rx.c | 539
-rw-r--r--  net/mac80211/scan.c | 119
-rw-r--r--  net/mac80211/spectmgmt.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 175
-rw-r--r--  net/mac80211/sta_info.h | 51
-rw-r--r--  net/mac80211/status.c | 337
-rw-r--r--  net/mac80211/tkip.c | 4
-rw-r--r--  net/mac80211/tx.c | 192
-rw-r--r--  net/mac80211/util.c | 41
-rw-r--r--  net/mac80211/wep.c | 8
-rw-r--r--  net/mac80211/wpa.c | 25
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 33
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 22
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 64
-rw-r--r--  net/netfilter/nf_log.c | 18
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/xt_connlimit.c | 10
-rw-r--r--  net/netfilter/xt_limit.c | 2
-rw-r--r--  net/netfilter/xt_osf.c | 2
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 8
-rw-r--r--  net/netlink/af_netlink.c | 9
-rw-r--r--  net/netrom/af_netrom.c | 5
-rw-r--r--  net/netrom/nr_route.c | 15
-rw-r--r--  net/packet/af_packet.c | 45
-rw-r--r--  net/phonet/af_phonet.c | 26
-rw-r--r--  net/phonet/pep.c | 33
-rw-r--r--  net/phonet/pn_dev.c | 140
-rw-r--r--  net/phonet/pn_netlink.c | 12
-rw-r--r--  net/phonet/socket.c | 83
-rw-r--r--  net/rds/af_rds.c | 14
-rw-r--r--  net/rds/cong.c | 2
-rw-r--r--  net/rds/connection.c | 6
-rw-r--r--  net/rds/ib.h | 2
-rw-r--r--  net/rds/ib_cm.c | 6
-rw-r--r--  net/rds/ib_rdma.c | 9
-rw-r--r--  net/rds/ib_recv.c | 47
-rw-r--r--  net/rds/ib_send.c | 4
-rw-r--r--  net/rds/iw.h | 2
-rw-r--r--  net/rds/iw_cm.c | 2
-rw-r--r--  net/rds/iw_rdma.c | 9
-rw-r--r--  net/rds/iw_recv.c | 47
-rw-r--r--  net/rds/iw_send.c | 7
-rw-r--r--  net/rds/message.c | 3
-rw-r--r--  net/rds/rdma.c | 36
-rw-r--r--  net/rds/rdma.h | 1
-rw-r--r--  net/rds/recv.c | 11
-rw-r--r--  net/rds/send.c | 27
-rw-r--r--  net/rds/threads.c | 4
-rw-r--r--  net/rfkill/core.c | 5
-rw-r--r--  net/rose/af_rose.c | 5
-rw-r--r--  net/rose/rose_route.c | 44
-rw-r--r--  net/rxrpc/af_rxrpc.c | 5
-rw-r--r--  net/sched/act_api.c | 4
-rw-r--r--  net/sched/act_mirred.c | 107
-rw-r--r--  net/sched/cls_api.c | 8
-rw-r--r--  net/sched/cls_flow.c | 2
-rw-r--r--  net/sched/cls_rsvp.h | 28
-rw-r--r--  net/sched/em_meta.c | 13
-rw-r--r--  net/sched/sch_api.c | 17
-rw-r--r--  net/sched/sch_generic.c | 18
-rw-r--r--  net/sched/sch_htb.c | 4
-rw-r--r--  net/sched/sch_netem.c | 12
-rw-r--r--  net/sched/sch_teql.c | 11
-rw-r--r--  net/sctp/associola.c | 31
-rw-r--r--  net/sctp/chunk.c | 15
-rw-r--r--  net/sctp/ipv6.c | 21
-rw-r--r--  net/sctp/output.c | 50
-rw-r--r--  net/sctp/outqueue.c | 36
-rw-r--r--  net/sctp/protocol.c | 11
-rw-r--r--  net/sctp/sm_make_chunk.c | 13
-rw-r--r--  net/sctp/sm_sideeffect.c | 6
-rw-r--r--  net/sctp/sm_statefuns.c | 41
-rw-r--r--  net/sctp/socket.c | 379
-rw-r--r--  net/sctp/sysctl.c | 13
-rw-r--r--  net/sctp/transport.c | 53
-rw-r--r--  net/socket.c | 608
-rw-r--r--  net/sunrpc/addr.c | 18
-rw-r--r--  net/sunrpc/auth.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c | 4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 4
-rw-r--r--  net/sunrpc/cache.c | 5
-rw-r--r--  net/sunrpc/svc.c | 5
-rw-r--r--  net/sunrpc/svc_xprt.c | 8
-rw-r--r--  net/sunrpc/svcauth.c | 4
-rw-r--r--  net/sunrpc/svcauth_unix.c | 4
-rw-r--r--  net/sunrpc/svcsock.c | 10
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 7
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 1
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 4
-rw-r--r--  net/tipc/cluster.c | 16
-rw-r--r--  net/tipc/link.c | 12
-rw-r--r--  net/tipc/socket.c | 20
-rw-r--r--  net/tipc/subscr.c | 6
-rw-r--r--  net/unix/af_unix.c | 13
-rw-r--r--  net/wimax/op-msg.c | 2
-rw-r--r--  net/wimax/op-rfkill.c | 18
-rw-r--r--  net/wimax/stack.c | 11
-rw-r--r--  net/wireless/Kconfig | 6
-rw-r--r--  net/wireless/core.c | 21
-rw-r--r--  net/wireless/core.h | 14
-rw-r--r--  net/wireless/debugfs.c | 15
-rw-r--r--  net/wireless/debugfs.h | 3
-rw-r--r--  net/wireless/ibss.c | 4
-rw-r--r--  net/wireless/mlme.c | 63
-rw-r--r--  net/wireless/nl80211.c | 280
-rw-r--r--  net/wireless/reg.c | 13
-rw-r--r--  net/wireless/scan.c | 50
-rw-r--r--  net/wireless/sme.c | 24
-rw-r--r--  net/wireless/util.c | 40
-rw-r--r--  net/wireless/wext-compat.c | 55
-rw-r--r--  net/wireless/wext-core.c | 5
-rw-r--r--  net/x25/af_x25.c | 111
-rw-r--r--  net/x25/x25_route.c | 4
-rw-r--r--  net/x25/x25_subr.c | 6
-rw-r--r--  net/xfrm/xfrm_algo.c | 35
-rw-r--r--  net/xfrm/xfrm_state.c | 32
-rw-r--r--  net/xfrm/xfrm_user.c | 147
285 files changed, 8719 insertions, 5234 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 511afe72af31..ec3769295dac 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -41,7 +41,7 @@
/* Global VLAN variables */
-int vlan_net_id;
+int vlan_net_id __read_mostly;
/* Our listing of VLAN group(s) */
static struct hlist_head vlan_group_hash[VLAN_GRP_HASH_SIZE];
@@ -161,10 +161,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
grp->nr_vlans--;
- if (!grp->killall) {
- vlan_group_set_device(grp, vlan_id, NULL);
+ vlan_group_set_device(grp, vlan_id, NULL);
+ if (!grp->killall)
synchronize_net();
- }
+
unregister_netdevice_queue(dev, head);
/* If the group is now empty, kill off the group. */
@@ -184,34 +184,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
dev_put(real_dev);
}
-void unregister_vlan_dev_alls(struct vlan_group *grp)
-{
- LIST_HEAD(list);
- int i;
- struct net_device *vlandev;
- struct vlan_group save;
-
- memcpy(&save, grp, sizeof(save));
- memset(&grp->vlan_devices_arrays, 0, sizeof(grp->vlan_devices_arrays));
- grp->killall = 1;
-
- synchronize_net();
-
- /* Delete all VLANs for this dev. */
- for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
- vlandev = vlan_group_get_device(&save, i);
- if (!vlandev)
- continue;
-
- unregister_vlan_dev(vlandev, &list);
- if (grp->nr_vlans == 0)
- break;
- }
- unregister_netdevice_many(&list);
- for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
- kfree(save.vlan_devices_arrays[i]);
-}
-
static void vlan_transfer_operstate(const struct net_device *dev,
struct net_device *vlandev)
{
@@ -310,8 +282,11 @@ out_uninit_applicant:
if (ngrp)
vlan_gvrp_uninit_applicant(real_dev);
out_free_group:
- if (ngrp)
- vlan_group_free(ngrp);
+ if (ngrp) {
+ hlist_del_rcu(&ngrp->hlist);
+ /* Free the group, after all cpu's are done. */
+ call_rcu(&ngrp->rcu, vlan_rcu_free);
+ }
return err;
}
@@ -456,6 +431,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
struct vlan_group *grp;
int i, flgs;
struct net_device *vlandev;
+ struct vlan_dev_info *vlan;
+ LIST_HEAD(list);
if (is_vlan_dev(dev))
__vlan_device_event(dev, event);
@@ -531,7 +508,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (!(flgs & IFF_UP))
continue;
- dev_change_flags(vlandev, flgs & ~IFF_UP);
+ vlan = vlan_dev_info(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ dev_change_flags(vlandev, flgs & ~IFF_UP);
vlan_transfer_operstate(dev, vlandev);
}
break;
@@ -547,13 +526,30 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (flgs & IFF_UP)
continue;
- dev_change_flags(vlandev, flgs | IFF_UP);
+ vlan = vlan_dev_info(vlandev);
+ if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ dev_change_flags(vlandev, flgs | IFF_UP);
vlan_transfer_operstate(dev, vlandev);
}
break;
case NETDEV_UNREGISTER:
- unregister_vlan_dev_alls(grp);
+ /* Delete all VLANs for this dev. */
+ grp->killall = 1;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ /* unregistration of last vlan destroys group, abort
+ * afterwards */
+ if (grp->nr_vlans == 1)
+ i = VLAN_GROUP_ARRAY_LEN;
+
+ unregister_vlan_dev(vlandev, &list);
+ }
+ unregister_netdevice_many(&list);
break;
}
@@ -690,47 +686,26 @@ out:
static int vlan_init_net(struct net *net)
{
+ struct vlan_net *vn = net_generic(net, vlan_net_id);
int err;
- struct vlan_net *vn;
-
- err = -ENOMEM;
- vn = kzalloc(sizeof(struct vlan_net), GFP_KERNEL);
- if (vn == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, vlan_net_id, vn);
- if (err < 0)
- goto err_assign;
vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
err = vlan_proc_init(net);
- if (err < 0)
- goto err_proc;
-
- return 0;
-err_proc:
- /* nothing */
-err_assign:
- kfree(vn);
-err_alloc:
return err;
}
static void vlan_exit_net(struct net *net)
{
- struct vlan_net *vn;
-
- vn = net_generic(net, vlan_net_id);
- rtnl_kill_links(net, &vlan_link_ops);
vlan_proc_cleanup(net);
- kfree(vn);
}
static struct pernet_operations vlan_net_ops = {
.init = vlan_init_net,
.exit = vlan_exit_net,
+ .id = &vlan_net_id,
+ .size = sizeof(struct vlan_net),
};
static int __init vlan_proto_init(void)
@@ -740,7 +715,7 @@ static int __init vlan_proto_init(void)
pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
pr_info("All bugs added by %s\n", vlan_buggyright);
- err = register_pernet_gen_device(&vlan_net_id, &vlan_net_ops);
+ err = register_pernet_subsys(&vlan_net_ops);
if (err < 0)
goto err0;
@@ -765,7 +740,7 @@ err4:
err3:
unregister_netdevice_notifier(&vlan_notifier_block);
err2:
- unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
+ unregister_pernet_subsys(&vlan_net_ops);
err0:
return err;
}
@@ -785,7 +760,7 @@ static void __exit vlan_cleanup_module(void)
for (i = 0; i < VLAN_GRP_HASH_SIZE; i++)
BUG_ON(!hlist_empty(&vlan_group_hash[i]));
- unregister_pernet_gen_device(vlan_net_id, &vlan_net_ops);
+ unregister_pernet_subsys(&vlan_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
vlan_gvrp_uninit();
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 68f9290e6837..5685296017e9 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -16,6 +16,21 @@ struct vlan_priority_tci_mapping {
struct vlan_priority_tci_mapping *next;
};
+
+/**
+ * struct vlan_rx_stats - VLAN percpu rx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @multicast: number of received multicast packets
+ * @rx_errors: number of errors
+ */
+struct vlan_rx_stats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long multicast;
+ unsigned long rx_errors;
+};
+
/**
* struct vlan_dev_info - VLAN private device data
* @nr_ingress_mappings: number of ingress priority mappings
@@ -29,6 +44,7 @@ struct vlan_priority_tci_mapping {
* @dent: proc dir entry
* @cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
* @cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
+ * @vlan_rx_stats: ptr to percpu rx stats
*/
struct vlan_dev_info {
unsigned int nr_ingress_mappings;
@@ -45,6 +61,7 @@ struct vlan_dev_info {
struct proc_dir_entry *dent;
unsigned long cnt_inc_headroom_on_tx;
unsigned long cnt_encap_on_xmit;
+ struct vlan_rx_stats *vlan_rx_stats;
};
static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 7f7de1a04de6..e75a2f3b10af 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,7 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
if (skb_bond_should_drop(skb))
goto drop;
- skb->vlan_tci = vlan_tci;
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
if (!skb->dev)
@@ -31,7 +31,7 @@ EXPORT_SYMBOL(__vlan_hwaccel_rx);
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- struct net_device_stats *stats;
+ struct vlan_rx_stats *rx_stats;
skb->dev = vlan_dev_info(dev)->real_dev;
netif_nit_deliver(skb);
@@ -40,15 +40,17 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
skb->vlan_tci = 0;
- stats = &dev->stats;
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+ smp_processor_id());
+
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
switch (skb->pkt_type) {
case PACKET_BROADCAST:
break;
case PACKET_MULTICAST:
- stats->multicast++;
+ rx_stats->multicast++;
break;
case PACKET_OTHERHOST:
/* Our lower layer thinks this is not local, let's make sure.
@@ -74,15 +76,16 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
-static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci, struct sk_buff *skb)
+static gro_result_t
+vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
{
struct sk_buff *p;
if (skb_bond_should_drop(skb))
goto drop;
- skb->vlan_tci = vlan_tci;
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
if (!skb->dev)
@@ -101,11 +104,12 @@ drop:
return GRO_DROP;
}
-int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci, struct sk_buff *skb)
+gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci, struct sk_buff *skb)
{
if (netpoll_rx_on(skb))
- return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+ return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+ ? GRO_DROP : GRO_NORMAL;
skb_gro_reset_offset(skb);
@@ -113,17 +117,18 @@ int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
}
EXPORT_SYMBOL(vlan_gro_receive);
-int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
- unsigned int vlan_tci)
+gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
+ unsigned int vlan_tci)
{
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
- return NET_RX_DROP;
+ return GRO_DROP;
if (netpoll_rx_on(skb)) {
skb->protocol = eth_type_trans(skb, skb->dev);
- return vlan_hwaccel_receive_skb(skb, grp, vlan_tci);
+ return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
+ ? GRO_DROP : GRO_NORMAL;
}
return napi_frags_finish(napi, skb,
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 790fd55ec318..b7889782047e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -140,7 +140,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
struct vlan_hdr *vhdr;
- struct net_device_stats *stats;
+ struct vlan_rx_stats *rx_stats;
u16 vlan_id;
u16 vlan_tci;
@@ -163,9 +163,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
goto err_unlock;
}
- stats = &skb->dev->stats;
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+ smp_processor_id());
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
skb_pull_rcsum(skb, VLAN_HLEN);
@@ -180,7 +181,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
break;
case PACKET_MULTICAST:
- stats->multicast++;
+ rx_stats->multicast++;
break;
case PACKET_OTHERHOST:
@@ -200,7 +201,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
skb = vlan_check_reorder_header(skb);
if (!skb) {
- stats->rx_errors++;
+ rx_stats->rx_errors++;
goto err_unlock;
}
@@ -332,7 +333,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
} else
txq->tx_dropped++;
- return NETDEV_TX_OK;
+ return ret;
}
static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
@@ -358,7 +359,7 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
} else
txq->tx_dropped++;
- return NETDEV_TX_OK;
+ return ret;
}
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
@@ -430,7 +431,8 @@ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
u32 old_flags = vlan->flags;
- if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+ if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING))
return -EINVAL;
vlan->flags = (old_flags & ~mask) | (flags & mask);
@@ -455,7 +457,8 @@ static int vlan_dev_open(struct net_device *dev)
struct net_device *real_dev = vlan->real_dev;
int err;
- if (!(real_dev->flags & IFF_UP))
+ if (!(real_dev->flags & IFF_UP) &&
+ !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
@@ -731,6 +734,11 @@ static int vlan_dev_init(struct net_device *dev)
subclass = 1;
vlan_dev_set_lockdep_class(dev, subclass);
+
+ vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
+ if (!vlan_dev_info(dev)->vlan_rx_stats)
+ return -ENOMEM;
+
return 0;
}
@@ -740,6 +748,8 @@ static void vlan_dev_uninit(struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
int i;
+ free_percpu(vlan->vlan_rx_stats);
+ vlan->vlan_rx_stats = NULL;
for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
while ((pm = vlan->egress_priority_map[i]) != NULL) {
vlan->egress_priority_map[i] = pm->next;
@@ -775,6 +785,31 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
return dev_ethtool_get_flags(vlan->real_dev);
}
+static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+
+ dev_txq_stats_fold(dev, stats);
+
+ if (vlan_dev_info(dev)->vlan_rx_stats) {
+ struct vlan_rx_stats *p, rx = {0};
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+ rx.rx_packets += p->rx_packets;
+ rx.rx_bytes += p->rx_bytes;
+ rx.rx_errors += p->rx_errors;
+ rx.multicast += p->multicast;
+ }
+ stats->rx_packets = rx.rx_packets;
+ stats->rx_bytes = rx.rx_bytes;
+ stats->rx_errors = rx.rx_errors;
+ stats->multicast = rx.multicast;
+ }
+ return stats;
+}
+
static const struct ethtool_ops vlan_ethtool_ops = {
.get_settings = vlan_ethtool_get_settings,
.get_drvinfo = vlan_ethtool_get_drvinfo,
@@ -797,6 +832,7 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats = vlan_dev_get_stats,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
@@ -820,6 +856,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats = vlan_dev_get_stats,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
.ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
.ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index a91504850195..ddc105734af7 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -60,7 +60,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
if (data[IFLA_VLAN_FLAGS]) {
flags = nla_data(data[IFLA_VLAN_FLAGS]);
if ((flags->flags & flags->mask) &
- ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP))
+ ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
+ VLAN_FLAG_LOOSE_BINDING))
return -EINVAL;
}
@@ -119,7 +120,7 @@ static int vlan_get_tx_queues(struct net *net,
return 0;
}
-static int vlan_newlink(struct net_device *dev,
+static int vlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vlan_dev_info *vlan = vlan_dev_info(dev);
@@ -131,7 +132,7 @@ static int vlan_newlink(struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 6262c335f3c2..9ec1f057c03a 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -201,18 +201,17 @@ int vlan_proc_rem_dev(struct net_device *vlandev)
/* start read of /proc/net/vlan/config */
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(rcu)
{
struct net_device *dev;
struct net *net = seq_file_net(seq);
loff_t i = 1;
- read_lock(&dev_base_lock);
-
+ rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
- for_each_netdev(net, dev) {
+ for_each_netdev_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
@@ -234,7 +233,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&net->dev_base_head);
- for_each_netdev_continue(net, dev) {
+ for_each_netdev_continue_rcu(net, dev) {
if (!is_vlan_dev(dev))
continue;
@@ -245,9 +244,9 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void vlan_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(rcu)
{
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int vlan_seq_show(struct seq_file *seq, void *v)
diff --git a/net/9p/client.c b/net/9p/client.c
index 5bf5f227dbe0..8af95b2dddd6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -582,11 +582,9 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt)
memset(&fid->qid, 0, sizeof(struct p9_qid));
fid->mode = -1;
- fid->rdir_fpos = 0;
fid->uid = current_fsuid();
fid->clnt = clnt;
- fid->aux = NULL;
-
+ fid->rdir = NULL;
spin_lock_irqsave(&clnt->lock, flags);
list_add(&fid->flist, &clnt->fidlist);
spin_unlock_irqrestore(&clnt->lock, flags);
@@ -609,6 +607,7 @@ static void p9_fid_destroy(struct p9_fid *fid)
spin_lock_irqsave(&clnt->lock, flags);
list_del(&fid->flist);
spin_unlock_irqrestore(&clnt->lock, flags);
+ kfree(fid->rdir);
kfree(fid);
}
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 8d934dd7fd54..4dd873e3a1bb 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -633,8 +633,8 @@ static void p9_poll_mux(struct p9_conn *m)
if (n & POLLOUT) {
set_bit(Wpending, &m->wsched);
P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
- if ((m->wsize || !list_empty(&m->unsent_req_list))
- && !test_and_set_bit(Wworksched, &m->wsched)) {
+ if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
+ !test_and_set_bit(Wworksched, &m->wsched)) {
P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
queue_work(p9_mux_wq, &m->wq);
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b2e07f0dd298..ea1e3daabefe 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,7 +43,6 @@
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
-#include <linux/virtio_ids.h>
#include <linux/virtio_9p.h>
#define VIRTQUEUE_NUM 128
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index abe38014b7fd..9fc4da56fb1d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -56,6 +56,7 @@
#include <linux/if_arp.h>
#include <linux/smp_lock.h>
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
+#include <linux/compat.h>
#include <net/datalink.h>
#include <net/psnap.h>
#include <net/sock.h>
@@ -922,13 +923,8 @@ static unsigned long atalk_sum_partial(const unsigned char *data,
{
/* This ought to be unwrapped neatly. I'll trust gcc for now */
while (len--) {
- sum += *data;
- sum <<= 1;
- if (sum & 0x10000) {
- sum++;
- sum &= 0xffff;
- }
- data++;
+ sum += *data++;
+ sum = rol16(sum, 1);
}
return sum;
}
@@ -1021,12 +1017,13 @@ static struct proto ddp_proto = {
* Create a socket. Initialise the socket, blank the addresses
* set the state.
*/
-static int atalk_create(struct net *net, struct socket *sock, int protocol)
+static int atalk_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
int rc = -ESOCKTNOSUPPORT;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/*
@@ -1054,11 +1051,13 @@ static int atalk_release(struct socket *sock)
{
struct sock *sk = sock->sk;
+ lock_kernel();
if (sk) {
sock_orphan(sk);
sock->sk = NULL;
atalk_destroy_socket(sk);
}
+ unlock_kernel();
return 0;
}
@@ -1134,6 +1133,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_at *addr = (struct sockaddr_at *)uaddr;
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
+ int err;
if (!sock_flag(sk, SOCK_ZAPPED) ||
addr_len != sizeof(struct sockaddr_at))
@@ -1142,37 +1142,44 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr->sat_family != AF_APPLETALK)
return -EAFNOSUPPORT;
+ lock_kernel();
if (addr->sat_addr.s_net == htons(ATADDR_ANYNET)) {
struct atalk_addr *ap = atalk_find_primary();
+ err = -EADDRNOTAVAIL;
if (!ap)
- return -EADDRNOTAVAIL;
+ goto out;
at->src_net = addr->sat_addr.s_net = ap->s_net;
at->src_node = addr->sat_addr.s_node= ap->s_node;
} else {
+ err = -EADDRNOTAVAIL;
if (!atalk_find_interface(addr->sat_addr.s_net,
addr->sat_addr.s_node))
- return -EADDRNOTAVAIL;
+ goto out;
at->src_net = addr->sat_addr.s_net;
at->src_node = addr->sat_addr.s_node;
}
if (addr->sat_port == ATADDR_ANYPORT) {
- int n = atalk_pick_and_bind_port(sk, addr);
+ err = atalk_pick_and_bind_port(sk, addr);
- if (n < 0)
- return n;
+ if (err < 0)
+ goto out;
} else {
at->src_port = addr->sat_port;
+ err = -EADDRINUSE;
if (atalk_find_or_insert_socket(sk, addr))
- return -EADDRINUSE;
+ goto out;
}
sock_reset_flag(sk, SOCK_ZAPPED);
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
/* Set the address we talk to */
@@ -1182,6 +1189,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
struct sockaddr_at *addr;
+ int err;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
@@ -1206,12 +1214,15 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
#endif
}
+ lock_kernel();
+ err = -EBUSY;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
- return -EBUSY;
+ goto out;
+ err = -ENETUNREACH;
if (!atrtr_get_dev(&addr->sat_addr))
- return -ENETUNREACH;
+ goto out;
at->dest_port = addr->sat_port;
at->dest_net = addr->sat_addr.s_net;
@@ -1219,7 +1230,10 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -1232,17 +1246,21 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_at sat;
struct sock *sk = sock->sk;
struct atalk_sock *at = at_sk(sk);
+ int err;
+ lock_kernel();
+ err = -ENOBUFS;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
- return -ENOBUFS;
+ goto out;
*uaddr_len = sizeof(struct sockaddr_at);
memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
if (peer) {
+ err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ goto out;
sat.sat_addr.s_net = at->dest_net;
sat.sat_addr.s_node = at->dest_node;
@@ -1253,9 +1271,23 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
sat.sat_port = at->src_port;
}
+ err = 0;
sat.sat_family = AF_APPLETALK;
memcpy(uaddr, &sat, sizeof(sat));
- return 0;
+
+out:
+ unlock_kernel();
+ return err;
+}
+
+static unsigned int atalk_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ int err;
+ lock_kernel();
+ err = datagram_poll(file, sock, wait);
+ unlock_kernel();
+ return err;
}
#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
@@ -1563,23 +1595,28 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (len > DDP_MAXSZ)
return -EMSGSIZE;
+ lock_kernel();
if (usat) {
+ err = -EBUSY;
if (sock_flag(sk, SOCK_ZAPPED))
if (atalk_autobind(sk) < 0)
- return -EBUSY;
+ goto out;
+ err = -EINVAL;
if (msg->msg_namelen < sizeof(*usat) ||
usat->sat_family != AF_APPLETALK)
- return -EINVAL;
+ goto out;
+ err = -EPERM;
/* netatalk didn't implement this check */
if (usat->sat_addr.s_node == ATADDR_BCAST &&
!sock_flag(sk, SOCK_BROADCAST)) {
- return -EPERM;
+ goto out;
}
} else {
+ err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ goto out;
usat = &local_satalk;
usat->sat_family = AF_APPLETALK;
usat->sat_port = at->dest_port;
@@ -1603,8 +1640,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
rt = atrtr_find(&at_hint);
}
+ err = -ENETUNREACH;
if (!rt)
- return -ENETUNREACH;
+ goto out;
dev = rt->dev;
@@ -1614,7 +1652,7 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
size += dev->hard_header_len;
skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
if (!skb)
- return err;
+ goto out;
skb->sk = sk;
skb_reserve(skb, ddp_dl->header_length);
@@ -1637,7 +1675,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
- return -EFAULT;
+ err = -EFAULT;
+ goto out;
}
if (sk->sk_no_check == 1)
@@ -1676,7 +1715,8 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
rt = atrtr_find(&at_lo);
if (!rt) {
kfree_skb(skb);
- return -ENETUNREACH;
+ err = -ENETUNREACH;
+ goto out;
}
dev = rt->dev;
skb->dev = dev;
@@ -1696,7 +1736,9 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
}
SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);
- return len;
+out:
+ unlock_kernel();
+ return err ? : len;
}
static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
@@ -1708,10 +1750,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
int copied = 0;
int offset = 0;
int err = 0;
- struct sk_buff *skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
+ struct sk_buff *skb;
+
+ lock_kernel();
+ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
- return err;
+ goto out;
/* FIXME: use skb->cb to be able to use shared skbs */
ddp = ddp_hdr(skb);
@@ -1739,6 +1784,9 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
}
skb_free_datagram(sk, skb); /* Free the datagram. */
+
+out:
+ unlock_kernel();
return err ? : copied;
}
@@ -1810,12 +1858,14 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static int atalk_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
/*
- * All Appletalk ioctls except SIOCATALKDIFADDR are standard. And
- * SIOCATALKDIFADDR is handled by upper layer as well, so there is
- * nothing to do. Eventually SIOCATALKDIFADDR should be moved
- * here so there is no generic SIOCPROTOPRIVATE translation in the
- * system.
+ * SIOCATALKDIFADDR is a SIOCPROTOPRIVATE ioctl number, so we
+ * cannot handle it in common code. The data we access in ifreq
+ * here is compatible, so we can simply call the native
+ * handler.
*/
+ if (cmd == SIOCATALKDIFADDR)
+ return atalk_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+
return -ENOIOCTLCMD;
}
#endif
@@ -1827,7 +1877,7 @@ static const struct net_proto_family atalk_family_ops = {
.owner = THIS_MODULE,
};
-static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = {
+static const struct proto_ops atalk_dgram_ops = {
.family = PF_APPLETALK,
.owner = THIS_MODULE,
.release = atalk_release,
@@ -1836,7 +1886,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = atalk_getname,
- .poll = datagram_poll,
+ .poll = atalk_poll,
.ioctl = atalk_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = atalk_compat_ioctl,
@@ -1851,8 +1901,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(atalk_dgram_ops) = {
.sendpage = sock_no_sendpage,
};
-SOCKOPS_WRAP(atalk_dgram, PF_APPLETALK);
-
static struct notifier_block ddp_notifier = {
.notifier_call = ddp_device_event,
};
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 4da8892ced5f..2ea40995dced 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -191,8 +191,181 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
-int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+/*
+ * FIXME:
+ * The compat_ioctl handling is duplicated, using both these conversion
+ * routines and the compat argument to the actual handlers. Both
+ * versions are somewhat incomplete and should be merged, e.g. by
+ * moving the ioctl number translation into the actual handlers and
+ * killing the conversion code.
+ *
+ * -arnd, November 2009
+ */
+#define ATM_GETLINKRATE32 _IOW('a', ATMIOC_ITF+1, struct compat_atmif_sioc)
+#define ATM_GETNAMES32 _IOW('a', ATMIOC_ITF+3, struct compat_atm_iobuf)
+#define ATM_GETTYPE32 _IOW('a', ATMIOC_ITF+4, struct compat_atmif_sioc)
+#define ATM_GETESI32 _IOW('a', ATMIOC_ITF+5, struct compat_atmif_sioc)
+#define ATM_GETADDR32 _IOW('a', ATMIOC_ITF+6, struct compat_atmif_sioc)
+#define ATM_RSTADDR32 _IOW('a', ATMIOC_ITF+7, struct compat_atmif_sioc)
+#define ATM_ADDADDR32 _IOW('a', ATMIOC_ITF+8, struct compat_atmif_sioc)
+#define ATM_DELADDR32 _IOW('a', ATMIOC_ITF+9, struct compat_atmif_sioc)
+#define ATM_GETCIRANGE32 _IOW('a', ATMIOC_ITF+10, struct compat_atmif_sioc)
+#define ATM_SETCIRANGE32 _IOW('a', ATMIOC_ITF+11, struct compat_atmif_sioc)
+#define ATM_SETESI32 _IOW('a', ATMIOC_ITF+12, struct compat_atmif_sioc)
+#define ATM_SETESIF32 _IOW('a', ATMIOC_ITF+13, struct compat_atmif_sioc)
+#define ATM_GETSTAT32 _IOW('a', ATMIOC_SARCOM+0, struct compat_atmif_sioc)
+#define ATM_GETSTATZ32 _IOW('a', ATMIOC_SARCOM+1, struct compat_atmif_sioc)
+#define ATM_GETLOOP32 _IOW('a', ATMIOC_SARCOM+2, struct compat_atmif_sioc)
+#define ATM_SETLOOP32 _IOW('a', ATMIOC_SARCOM+3, struct compat_atmif_sioc)
+#define ATM_QUERYLOOP32 _IOW('a', ATMIOC_SARCOM+4, struct compat_atmif_sioc)
+
+static struct {
+ unsigned int cmd32;
+ unsigned int cmd;
+} atm_ioctl_map[] = {
+ { ATM_GETLINKRATE32, ATM_GETLINKRATE },
+ { ATM_GETNAMES32, ATM_GETNAMES },
+ { ATM_GETTYPE32, ATM_GETTYPE },
+ { ATM_GETESI32, ATM_GETESI },
+ { ATM_GETADDR32, ATM_GETADDR },
+ { ATM_RSTADDR32, ATM_RSTADDR },
+ { ATM_ADDADDR32, ATM_ADDADDR },
+ { ATM_DELADDR32, ATM_DELADDR },
+ { ATM_GETCIRANGE32, ATM_GETCIRANGE },
+ { ATM_SETCIRANGE32, ATM_SETCIRANGE },
+ { ATM_SETESI32, ATM_SETESI },
+ { ATM_SETESIF32, ATM_SETESIF },
+ { ATM_GETSTAT32, ATM_GETSTAT },
+ { ATM_GETSTATZ32, ATM_GETSTATZ },
+ { ATM_GETLOOP32, ATM_GETLOOP },
+ { ATM_SETLOOP32, ATM_SETLOOP },
+ { ATM_QUERYLOOP32, ATM_QUERYLOOP },
+};
+
+#define NR_ATM_IOCTL ARRAY_SIZE(atm_ioctl_map)
+
+static int do_atm_iobuf(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct atm_iobuf __user *iobuf;
+ struct compat_atm_iobuf __user *iobuf32;
+ u32 data;
+ void __user *datap;
+ int len, err;
+
+ iobuf = compat_alloc_user_space(sizeof(*iobuf));
+ iobuf32 = compat_ptr(arg);
+
+ if (get_user(len, &iobuf32->length) ||
+ get_user(data, &iobuf32->buffer))
+ return -EFAULT;
+ datap = compat_ptr(data);
+ if (put_user(len, &iobuf->length) ||
+ put_user(datap, &iobuf->buffer))
+ return -EFAULT;
+
+ err = do_vcc_ioctl(sock, cmd, (unsigned long) iobuf, 0);
+
+ if (!err) {
+ if (copy_in_user(&iobuf32->length, &iobuf->length,
+ sizeof(int)))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+
+static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct atmif_sioc __user *sioc;
+ struct compat_atmif_sioc __user *sioc32;
+ u32 data;
+ void __user *datap;
+ int err;
+
+ sioc = compat_alloc_user_space(sizeof(*sioc));
+ sioc32 = compat_ptr(arg);
+
+ if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int))
+ || get_user(data, &sioc32->arg))
+ return -EFAULT;
+ datap = compat_ptr(data);
+ if (put_user(datap, &sioc->arg))
+ return -EFAULT;
+
+ err = do_vcc_ioctl(sock, cmd, (unsigned long) sioc, 0);
+
+ if (!err) {
+ if (copy_in_user(&sioc32->length, &sioc->length,
+ sizeof(int)))
+ err = -EFAULT;
+ }
+ return err;
+}
+
+static int do_atm_ioctl(struct socket *sock, unsigned int cmd32,
+ unsigned long arg)
+{
+ int i;
+ unsigned int cmd = 0;
+
+ switch (cmd32) {
+ case SONET_GETSTAT:
+ case SONET_GETSTATZ:
+ case SONET_GETDIAG:
+ case SONET_SETDIAG:
+ case SONET_CLRDIAG:
+ case SONET_SETFRAMING:
+ case SONET_GETFRAMING:
+ case SONET_GETFRSENSE:
+ return do_atmif_sioc(sock, cmd32, arg);
+ }
+
+ for (i = 0; i < NR_ATM_IOCTL; i++) {
+ if (cmd32 == atm_ioctl_map[i].cmd32) {
+ cmd = atm_ioctl_map[i].cmd;
+ break;
+ }
+ }
+ if (i == NR_ATM_IOCTL)
+ return -EINVAL;
+
+ switch (cmd) {
+ case ATM_GETNAMES:
+ return do_atm_iobuf(sock, cmd, arg);
+
+ case ATM_GETLINKRATE:
+ case ATM_GETTYPE:
+ case ATM_GETESI:
+ case ATM_GETADDR:
+ case ATM_RSTADDR:
+ case ATM_ADDADDR:
+ case ATM_DELADDR:
+ case ATM_GETCIRANGE:
+ case ATM_SETCIRANGE:
+ case ATM_SETESI:
+ case ATM_SETESIF:
+ case ATM_GETSTAT:
+ case ATM_GETSTATZ:
+ case ATM_GETLOOP:
+ case ATM_SETLOOP:
+ case ATM_QUERYLOOP:
+ return do_atmif_sioc(sock, cmd, arg);
+ }
+
+ return -EINVAL;
+}
+
+int vcc_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
- return do_vcc_ioctl(sock, cmd, arg, 1);
+ int ret;
+
+ ret = do_vcc_ioctl(sock, cmd, arg, 1);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+
+ return do_atm_ioctl(sock, cmd, arg);
}
#endif
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index a6e1fdbae87f..8d74e62b0d79 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -127,7 +127,8 @@ static const struct proto_ops pvc_proto_ops = {
};
-static int pvc_create(struct net *net, struct socket *sock,int protocol)
+static int pvc_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
if (net != &init_net)
return -EAFNOSUPPORT;
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 819354233318..66e1d9b3e5de 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -25,7 +25,7 @@
#include "signaling.h"
#include "addr.h"
-static int svc_create(struct net *net, struct socket *sock,int protocol);
+static int svc_create(struct net *net, struct socket *sock, int protocol, int kern);
/*
* Note: since all this is still nicely synchronized with the signaling demon,
@@ -330,7 +330,7 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
lock_sock(sk);
- error = svc_create(sock_net(sk), newsock,0);
+ error = svc_create(sock_net(sk), newsock, 0, 0);
if (error)
goto out;
@@ -650,11 +650,12 @@ static const struct proto_ops svc_proto_ops = {
};
-static int svc_create(struct net *net, struct socket *sock,int protocol)
+static int svc_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
int error;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
sock->ops = &svc_proto_ops;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index f05306f168fa..5588ba69c468 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -369,6 +369,9 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
+ if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
+ return -EINVAL;
+
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
@@ -418,14 +421,10 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
break;
case AX25_T3:
- if (ax25_ctl.arg < 0)
- goto einval_put;
ax25->t3 = ax25_ctl.arg * HZ;
break;
case AX25_IDLE:
- if (ax25_ctl.arg < 0)
- goto einval_put;
ax25->idle = ax25_ctl.arg * 60 * HZ;
break;
@@ -800,12 +799,13 @@ static struct proto ax25_proto = {
.obj_size = sizeof(struct sock),
};
-static int ax25_create(struct net *net, struct socket *sock, int protocol)
+static int ax25_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
ax25_cb *ax25;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 399e59c9c6cb..087cc51f5927 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -126,7 +126,8 @@ int bt_sock_unregister(int proto)
}
EXPORT_SYMBOL(bt_sock_unregister);
-static int bt_sock_create(struct net *net, struct socket *sock, int proto)
+static int bt_sock_create(struct net *net, struct socket *sock, int proto,
+ int kern)
{
int err;
@@ -144,7 +145,7 @@ static int bt_sock_create(struct net *net, struct socket *sock, int proto)
read_lock(&bt_proto_lock);
if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
- err = bt_proto[proto]->create(net, sock, proto);
+ err = bt_proto[proto]->create(net, sock, proto, kern);
bt_sock_reclassify_lock(sock, proto);
module_put(bt_proto[proto]->owner);
}
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 0a2c5460bb48..2ff6ac7b2ed4 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -195,7 +195,8 @@ static struct proto bnep_proto = {
.obj_size = sizeof(struct bt_sock)
};
-static int bnep_sock_create(struct net *net, struct socket *sock, int protocol)
+static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index de7c8040bc56..978cc3a718ad 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -190,7 +190,8 @@ static struct proto cmtp_proto = {
.obj_size = sizeof(struct bt_sock)
};
-static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol)
+static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a9750984f772..b7c4224f4e7d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->type = type;
conn->mode = HCI_CM_ACTIVE;
conn->state = BT_OPEN;
+ conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index e7395f231989..1ca5c7ca9bd4 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -621,7 +621,8 @@ static struct proto hci_sk_proto = {
.obj_size = sizeof(struct hci_pinfo)
};
-static int hci_sock_create(struct net *net, struct socket *sock, int protocol)
+static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 4beb6a7a2953..9cfef68b9fec 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -241,7 +241,8 @@ static struct proto hidp_proto = {
.obj_size = sizeof(struct bt_sock)
};
-static int hidp_sock_create(struct net *net, struct socket *sock, int protocol)
+static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index d65101d92ee5..54992f782301 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -819,7 +819,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
return sk;
}
-static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
+static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
@@ -831,7 +832,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
- if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
return -EPERM;
sock->ops = &l2cap_sock_ops;
@@ -1361,8 +1362,8 @@ static int l2cap_ertm_send(struct sock *sk)
if (pi->conn_state & L2CAP_CONN_WAIT_F)
return 0;
- while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
- && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
+ !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
tx_skb = skb_clone(skb, GFP_ATOMIC);
if (pi->remote_max_tx &&
@@ -1603,8 +1604,8 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
return -EOPNOTSUPP;
/* Check outgoing MTU */
- if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
- && len > pi->omtu)
+ if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
+ len > pi->omtu)
return -EINVAL;
lock_sock(sk);
@@ -2205,7 +2206,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
- struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
void *ptr = req->data;
BT_DBG("sk %p", sk);
@@ -2394,6 +2395,10 @@ done:
rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
break;
case L2CAP_MODE_STREAMING:
@@ -2401,6 +2406,10 @@ done:
pi->max_pdu_size = rfc.max_pdu_size;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
break;
default:
@@ -2410,9 +2419,6 @@ done:
rfc.mode = pi->mode;
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
-
if (result == L2CAP_CONF_SUCCESS)
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
}
@@ -2750,8 +2756,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
goto unlock;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
- || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
+ l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
sk->sk_state = BT_CONNECTED;
@@ -2839,8 +2845,8 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
- if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
- || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
+ l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
sk->sk_state = BT_CONNECTED;
@@ -3382,8 +3388,8 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
pi->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(sk);
- if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
- && (pi->unacked_frames > 0))
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+ (pi->unacked_frames > 0))
__mod_retrans_timer();
l2cap_ertm_send(sk);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d3bfc1b0afb1..4b5968dda673 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -323,7 +323,8 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
return sk;
}
-static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol)
+static int rfcomm_sock_create(struct net *net, struct socket *sock,
+ int protocol, int kern)
{
struct sock *sk;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 694a65541b73..dd8f6ec57dce 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -430,7 +430,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
return sk;
}
-static int sco_sock_create(struct net *net, struct socket *sock, int protocol)
+static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 57bf05c353bc..3b8e038ab32c 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -60,8 +60,8 @@ static inline unsigned long hold_time(const struct net_bridge *br)
static inline int has_expired(const struct net_bridge *br,
const struct net_bridge_fdb_entry *fdb)
{
- return !fdb->is_static
- && time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
+ return !fdb->is_static &&
+ time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
}
static inline int br_mac_hash(const unsigned char *mac)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 2117e5ba24c8..a2cbe61f6e65 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -377,15 +377,23 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
struct net_bridge_port *p;
int err = 0;
- if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
+ /* Don't allow bridging non-ethernet like devices */
+ if ((dev->flags & IFF_LOOPBACK) ||
+ dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
return -EINVAL;
+ /* No bridging of bridges */
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
+ /* Device is already being bridged */
if (dev->br_port != NULL)
return -EBUSY;
+ /* No bridging devices that dislike that (e.g. wireless) */
+ if (dev->priv_flags & IFF_DONT_BRIDGE)
+ return -EOPNOTSUPP;
+
p = new_nbp(br, dev);
if (IS_ERR(p))
return PTR_ERR(p);
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 6a6433daaf27..2af6e4a90262 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -81,6 +81,7 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
return num;
}
+/* called with RTNL */
static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
{
struct net_device *dev;
@@ -89,7 +90,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- dev = dev_get_by_index(dev_net(br->dev), ifindex);
+ dev = __dev_get_by_index(dev_net(br->dev), ifindex);
if (dev == NULL)
return -EINVAL;
@@ -98,7 +99,6 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
else
ret = br_del_if(br, dev);
- dev_put(dev);
return ret;
}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index ee4820aa1843..bee4f300d0c8 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -316,9 +316,9 @@ static ssize_t store_group_addr(struct device *d,
if (new_addr[5] & ~0xf)
return -EINVAL;
- if (new_addr[5] == 1 /* 802.3x Pause address */
- || new_addr[5] == 2 /* 802.3ad Slow protocols */
- || new_addr[5] == 3) /* 802.1X PAE address */
+ if (new_addr[5] == 1 || /* 802.3x Pause address */
+ new_addr[5] == 2 || /* 802.3ad Slow protocols */
+ new_addr[5] == 3) /* 802.1X PAE address */
return -EINVAL;
spin_lock_bh(&br->lock);
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 48527e621626..75e29a9cebda 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -135,8 +135,8 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
if (memcmp(sp, header, sizeof(header)))
return false;
- if (info->bitmask & EBT_STP_TYPE
- && FWINV(info->type != sp->type, EBT_STP_TYPE))
+ if (info->bitmask & EBT_STP_TYPE &&
+ FWINV(info->type != sp->type, EBT_STP_TYPE))
return false;
if (sp->type == BPDU_TYPE_CONFIG &&
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 3f2eb27e1ffb..51adc4c2b860 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -114,7 +114,8 @@ static void can_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
}
-static int can_create(struct net *net, struct socket *sock, int protocol)
+static int can_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct can_proto *cp;
@@ -125,7 +126,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
if (protocol < 0 || protocol >= CAN_NPROTO)
return -EINVAL;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
#ifdef CONFIG_MODULES
@@ -160,11 +161,6 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
goto errout;
}
- if (cp->capability >= 0 && !capable(cp->capability)) {
- err = -EPERM;
- goto errout;
- }
-
sock->ops = cp->ops;
sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
@@ -379,8 +375,8 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
return &d->rx[RX_ALL];
/* extra filterlists for the subscription of a single non-RTR can_id */
- if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
- && !(*can_id & CAN_RTR_FLAG)) {
+ if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
+ !(*can_id & CAN_RTR_FLAG)) {
if (*can_id & CAN_EFF_FLAG) {
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
@@ -529,8 +525,8 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
*/
hlist_for_each_entry_rcu(r, next, rl, list) {
- if (r->can_id == can_id && r->mask == mask
- && r->func == func && r->data == data)
+ if (r->can_id == can_id && r->mask == mask &&
+ r->func == func && r->data == data)
break;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 2f47039c79dd..e32af52238a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -132,23 +132,27 @@ static inline struct bcm_sock *bcm_sk(const struct sock *sk)
/*
* procfs functions
*/
-static char *bcm_proc_getifname(int ifindex)
+static char *bcm_proc_getifname(char *result, int ifindex)
{
struct net_device *dev;
if (!ifindex)
return "any";
- /* no usage counting */
- dev = __dev_get_by_index(&init_net, ifindex);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(&init_net, ifindex);
if (dev)
- return dev->name;
+ strcpy(result, dev->name);
+ else
+ strcpy(result, "???");
+ rcu_read_unlock();
- return "???";
+ return result;
}
static int bcm_proc_show(struct seq_file *m, void *v)
{
+ char ifname[IFNAMSIZ];
struct sock *sk = (struct sock *)m->private;
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
@@ -157,7 +161,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, " / sk %p", sk);
seq_printf(m, " / bo %p", bo);
seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
- seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex));
+ seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
seq_printf(m, " <<<\n");
list_for_each_entry(op, &bo->rx_ops, list) {
@@ -169,7 +173,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
continue;
seq_printf(m, "rx_op: %03X %-5s ",
- op->can_id, bcm_proc_getifname(op->ifindex));
+ op->can_id, bcm_proc_getifname(ifname, op->ifindex));
seq_printf(m, "[%d]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
@@ -194,7 +198,8 @@ static int bcm_proc_show(struct seq_file *m, void *v)
list_for_each_entry(op, &bo->tx_ops, list) {
seq_printf(m, "tx_op: %03X %s [%d] ",
- op->can_id, bcm_proc_getifname(op->ifindex),
+ op->can_id,
+ bcm_proc_getifname(ifname, op->ifindex),
op->nframes);
if (op->kt_ival1.tv64)
@@ -1576,7 +1581,6 @@ static struct proto bcm_proto __read_mostly = {
static struct can_proto bcm_can_proto __read_mostly = {
.type = SOCK_DGRAM,
.protocol = CAN_BCM,
- .capability = -1,
.ops = &bcm_ops,
.prot = &bcm_proto,
};
diff --git a/net/can/raw.c b/net/can/raw.c
index 6e77db58b9e6..abca920440b5 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -742,7 +742,6 @@ static struct proto raw_proto __read_mostly = {
static struct can_proto raw_can_proto __read_mostly = {
.type = SOCK_RAW,
.protocol = CAN_RAW,
- .capability = -1,
.ops = &raw_ops,
.prot = &raw_proto,
};
diff --git a/net/compat.c b/net/compat.c
index 6a2f75fb3f45..e1a56ade803b 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -758,9 +758,13 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
{
int datagrams;
struct timespec ktspec;
- struct compat_timespec __user *utspec =
- (struct compat_timespec __user *)timeout;
+ struct compat_timespec __user *utspec;
+ if (timeout == NULL)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL);
+
+ utspec = (struct compat_timespec __user *)timeout;
if (get_user(ktspec.tv_sec, &utspec->tv_sec) ||
get_user(ktspec.tv_nsec, &utspec->tv_nsec))
return -EFAULT;
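After this fix the compat path treats a NULL timeout the same way the native syscall does: it blocks according to the socket instead of faulting on the copy-in. A user-space sketch, assuming a libc that exposes the recvmmsg() wrapper:

#define _GNU_SOURCE
#include <sys/socket.h>

/* Receive up to vlen datagrams in one call; a NULL timeout means no timeout. */
static int drain_socket(int fd, struct mmsghdr *msgs, unsigned int vlen)
{
        return recvmmsg(fd, msgs, vlen, 0, NULL);
}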
diff --git a/net/core/Makefile b/net/core/Makefile
index 796f46eece5f..08791ac3e05a 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,7 +6,6 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
gen_stats.o gen_estimator.o net_namespace.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
-obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4d57f5e12b05..95c2e0840d0d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -224,6 +224,15 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
consume_skb(skb);
sk_mem_reclaim_partial(sk);
}
+EXPORT_SYMBOL(skb_free_datagram);
+
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
+{
+ lock_sock(sk);
+ skb_free_datagram(sk, skb);
+ release_sock(sk);
+}
+EXPORT_SYMBOL(skb_free_datagram_locked);
/**
* skb_kill_datagram - Free a datagram skbuff forcibly
@@ -753,5 +762,4 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
EXPORT_SYMBOL(datagram_poll);
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
EXPORT_SYMBOL(skb_copy_datagram_iovec);
-EXPORT_SYMBOL(skb_free_datagram);
EXPORT_SYMBOL(skb_recv_datagram);
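skb_free_datagram_locked() is for receive paths that want the socket lock held around the free so memory accounting stays consistent. A hedged sketch of a recvmsg-style consumer; the copy-to-user step and protocol specifics are elided:

#include <net/sock.h>
#include <linux/skbuff.h>

static int foo_recv_one(struct sock *sk, unsigned int flags, int noblock)
{
        struct sk_buff *skb;
        int err;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        /* ... copy the payload to user space here ... */

        /* takes and releases the socket lock around skb_free_datagram() */
        skb_free_datagram_locked(sk, skb);
        return 0;
}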
diff --git a/net/core/dev.c b/net/core/dev.c
index 68a1bb68b5a8..0913a08a87d6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -79,6 +79,7 @@
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
@@ -104,6 +105,7 @@
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
+#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
@@ -175,7 +177,7 @@ static struct list_head ptype_all __read_mostly; /* Taps */
* The @dev_base_head list is protected by @dev_base_lock and the rtnl
* semaphore.
*
- * Pure readers hold dev_base_lock for reading.
+ * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
*
* Writers must hold the rtnl semaphore while they loop through the
* dev_base_head list, and hold dev_base_lock for writing when they do the
@@ -196,7 +198,7 @@ EXPORT_SYMBOL(dev_base_lock);
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
- return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
+ return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}
static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
@@ -212,8 +214,8 @@ static int list_netdevice(struct net_device *dev)
ASSERT_RTNL();
write_lock_bh(&dev_base_lock);
- list_add_tail(&dev->dev_list, &net->dev_base_head);
- hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
+ hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock_bh(&dev_base_lock);
@@ -229,8 +231,8 @@ static void unlist_netdevice(struct net_device *dev)
/* Unlink dev from the device chain */
write_lock_bh(&dev_base_lock);
- list_del(&dev->dev_list);
- hlist_del(&dev->name_hlist);
+ list_del_rcu(&dev->dev_list);
+ hlist_del_rcu(&dev->name_hlist);
hlist_del_rcu(&dev->index_hlist);
write_unlock_bh(&dev_base_lock);
}
@@ -587,18 +589,44 @@ __setup("netdev=", netdev_boot_setup);
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
struct hlist_node *p;
+ struct net_device *dev;
+ struct hlist_head *head = dev_name_hash(net, name);
- hlist_for_each(p, dev_name_hash(net, name)) {
- struct net_device *dev
- = hlist_entry(p, struct net_device, name_hlist);
+ hlist_for_each_entry(dev, p, head, name_hlist)
if (!strncmp(dev->name, name, IFNAMSIZ))
return dev;
- }
+
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
+ * dev_get_by_name_rcu - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned.
+ * The reference counters are not incremented so the caller must be
+ * careful with locks. The caller must hold the RCU read lock.
+ */
+
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
+{
+ struct hlist_node *p;
+ struct net_device *dev;
+ struct hlist_head *head = dev_name_hash(net, name);
+
+ hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+ if (!strncmp(dev->name, name, IFNAMSIZ))
+ return dev;
+
+ return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_name_rcu);
+
+/**
* dev_get_by_name - find a device by its name
* @net: the applicable net namespace
* @name: name to find
@@ -614,11 +642,11 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- dev = __dev_get_by_name(net, name);
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(net, name);
if (dev)
dev_hold(dev);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
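dev_get_by_name_rcu() trades the dev_base_lock read lock for rcu_read_lock() and takes no reference, so the returned pointer is only valid inside the read-side critical section. A minimal sketch of the expected calling convention:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int foo_name_to_ifindex(struct net *net, const char *name)
{
        struct net_device *dev;
        int ifindex = -1;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                ifindex = dev->ifindex; /* dev must not be used after the unlock */
        rcu_read_unlock();

        return ifindex;
}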
@@ -638,13 +666,13 @@ EXPORT_SYMBOL(dev_get_by_name);
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
struct hlist_node *p;
+ struct net_device *dev;
+ struct hlist_head *head = dev_index_hash(net, ifindex);
- hlist_for_each(p, dev_index_hash(net, ifindex)) {
- struct net_device *dev
- = hlist_entry(p, struct net_device, index_hlist);
+ hlist_for_each_entry(dev, p, head, index_hlist)
if (dev->ifindex == ifindex)
return dev;
- }
+
return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
@@ -773,15 +801,15 @@ struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
struct net_device *dev, *ret;
ret = NULL;
- read_lock(&dev_base_lock);
- for_each_netdev(net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
dev_hold(dev);
ret = dev;
break;
}
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
@@ -866,7 +894,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
free_page((unsigned long) inuse);
}
- snprintf(buf, IFNAMSIZ, name, i);
+ if (buf != name)
+ snprintf(buf, IFNAMSIZ, name, i);
if (!__dev_get_by_name(net, buf))
return i;
@@ -906,6 +935,21 @@ int dev_alloc_name(struct net_device *dev, const char *name)
}
EXPORT_SYMBOL(dev_alloc_name);
+static int dev_get_valid_name(struct net *net, const char *name, char *buf,
+ bool fmt)
+{
+ if (!dev_valid_name(name))
+ return -EINVAL;
+
+ if (fmt && strchr(name, '%'))
+ return __dev_alloc_name(net, name, buf);
+ else if (__dev_get_by_name(net, name))
+ return -EEXIST;
+ else if (buf != name)
+ strlcpy(buf, name, IFNAMSIZ);
+
+ return 0;
+}
/**
* dev_change_name - change name of a device
@@ -929,28 +973,20 @@ int dev_change_name(struct net_device *dev, const char *newname)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (!dev_valid_name(newname))
- return -EINVAL;
-
if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
return 0;
memcpy(oldname, dev->name, IFNAMSIZ);
- if (strchr(newname, '%')) {
- err = dev_alloc_name(dev, newname);
- if (err < 0)
- return err;
- } else if (__dev_get_by_name(net, newname))
- return -EEXIST;
- else
- strlcpy(dev->name, newname, IFNAMSIZ);
+ err = dev_get_valid_name(net, newname, dev->name, 1);
+ if (err < 0)
+ return err;
rollback:
/* For now only devices in the initial network namespace
* are in sysfs.
*/
- if (net == &init_net) {
+ if (net_eq(net, &init_net)) {
ret = device_rename(&dev->dev, dev->name);
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
@@ -960,21 +996,27 @@ rollback:
write_lock_bh(&dev_base_lock);
hlist_del(&dev->name_hlist);
- hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ write_unlock_bh(&dev_base_lock);
+
+ synchronize_rcu();
+
+ write_lock_bh(&dev_base_lock);
+ hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
write_unlock_bh(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
if (ret) {
- if (err) {
- printk(KERN_ERR
- "%s: name change rollback failed: %d.\n",
- dev->name, ret);
- } else {
+ /* err >= 0 after dev_alloc_name() or stores the first errno */
+ if (err >= 0) {
err = ret;
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
+ } else {
+ printk(KERN_ERR
+ "%s: name change rollback failed: %d.\n",
+ dev->name, ret);
}
}
@@ -1062,9 +1104,9 @@ void dev_load(struct net *net, const char *name)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- dev = __dev_get_by_name(net, name);
- read_unlock(&dev_base_lock);
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(net, name);
+ rcu_read_unlock();
if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
@@ -1311,6 +1353,7 @@ rollback:
nb->notifier_call(nb, NETDEV_DOWN, dev);
}
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+ nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
}
}
@@ -1377,6 +1420,45 @@ static inline void net_timestamp(struct sk_buff *skb)
skb->tstamp.tv64 = 0;
}
+/**
+ * dev_forward_skb - loopback an skb to another netif
+ *
+ * @dev: destination network device
+ * @skb: buffer to forward
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP (packet was dropped)
+ *
+ * dev_forward_skb can be used for injecting an skb from the
+ * start_xmit function of one device into the receive queue
+ * of another device.
+ *
+ * The receiving device may be in another namespace, so
+ * we have to clear all information in the skb that could
+ * impact namespace isolation.
+ */
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ skb_orphan(skb);
+
+ if (!(dev->flags & IFF_UP))
+ return NET_RX_DROP;
+
+ if (skb->len > (dev->mtu + dev->hard_header_len))
+ return NET_RX_DROP;
+
+ skb_dst_drop(skb);
+ skb->tstamp.tv64 = 0;
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->mark = 0;
+ secpath_reset(skb);
+ nf_reset(skb);
+ return netif_rx(skb);
+}
+EXPORT_SYMBOL_GPL(dev_forward_skb);
+
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
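dev_forward_skb() targets virtual drivers (veth-style pairs) that hand a transmitted skb straight to a peer's receive path, possibly across namespaces. A hedged sketch of an ndo_start_xmit built on it; struct foo_priv and its peer pointer are hypothetical, and the skb ownership corner cases of a real driver are elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
        struct net_device *peer;        /* the other end of the pair */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);
        unsigned int len = skb->len;    /* the skb may be consumed below */

        if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        } else {
                dev->stats.tx_dropped++;
        }
        return NETDEV_TX_OK;
}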
@@ -1725,7 +1807,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int rc;
+ int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
if (!list_empty(&ptype_all))
@@ -1773,6 +1855,8 @@ gso:
nskb->next = NULL;
rc = ops->ndo_start_xmit(nskb, dev);
if (unlikely(rc != NETDEV_TX_OK)) {
+ if (rc & ~NETDEV_TX_MASK)
+ goto out_kfree_gso_skb;
nskb->next = skb->next;
skb->next = nskb;
return rc;
@@ -1782,11 +1866,12 @@ gso:
return NETDEV_TX_BUSY;
} while (skb->next);
- skb->destructor = DEV_GSO_CB(skb)->destructor;
-
+out_kfree_gso_skb:
+ if (likely(skb->next == NULL))
+ skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
kfree_skb(skb);
- return NETDEV_TX_OK;
+ return rc;
}
static u32 skb_tx_hashrnd;
@@ -1813,6 +1898,20 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_hash);
+static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+ if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+ if (net_ratelimit()) {
+ WARN(1, "%s selects TX queue %d, but "
+ "real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
+ }
+ return 0;
+ }
+ return queue_index;
+}
+
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
@@ -1826,6 +1925,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
if (ops->ndo_select_queue) {
queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = dev_cap_txqueue(dev, queue_index);
} else {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
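dev_cap_txqueue() clamps whatever ndo_select_queue() returns, so a stale or buggy driver mapping can no longer index past real_num_tx_queues; the core warns (ratelimited) and falls back to queue 0. A hedged sketch of a driver hook whose result is now sanity-checked by dev_pick_tx():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical mapping: one queue per 802.1p priority. If fewer queues are
 * actually configured, dev_pick_tx() now clamps the result instead of
 * letting the driver index past the array. */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        return skb->priority & 7;
}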
@@ -1971,8 +2071,8 @@ gso:
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_tx_queue_stopped(txq)) {
- rc = NET_XMIT_SUCCESS;
- if (!dev_hard_start_xmit(skb, dev, txq)) {
+ rc = dev_hard_start_xmit(skb, dev, txq);
+ if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
}
@@ -2227,7 +2327,7 @@ static int ing_filter(struct sk_buff *skb)
if (MAX_RED_LOOP < ttl++) {
printk(KERN_WARNING
"Redir loop detected Dropping packet (%d->%d)\n",
- skb->iif, dev->ifindex);
+ skb->skb_iif, dev->ifindex);
return TC_ACT_SHOT;
}
@@ -2335,8 +2435,8 @@ int netif_receive_skb(struct sk_buff *skb)
if (netpoll_receive_skb(skb))
return NET_RX_DROP;
- if (!skb->iif)
- skb->iif = skb->dev->ifindex;
+ if (!skb->skb_iif)
+ skb->skb_iif = skb->dev->ifindex;
null_or_orig = NULL;
orig_dev = skb->dev;
@@ -2476,7 +2576,7 @@ void napi_gro_flush(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_flush);
-int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
struct packet_type *ptype;
@@ -2484,7 +2584,7 @@ int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int same_flow;
int mac_len;
- int ret;
+ enum gro_result ret;
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
@@ -2568,7 +2668,8 @@ normal:
}
EXPORT_SYMBOL(dev_gro_receive);
-static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+static gro_result_t
+__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
@@ -2576,33 +2677,35 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
return GRO_NORMAL;
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
- && !compare_ether_header(skb_mac_header(p),
- skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow =
+ (p->dev == skb->dev) &&
+ !compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->flush = 0;
}
return dev_gro_receive(napi, skb);
}
-int napi_skb_finish(int ret, struct sk_buff *skb)
+gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
- int err = NET_RX_SUCCESS;
-
switch (ret) {
case GRO_NORMAL:
- return netif_receive_skb(skb);
+ if (netif_receive_skb(skb))
+ ret = GRO_DROP;
+ break;
case GRO_DROP:
- err = NET_RX_DROP;
- /* fall through */
-
case GRO_MERGED_FREE:
kfree_skb(skb);
break;
+
+ case GRO_HELD:
+ case GRO_MERGED:
+ break;
}
- return err;
+ return ret;
}
EXPORT_SYMBOL(napi_skb_finish);
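With the switch from raw NET_RX_* values to gro_result_t, a NAPI driver can inspect the outcome of the GRO path if it cares; most callers still ignore it. A minimal sketch (foo_rx_one() is a placeholder):

#include <linux/netdevice.h>

static void foo_rx_one(struct napi_struct *napi, struct sk_buff *skb,
                       struct net_device_stats *stats)
{
        if (napi_gro_receive(napi, skb) == GRO_DROP)
                stats->rx_dropped++;    /* the skb was already freed by the GRO path */
}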
@@ -2622,7 +2725,7 @@ void skb_gro_reset_offset(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_gro_reset_offset);
-int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
skb_gro_reset_offset(skb);
@@ -2652,31 +2755,30 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_get_frags);
-int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
+gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
+ gro_result_t ret)
{
- int err = NET_RX_SUCCESS;
-
switch (ret) {
case GRO_NORMAL:
case GRO_HELD:
skb->protocol = eth_type_trans(skb, napi->dev);
- if (ret == GRO_NORMAL)
- return netif_receive_skb(skb);
-
- skb_gro_pull(skb, -ETH_HLEN);
+ if (ret == GRO_HELD)
+ skb_gro_pull(skb, -ETH_HLEN);
+ else if (netif_receive_skb(skb))
+ ret = GRO_DROP;
break;
case GRO_DROP:
- err = NET_RX_DROP;
- /* fall through */
-
case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
+
+ case GRO_MERGED:
+ break;
}
- return err;
+ return ret;
}
EXPORT_SYMBOL(napi_frags_finish);
@@ -2717,12 +2819,12 @@ out:
}
EXPORT_SYMBOL(napi_frags_skb);
-int napi_gro_frags(struct napi_struct *napi)
+gro_result_t napi_gro_frags(struct napi_struct *napi)
{
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
- return NET_RX_DROP;
+ return GRO_DROP;
return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
@@ -3045,18 +3147,18 @@ static int dev_ifconf(struct net *net, char __user *arg)
* in detail.
*/
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(RCU)
{
struct net *net = seq_file_net(seq);
loff_t off;
struct net_device *dev;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
off = 1;
- for_each_netdev(net, dev)
+ for_each_netdev_rcu(net, dev)
if (off++ == *pos)
return dev;
@@ -3065,16 +3167,18 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net *net = seq_file_net(seq);
+ struct net_device *dev = (v == SEQ_START_TOKEN) ?
+ first_net_device(seq_file_net(seq)) :
+ next_net_device((struct net_device *)v);
+
++*pos;
- return v == SEQ_START_TOKEN ?
- first_net_device(net) : next_net_device((struct net_device *)v);
+ return rcu_dereference(dev);
}
void dev_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(RCU)
{
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
@@ -4283,12 +4387,12 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
EXPORT_SYMBOL(dev_set_mac_address);
/*
- * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
+ * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
*/
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
int err;
- struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+ struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
if (!dev)
return -ENODEV;
@@ -4520,9 +4624,9 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
dev_load(net, ifr.ifr_name);
- read_lock(&dev_base_lock);
+ rcu_read_lock();
ret = dev_ifsioc_locked(net, &ifr, cmd);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
if (!ret) {
if (colon)
*colon = ':';
@@ -4723,6 +4827,10 @@ static void rollback_registered_many(struct list_head *head)
netdev_unregister_kobject(dev);
}
+ /* Process any work delayed until the end of the batch */
+ dev = list_entry(head->next, struct net_device, unreg_list);
+ call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+
synchronize_net();
list_for_each_entry(dev, head, unreg_list)
@@ -4811,8 +4919,6 @@ EXPORT_SYMBOL(netdev_fix_features);
int register_netdevice(struct net_device *dev)
{
- struct hlist_head *head;
- struct hlist_node *p;
int ret;
struct net *net = dev_net(dev);
@@ -4841,26 +4947,14 @@ int register_netdevice(struct net_device *dev)
}
}
- if (!dev_valid_name(dev->name)) {
- ret = -EINVAL;
+ ret = dev_get_valid_name(net, dev->name, dev->name, 0);
+ if (ret)
goto err_uninit;
- }
dev->ifindex = dev_new_index(net);
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
- /* Check for existence of name */
- head = dev_name_hash(net, dev->name);
- hlist_for_each(p, head) {
- struct net_device *d
- = hlist_entry(p, struct net_device, name_hlist);
- if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
- ret = -EEXIST;
- goto err_uninit;
- }
- }
-
/* Fix illegal checksum combinations */
if ((dev->features & NETIF_F_HW_CSUM) &&
(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
@@ -5013,6 +5107,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
{
unsigned long rebroadcast_time, warning_time;
+ linkwatch_forget_dev(dev);
+
rebroadcast_time = warning_time = jiffies;
while (atomic_read(&dev->refcnt) != 0) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
@@ -5020,6 +5116,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
/* Rebroadcast unregister notification */
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
+ * should have already handled it the first time */
if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
&dev->state)) {
@@ -5115,6 +5213,32 @@ void netdev_run_todo(void)
}
/**
+ * dev_txq_stats_fold - fold tx_queues stats
+ * @dev: device to get statistics from
+ * @stats: struct net_device_stats to hold results
+ */
+void dev_txq_stats_fold(const struct net_device *dev,
+ struct net_device_stats *stats)
+{
+ unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+ unsigned int i;
+ struct netdev_queue *txq;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ txq = netdev_get_tx_queue(dev, i);
+ tx_bytes += txq->tx_bytes;
+ tx_packets += txq->tx_packets;
+ tx_dropped += txq->tx_dropped;
+ }
+ if (tx_bytes || tx_packets || tx_dropped) {
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_packets;
+ stats->tx_dropped = tx_dropped;
+ }
+}
+EXPORT_SYMBOL(dev_txq_stats_fold);
+
+/**
* dev_get_stats - get network device statistics
* @dev: device to get statistics from
*
@@ -5128,25 +5252,9 @@ const struct net_device_stats *dev_get_stats(struct net_device *dev)
if (ops->ndo_get_stats)
return ops->ndo_get_stats(dev);
- else {
- unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
- struct net_device_stats *stats = &dev->stats;
- unsigned int i;
- struct netdev_queue *txq;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- txq = netdev_get_tx_queue(dev, i);
- tx_bytes += txq->tx_bytes;
- tx_packets += txq->tx_packets;
- tx_dropped += txq->tx_dropped;
- }
- if (tx_bytes || tx_packets || tx_dropped) {
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_packets;
- stats->tx_dropped = tx_dropped;
- }
- return stats;
- }
+
+ dev_txq_stats_fold(dev, &dev->stats);
+ return &dev->stats;
}
EXPORT_SYMBOL(dev_get_stats);
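Pulling the tx-queue accumulation out into dev_txq_stats_fold() lets a driver that relies on the per-queue tx counters reuse it from its own ndo_get_stats while keeping its rx counters elsewhere. A hedged sketch:

#include <linux/netdevice.h>

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
        struct net_device_stats *stats = &dev->stats;

        /* rx counters are maintained by the driver; fold in the per-txq
         * tx_bytes/tx_packets/tx_dropped kept by the core */
        dev_txq_stats_fold(dev, stats);
        return stats;
}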
@@ -5226,6 +5334,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
netdev_init_queues(dev);
INIT_LIST_HEAD(&dev->napi_list);
+ INIT_LIST_HEAD(&dev->unreg_list);
+ INIT_LIST_HEAD(&dev->link_watch_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
strcpy(dev->name, name);
@@ -5293,7 +5403,7 @@ EXPORT_SYMBOL(synchronize_net);
* unregister_netdevice_queue - remove device from the kernel
* @dev: device
* @head: list
-
+ *
* This function shuts down a device interface and removes it
* from the kernel tables.
* If head not NULL, device is queued to be unregistered later.
@@ -5307,7 +5417,7 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
ASSERT_RTNL();
if (head) {
- list_add_tail(&dev->unreg_list, head);
+ list_move_tail(&dev->unreg_list, head);
} else {
rollback_registered(dev);
/* Finish processing unregister after unlock */
@@ -5319,7 +5429,6 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
/**
* unregister_netdevice_many - unregister many devices
* @head: list of devices
- *
*/
void unregister_netdevice_many(struct list_head *head)
{
@@ -5368,8 +5477,6 @@ EXPORT_SYMBOL(unregister_netdev);
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
- char buf[IFNAMSIZ];
- const char *destname;
int err;
ASSERT_RTNL();
@@ -5402,20 +5509,11 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- destname = dev->name;
- if (__dev_get_by_name(net, destname)) {
+ if (__dev_get_by_name(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (!dev_valid_name(pat))
- goto out;
- if (strchr(pat, '%')) {
- if (__dev_alloc_name(net, pat, buf) < 0)
- goto out;
- destname = buf;
- } else
- destname = pat;
- if (__dev_get_by_name(net, destname))
+ if (dev_get_valid_name(net, pat, dev->name, 1))
goto out;
}
@@ -5439,6 +5537,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
this device. They should clean all the things.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
/*
* Flush the unicast and multicast chains
@@ -5451,10 +5550,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Actually switch the network namespace */
dev_net_set(dev, net);
- /* Assign the new device name */
- if (destname != dev->name)
- strcpy(dev->name, destname);
-
/* If there is an ifindex conflict assign a new one */
if (__dev_get_by_index(net, dev->ifindex)) {
int iflink = (dev->iflink == dev->ifindex);
@@ -5641,14 +5736,13 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
static void __net_exit default_device_exit(struct net *net)
{
- struct net_device *dev;
+ struct net_device *dev, *aux;
/*
- * Push all migratable of the network devices back to the
+ * Push all migratable network devices back to the
* initial network namespace
*/
rtnl_lock();
-restart:
- for_each_netdev(net, dev) {
+ for_each_netdev_safe(net, dev, aux) {
int err;
char fb_name[IFNAMSIZ];
@@ -5656,11 +5750,9 @@ restart:
if (dev->features & NETIF_F_NETNS_LOCAL)
continue;
- /* Delete virtual devices */
- if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
- dev->rtnl_link_ops->dellink(dev, NULL);
- goto restart;
- }
+ /* Leave virtual devices for the generic cleanup */
+ if (dev->rtnl_link_ops)
+ continue;
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
@@ -5670,13 +5762,37 @@ restart:
__func__, dev->name, err);
BUG();
}
- goto restart;
}
rtnl_unlock();
}
+static void __net_exit default_device_exit_batch(struct list_head *net_list)
+{
+ /* At exit all network devices must be removed from a network
+ * namespace. Do this in the reverse order of registration.
+ * Do this across as many network namespaces as possible to
+ * improve batching efficiency.
+ */
+ struct net_device *dev;
+ struct net *net;
+ LIST_HEAD(dev_kill_list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list) {
+ for_each_netdev_reverse(net, dev) {
+ if (dev->rtnl_link_ops)
+ dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
+ else
+ unregister_netdevice_queue(dev, &dev_kill_list);
+ }
+ }
+ unregister_netdevice_many(&dev_kill_list);
+ rtnl_unlock();
+}
+
static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit,
+ .exit_batch = default_device_exit_batch,
};
/*
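The exit_batch hook hands a pernet subsystem the whole list of dying namespaces in one call, which is what lets default_device_exit_batch() collect every device onto a single dev_kill_list and pay for one rtnl_lock()/unregister_netdevice_many() round trip. A hedged sketch of another subsystem adopting the same shape; all foo_* names are placeholders:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

static void __net_exit foo_exit_batch(struct list_head *net_list)
{
        struct net *net;
        LIST_HEAD(kill);

        rtnl_lock();
        list_for_each_entry(net, net_list, exit_list) {
                /* queue this namespace's foo devices onto "kill" here */
        }
        if (!list_empty(&kill))
                unregister_netdevice_many(&kill);
        rtnl_unlock();
}

static struct pernet_operations foo_net_ops = {
        .exit_batch = foo_exit_batch,
};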
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 0a113f26bc9f..b8e9d3a86887 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -41,7 +41,7 @@ static void send_dm_alert(struct work_struct *unused);
* netlink alerts
*/
static int trace_state = TRACE_OFF;
-static spinlock_t trace_state_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(trace_state_lock);
struct per_cpu_dm_data {
struct work_struct dm_alert_work;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bd309384f8b8..02a3b2c69c1e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -72,7 +72,7 @@ static void flush_route_cache(struct fib_rules_ops *ops)
ops->flush_cache(ops);
}
-int fib_rules_register(struct fib_rules_ops *ops)
+static int __fib_rules_register(struct fib_rules_ops *ops)
{
int err = -EEXIST;
struct fib_rules_ops *o;
@@ -102,6 +102,28 @@ errout:
return err;
}
+struct fib_rules_ops *
+fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
+{
+ struct fib_rules_ops *ops;
+ int err;
+
+ ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL);
+ if (ops == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->rules_list);
+ ops->fro_net = net;
+
+ err = __fib_rules_register(ops);
+ if (err) {
+ kfree(ops);
+ ops = ERR_PTR(err);
+ }
+
+ return ops;
+}
+
EXPORT_SYMBOL_GPL(fib_rules_register);
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
@@ -115,6 +137,15 @@ void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
}
EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
+static void fib_rules_put_rcu(struct rcu_head *head)
+{
+ struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
+ struct net *net = ops->fro_net;
+
+ release_net(net);
+ kfree(ops);
+}
+
void fib_rules_unregister(struct fib_rules_ops *ops)
{
struct net *net = ops->fro_net;
@@ -124,8 +155,7 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
fib_rules_cleanup_ops(ops);
spin_unlock(&net->rules_mod_lock);
- synchronize_rcu();
- release_net(net);
+ call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
@@ -135,7 +165,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
{
int ret = 0;
- if (rule->ifindex && (rule->ifindex != fl->iif))
+ if (rule->iifindex && (rule->iifindex != fl->iif))
+ goto out;
+
+ if (rule->oifindex && (rule->oifindex != fl->oif))
goto out;
if ((rule->mark ^ fl->mark) & rule->mark_mask)
@@ -248,14 +281,24 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (tb[FRA_PRIORITY])
rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
- if (tb[FRA_IFNAME]) {
+ if (tb[FRA_IIFNAME]) {
+ struct net_device *dev;
+
+ rule->iifindex = -1;
+ nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
+ dev = __dev_get_by_name(net, rule->iifname);
+ if (dev)
+ rule->iifindex = dev->ifindex;
+ }
+
+ if (tb[FRA_OIFNAME]) {
struct net_device *dev;
- rule->ifindex = -1;
- nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
- dev = __dev_get_by_name(net, rule->ifname);
+ rule->oifindex = -1;
+ nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
+ dev = __dev_get_by_name(net, rule->oifname);
if (dev)
- rule->ifindex = dev->ifindex;
+ rule->oifindex = dev->ifindex;
}
if (tb[FRA_FWMARK]) {
@@ -274,7 +317,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
- if (!rule->pref && ops->default_pref)
+ if (!tb[FRA_PRIORITY] && ops->default_pref)
rule->pref = ops->default_pref(ops);
err = -EINVAL;
@@ -388,8 +431,12 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
(rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
continue;
- if (tb[FRA_IFNAME] &&
- nla_strcmp(tb[FRA_IFNAME], rule->ifname))
+ if (tb[FRA_IIFNAME] &&
+ nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
+ continue;
+
+ if (tb[FRA_OIFNAME] &&
+ nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
continue;
if (tb[FRA_FWMARK] &&
@@ -447,7 +494,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
struct fib_rule *rule)
{
size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
- + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
+ + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
+ + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
+ nla_total_size(4) /* FRA_PRIORITY */
+ nla_total_size(4) /* FRA_TABLE */
+ nla_total_size(4) /* FRA_FWMARK */
@@ -481,11 +529,18 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
- if (rule->ifname[0]) {
- NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);
+ if (rule->iifname[0]) {
+ NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);
- if (rule->ifindex == -1)
- frh->flags |= FIB_RULE_DEV_DETACHED;
+ if (rule->iifindex == -1)
+ frh->flags |= FIB_RULE_IIF_DETACHED;
+ }
+
+ if (rule->oifname[0]) {
+ NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);
+
+ if (rule->oifindex == -1)
+ frh->flags |= FIB_RULE_OIF_DETACHED;
}
if (rule->pref)
@@ -600,9 +655,12 @@ static void attach_rules(struct list_head *rules, struct net_device *dev)
struct fib_rule *rule;
list_for_each_entry(rule, rules, list) {
- if (rule->ifindex == -1 &&
- strcmp(dev->name, rule->ifname) == 0)
- rule->ifindex = dev->ifindex;
+ if (rule->iifindex == -1 &&
+ strcmp(dev->name, rule->iifname) == 0)
+ rule->iifindex = dev->ifindex;
+ if (rule->oifindex == -1 &&
+ strcmp(dev->name, rule->oifname) == 0)
+ rule->oifindex = dev->ifindex;
}
}
@@ -610,9 +668,12 @@ static void detach_rules(struct list_head *rules, struct net_device *dev)
{
struct fib_rule *rule;
- list_for_each_entry(rule, rules, list)
- if (rule->ifindex == dev->ifindex)
- rule->ifindex = -1;
+ list_for_each_entry(rule, rules, list) {
+ if (rule->iifindex == dev->ifindex)
+ rule->iifindex = -1;
+ if (rule->oifindex == dev->ifindex)
+ rule->oifindex = -1;
+ }
}
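fib_rules_register() now copies a template and binds the copy to a namespace, returning the per-net ops (or an ERR_PTR), so per-family code no longer duplicates the allocate-and-free dance by hand. A hedged sketch of the new calling convention; the template is stripped down to a couple of fields and the real match/configure/fill callbacks are elided:

#include <linux/err.h>
#include <linux/socket.h>
#include <net/net_namespace.h>
#include <net/fib_rules.h>

static struct fib_rules_ops foo_rules_ops_template = {
        .family         = AF_INET,
        .rule_size      = sizeof(struct fib_rule),
        /* .match, .configure, .fill, ... as in any fib_rules user */
};

static int __net_init foo_rules_net_init(struct net *net)
{
        struct fib_rules_ops *ops;

        ops = fib_rules_register(&foo_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
        /* stash ops in per-net state; fib_rules_unregister(ops) on exit */
        return 0;
}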
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bf8f7af699d7..5910b555a54a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent;
static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
-static struct net_device *lweventlist;
+static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
static unsigned char default_operstate(const struct net_device *dev)
@@ -89,8 +89,10 @@ static void linkwatch_add_event(struct net_device *dev)
unsigned long flags;
spin_lock_irqsave(&lweventlist_lock, flags);
- dev->link_watch_next = lweventlist;
- lweventlist = dev;
+ if (list_empty(&dev->link_watch_list)) {
+ list_add_tail(&dev->link_watch_list, &lweventlist);
+ dev_hold(dev);
+ }
spin_unlock_irqrestore(&lweventlist_lock, flags);
}
@@ -133,9 +135,35 @@ static void linkwatch_schedule_work(int urgent)
}
+static void linkwatch_do_dev(struct net_device *dev)
+{
+ /*
+ * Make sure the above read is complete since it can be
+ * rewritten as soon as we clear the bit below.
+ */
+ smp_mb__before_clear_bit();
+
+ /* We are about to handle this device,
+ * so new events can be accepted
+ */
+ clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
+
+ rfc2863_policy(dev);
+ if (dev->flags & IFF_UP) {
+ if (netif_carrier_ok(dev))
+ dev_activate(dev);
+ else
+ dev_deactivate(dev);
+
+ netdev_state_change(dev);
+ }
+ dev_put(dev);
+}
+
static void __linkwatch_run_queue(int urgent_only)
{
- struct net_device *next;
+ struct net_device *dev;
+ LIST_HEAD(wrk);
/*
* Limit the number of linkwatch events to one
@@ -153,46 +181,40 @@ static void __linkwatch_run_queue(int urgent_only)
clear_bit(LW_URGENT, &linkwatch_flags);
spin_lock_irq(&lweventlist_lock);
- next = lweventlist;
- lweventlist = NULL;
- spin_unlock_irq(&lweventlist_lock);
+ list_splice_init(&lweventlist, &wrk);
- while (next) {
- struct net_device *dev = next;
+ while (!list_empty(&wrk)) {
- next = dev->link_watch_next;
+ dev = list_first_entry(&wrk, struct net_device, link_watch_list);
+ list_del_init(&dev->link_watch_list);
if (urgent_only && !linkwatch_urgent_event(dev)) {
- linkwatch_add_event(dev);
+ list_add_tail(&dev->link_watch_list, &lweventlist);
continue;
}
-
- /*
- * Make sure the above read is complete since it can be
- * rewritten as soon as we clear the bit below.
- */
- smp_mb__before_clear_bit();
-
- /* We are about to handle this device,
- * so new events can be accepted
- */
- clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-
- rfc2863_policy(dev);
- if (dev->flags & IFF_UP) {
- if (netif_carrier_ok(dev))
- dev_activate(dev);
- else
- dev_deactivate(dev);
-
- netdev_state_change(dev);
- }
-
- dev_put(dev);
+ spin_unlock_irq(&lweventlist_lock);
+ linkwatch_do_dev(dev);
+ spin_lock_irq(&lweventlist_lock);
}
- if (lweventlist)
+ if (!list_empty(&lweventlist))
linkwatch_schedule_work(0);
+ spin_unlock_irq(&lweventlist_lock);
+}
+
+void linkwatch_forget_dev(struct net_device *dev)
+{
+ unsigned long flags;
+ int clean = 0;
+
+ spin_lock_irqsave(&lweventlist_lock, flags);
+ if (!list_empty(&dev->link_watch_list)) {
+ list_del_init(&dev->link_watch_list);
+ clean = 1;
+ }
+ spin_unlock_irqrestore(&lweventlist_lock, flags);
+ if (clean)
+ linkwatch_do_dev(dev);
}
@@ -216,8 +238,6 @@ void linkwatch_fire_event(struct net_device *dev)
bool urgent = linkwatch_urgent_event(dev);
if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
- dev_hold(dev);
-
linkwatch_add_event(dev);
} else if (!urgent)
return;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e587e6819698..a08a35bf0a7b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2092,7 +2092,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
if (h > s_h)
s_idx = 0;
for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
- if (dev_net(n->dev) != net)
+ if (!net_eq(dev_net(n->dev), net))
continue;
if (idx < s_idx)
goto next;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 89de182353b0..fbc1c7472c5e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -525,7 +525,7 @@ void netdev_unregister_kobject(struct net_device * net)
kobject_get(&dev->kobj);
- if (dev_net(net) != &init_net)
+ if (!net_eq(dev_net(net), &init_net))
return;
device_del(dev);
@@ -544,8 +544,11 @@ int netdev_register_kobject(struct net_device *net)
dev_set_name(dev, "%s", net->name);
#ifdef CONFIG_SYSFS
- *groups++ = &netstat_group;
+ /* Allow for a device specific group */
+ if (*groups)
+ groups++;
+ *groups++ = &netstat_group;
#ifdef CONFIG_WIRELESS_EXT_SYSFS
if (net->ieee80211_ptr)
*groups++ = &wireless_group;
@@ -556,7 +559,7 @@ int netdev_register_kobject(struct net_device *net)
#endif
#endif /* CONFIG_SYSFS */
- if (dev_net(net) != &init_net)
+ if (!net_eq(dev_net(net), &init_net))
return 0;
return device_add(dev);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 1c1af2756f38..bd8c4712ea24 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,14 +27,64 @@ EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
+static int ops_init(const struct pernet_operations *ops, struct net *net)
+{
+ int err;
+ if (ops->id && ops->size) {
+ void *data = kzalloc(ops->size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = net_assign_generic(net, *ops->id, data);
+ if (err) {
+ kfree(data);
+ return err;
+ }
+ }
+ if (ops->init)
+ return ops->init(net);
+ return 0;
+}
+
+static void ops_free(const struct pernet_operations *ops, struct net *net)
+{
+ if (ops->id && ops->size) {
+ int id = *ops->id;
+ kfree(net_generic(net, id));
+ }
+}
+
+static void ops_exit_list(const struct pernet_operations *ops,
+ struct list_head *net_exit_list)
+{
+ struct net *net;
+ if (ops->exit) {
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ops->exit(net);
+ }
+ if (ops->exit_batch)
+ ops->exit_batch(net_exit_list);
+}
+
+static void ops_free_list(const struct pernet_operations *ops,
+ struct list_head *net_exit_list)
+{
+ struct net *net;
+ if (ops->size && ops->id) {
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ops_free(ops, net);
+ }
+}
+
/*
* setup_net runs the initializers for the network namespace object.
*/
static __net_init int setup_net(struct net *net)
{
/* Must be called with net_mutex held */
- struct pernet_operations *ops;
+ const struct pernet_operations *ops, *saved_ops;
int error = 0;
+ LIST_HEAD(net_exit_list);
atomic_set(&net->count, 1);
@@ -43,11 +93,9 @@ static __net_init int setup_net(struct net *net)
#endif
list_for_each_entry(ops, &pernet_list, list) {
- if (ops->init) {
- error = ops->init(net);
- if (error < 0)
- goto out_undo;
- }
+ error = ops_init(ops, net);
+ if (error < 0)
+ goto out_undo;
}
out:
return error;
@@ -56,10 +104,14 @@ out_undo:
/* Walk through the list backwards calling the exit functions
* for the pernet modules whose init functions did not fail.
*/
- list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
- if (ops->exit)
- ops->exit(net);
- }
+ list_add(&net->exit_list, &net_exit_list);
+ saved_ops = ops;
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list)
+ ops_exit_list(ops, &net_exit_list);
+
+ ops = saved_ops;
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list)
+ ops_free_list(ops, &net_exit_list);
rcu_barrier();
goto out;
@@ -147,18 +199,29 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
return net_create();
}
+static DEFINE_SPINLOCK(cleanup_list_lock);
+static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
+
static void cleanup_net(struct work_struct *work)
{
- struct pernet_operations *ops;
- struct net *net;
+ const struct pernet_operations *ops;
+ struct net *net, *tmp;
+ LIST_HEAD(net_kill_list);
+ LIST_HEAD(net_exit_list);
- net = container_of(work, struct net, work);
+ /* Atomically snapshot the list of namespaces to cleanup */
+ spin_lock_irq(&cleanup_list_lock);
+ list_replace_init(&cleanup_list, &net_kill_list);
+ spin_unlock_irq(&cleanup_list_lock);
mutex_lock(&net_mutex);
/* Don't let anyone else find us. */
rtnl_lock();
- list_del_rcu(&net->list);
+ list_for_each_entry(net, &net_kill_list, cleanup_list) {
+ list_del_rcu(&net->list);
+ list_add_tail(&net->exit_list, &net_exit_list);
+ }
rtnl_unlock();
/*
@@ -169,10 +232,12 @@ static void cleanup_net(struct work_struct *work)
synchronize_rcu();
/* Run all of the network namespace exit methods */
- list_for_each_entry_reverse(ops, &pernet_list, list) {
- if (ops->exit)
- ops->exit(net);
- }
+ list_for_each_entry_reverse(ops, &pernet_list, list)
+ ops_exit_list(ops, &net_exit_list);
+
+ /* Free the net generic variables */
+ list_for_each_entry_reverse(ops, &pernet_list, list)
+ ops_free_list(ops, &net_exit_list);
mutex_unlock(&net_mutex);
@@ -182,14 +247,23 @@ static void cleanup_net(struct work_struct *work)
rcu_barrier();
/* Finally it is safe to free my network namespace structure */
- net_free(net);
+ list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
+ list_del_init(&net->exit_list);
+ net_free(net);
+ }
}
+static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
/* Cleanup the network namespace in process context */
- INIT_WORK(&net->work, cleanup_net);
- queue_work(netns_wq, &net->work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cleanup_list_lock, flags);
+ list_add(&net->cleanup_list, &cleanup_list);
+ spin_unlock_irqrestore(&cleanup_list_lock, flags);
+
+ queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
@@ -259,18 +333,20 @@ static int __init net_ns_init(void)
pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
-static int register_pernet_operations(struct list_head *list,
- struct pernet_operations *ops)
+static int __register_pernet_operations(struct list_head *list,
+ struct pernet_operations *ops)
{
- struct net *net, *undo_net;
+ struct net *net;
int error;
+ LIST_HEAD(net_exit_list);
list_add_tail(&ops->list, list);
- if (ops->init) {
+ if (ops->init || (ops->id && ops->size)) {
for_each_net(net) {
- error = ops->init(net);
+ error = ops_init(ops, net);
if (error)
goto out_undo;
+ list_add_tail(&net->exit_list, &net_exit_list);
}
}
return 0;
@@ -278,45 +354,82 @@ static int register_pernet_operations(struct list_head *list,
out_undo:
/* If I have an error cleanup all namespaces I initialized */
list_del(&ops->list);
- if (ops->exit) {
- for_each_net(undo_net) {
- if (undo_net == net)
- goto undone;
- ops->exit(undo_net);
- }
- }
-undone:
+ ops_exit_list(ops, &net_exit_list);
+ ops_free_list(ops, &net_exit_list);
return error;
}
-static void unregister_pernet_operations(struct pernet_operations *ops)
+static void __unregister_pernet_operations(struct pernet_operations *ops)
{
struct net *net;
+ LIST_HEAD(net_exit_list);
list_del(&ops->list);
- if (ops->exit)
- for_each_net(net)
- ops->exit(net);
+ for_each_net(net)
+ list_add_tail(&net->exit_list, &net_exit_list);
+ ops_exit_list(ops, &net_exit_list);
+ ops_free_list(ops, &net_exit_list);
}
#else
+static int __register_pernet_operations(struct list_head *list,
+ struct pernet_operations *ops)
+{
+ int err = 0;
+ err = ops_init(ops, &init_net);
+ if (err)
+ ops_free(ops, &init_net);
+ return err;
+
+}
+
+static void __unregister_pernet_operations(struct pernet_operations *ops)
+{
+ LIST_HEAD(net_exit_list);
+ list_add(&init_net.exit_list, &net_exit_list);
+ ops_exit_list(ops, &net_exit_list);
+ ops_free_list(ops, &net_exit_list);
+}
+
+#endif /* CONFIG_NET_NS */
+
+static DEFINE_IDA(net_generic_ids);
+
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
- if (ops->init == NULL)
- return 0;
- return ops->init(&init_net);
+ int error;
+
+ if (ops->id) {
+again:
+ error = ida_get_new_above(&net_generic_ids, 1, ops->id);
+ if (error < 0) {
+ if (error == -EAGAIN) {
+ ida_pre_get(&net_generic_ids, GFP_KERNEL);
+ goto again;
+ }
+ return error;
+ }
+ }
+ error = __register_pernet_operations(list, ops);
+ if (error) {
+ rcu_barrier();
+ if (ops->id)
+ ida_remove(&net_generic_ids, *ops->id);
+ }
+
+ return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
- if (ops->exit)
- ops->exit(&init_net);
+
+ __unregister_pernet_operations(ops);
+ rcu_barrier();
+ if (ops->id)
+ ida_remove(&net_generic_ids, *ops->id);
}
-#endif
-
-static DEFINE_IDA(net_generic_ids);
/**
* register_pernet_subsys - register a network namespace subsystem
@@ -364,38 +477,6 @@ void unregister_pernet_subsys(struct pernet_operations *module)
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
-int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
-{
- int rv;
-
- mutex_lock(&net_mutex);
-again:
- rv = ida_get_new_above(&net_generic_ids, 1, id);
- if (rv < 0) {
- if (rv == -EAGAIN) {
- ida_pre_get(&net_generic_ids, GFP_KERNEL);
- goto again;
- }
- goto out;
- }
- rv = register_pernet_operations(first_device, ops);
- if (rv < 0)
- ida_remove(&net_generic_ids, *id);
-out:
- mutex_unlock(&net_mutex);
- return rv;
-}
-EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
-
-void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
-{
- mutex_lock(&net_mutex);
- unregister_pernet_operations(ops);
- ida_remove(&net_generic_ids, id);
- mutex_unlock(&net_mutex);
-}
-EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);
-
/**
* register_pernet_device - register a network namespace device
* @ops: pernet operations structure for the subsystem
@@ -427,30 +508,6 @@ int register_pernet_device(struct pernet_operations *ops)
}
EXPORT_SYMBOL_GPL(register_pernet_device);
-int register_pernet_gen_device(int *id, struct pernet_operations *ops)
-{
- int error;
- mutex_lock(&net_mutex);
-again:
- error = ida_get_new_above(&net_generic_ids, 1, id);
- if (error) {
- if (error == -EAGAIN) {
- ida_pre_get(&net_generic_ids, GFP_KERNEL);
- goto again;
- }
- goto out;
- }
- error = register_pernet_operations(&pernet_list, ops);
- if (error)
- ida_remove(&net_generic_ids, *id);
- else if (first_device == &pernet_list)
- first_device = &ops->list;
-out:
- mutex_unlock(&net_mutex);
- return error;
-}
-EXPORT_SYMBOL_GPL(register_pernet_gen_device);
-
/**
* unregister_pernet_device - unregister a network namespace netdevice
* @ops: pernet operations structure to manipulate
@@ -470,17 +527,6 @@ void unregister_pernet_device(struct pernet_operations *ops)
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
-void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
-{
- mutex_lock(&net_mutex);
- if (&ops->list == first_device)
- first_device = first_device->next;
- unregister_pernet_operations(ops);
- ida_remove(&net_generic_ids, id);
- mutex_unlock(&net_mutex);
-}
-EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);
-
static void net_generic_release(struct rcu_head *rcu)
{
struct net_generic *ng;
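With ops_init()/ops_free() handling ops->id and ops->size, the register_pernet_gen_subsys()/register_pernet_gen_device() helpers removed above become unnecessary: a module declares an id and the size of its per-net structure, the core allocates and frees the storage, and the data is read back with net_generic(). A hedged sketch; all foo_* names are placeholders:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
        int some_state;
};

static int foo_net_id;

static int __net_init foo_net_init(struct net *net)
{
        struct foo_net *fn = net_generic(net, foo_net_id);

        fn->some_state = 0;     /* storage already allocated by ops_init() */
        return 0;
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .id   = &foo_net_id,
        .size = sizeof(struct foo_net),
};

/* registered with register_pernet_device(&foo_net_ops) as before */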
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5ce017bf4afa..a23b45f08ec9 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -340,6 +340,7 @@ struct pktgen_dev {
__u16 cur_udp_src;
__u16 cur_queue_map;
__u32 cur_pkt_size;
+ __u32 last_pkt_size;
__u8 hh[14];
/* = {
@@ -363,6 +364,7 @@ struct pktgen_dev {
* device name (not when the inject is
* started as it used to do.)
*/
+ char odevname[32];
struct flow_state *flows;
unsigned cflows; /* Concurrent flows (config) */
unsigned lflow; /* Flow length (config) */
@@ -426,7 +428,7 @@ static const char version[] =
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname);
+ const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_reset_all_threads(void);
@@ -528,7 +530,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_printf(seq,
" frags: %d delay: %llu clone_skb: %d ifname: %s\n",
pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
- pkt_dev->clone_skb, pkt_dev->odev->name);
+ pkt_dev->clone_skb, pkt_dev->odevname);
seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
pkt_dev->lflow);
@@ -1688,13 +1690,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
if_lock(t);
list_for_each_entry(pkt_dev, &t->if_list, list)
if (pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
seq_printf(seq, "\nStopped: ");
list_for_each_entry(pkt_dev, &t->if_list, list)
if (!pkt_dev->running)
- seq_printf(seq, "%s ", pkt_dev->odev->name);
+ seq_printf(seq, "%s ", pkt_dev->odevname);
if (t->result[0])
seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1819,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
{
struct pktgen_thread *t;
struct pktgen_dev *pkt_dev = NULL;
+ bool exact = (remove == FIND);
list_for_each_entry(t, &pktgen_threads, th_list) {
- pkt_dev = pktgen_find_dev(t, ifname);
+ pkt_dev = pktgen_find_dev(t, ifname, exact);
if (pkt_dev) {
if (remove) {
if_lock(t);
@@ -1994,7 +1997,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_min (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_min = ntxq - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2005,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
"queue_map_max (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odev->name);
+ pkt_dev->odevname);
pkt_dev->queue_map_max = ntxq - 1;
}
@@ -2049,9 +2052,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
read_lock_bh(&idev->lock);
for (ifp = idev->addr_list; ifp;
ifp = ifp->if_next) {
- if (ifp->scope == IFA_LINK
- && !(ifp->
- flags & IFA_F_TENTATIVE)) {
+ if (ifp->scope == IFA_LINK &&
+ !(ifp->flags & IFA_F_TENTATIVE)) {
ipv6_addr_copy(&pkt_dev->
cur_in6_saddr,
&ifp->addr);
@@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
if (!pkt_dev->running) {
printk(KERN_WARNING "pktgen: interface: %s is already "
- "stopped\n", pkt_dev->odev->name);
+ "stopped\n", pkt_dev->odevname);
return -EINVAL;
}
@@ -3434,7 +3436,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->clone_count--; /* back out increment, OOM */
return;
}
-
+ pkt_dev->last_pkt_size = pkt_dev->skb->len;
pkt_dev->allocated_skbs++;
pkt_dev->clone_count = 0; /* reset counter */
}
@@ -3461,12 +3463,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 1;
pkt_dev->sofar++;
pkt_dev->seq_num++;
- pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
+ pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
break;
default: /* Drivers are not supposed to return other values! */
if (net_ratelimit())
pr_info("pktgen: %s xmit error: %d\n",
- odev->name, ret);
+ pkt_dev->odevname, ret);
pkt_dev->errors++;
/* fallthru */
case NETDEV_TX_LOCKED:
@@ -3569,13 +3571,18 @@ static int pktgen_thread_worker(void *arg)
}
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
- const char *ifname)
+ const char *ifname, bool exact)
{
struct pktgen_dev *p, *pkt_dev = NULL;
- if_lock(t);
+ size_t len = strlen(ifname);
+ if_lock(t);
list_for_each_entry(p, &t->if_list, list)
- if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
+ if (strncmp(p->odevname, ifname, len) == 0) {
+ if (p->odevname[len]) {
+ if (exact || p->odevname[len] != '@')
+ continue;
+ }
pkt_dev = p;
break;
}
@@ -3618,6 +3625,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
{
struct pktgen_dev *pkt_dev;
int err;
+ int node = cpu_to_node(t->cpu);
/* We don't allow a device to be on several threads */
@@ -3627,11 +3635,13 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
return -EBUSY;
}
- pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
+ pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
if (!pkt_dev)
return -ENOMEM;
- pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
+ strcpy(pkt_dev->odevname, ifname);
+ pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
+ node);
if (pkt_dev->flows == NULL) {
kfree(pkt_dev);
return -ENOMEM;
@@ -3693,7 +3703,8 @@ static int __init pktgen_create_thread(int cpu)
struct proc_dir_entry *pe;
struct task_struct *p;
- t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
+ t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
+ cpu_to_node(cpu));
if (!t) {
printk(KERN_ERR "pktgen: ERROR: out of memory, can't "
"create new thread.\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 391a62cd9df6..33148a568199 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -38,7 +38,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-#include <asm/string.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
@@ -53,8 +52,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
-struct rtnl_link
-{
+struct rtnl_link {
rtnl_doit_func doit;
rtnl_dumpit_func dumpit;
};
@@ -65,6 +63,7 @@ void rtnl_lock(void)
{
mutex_lock(&rtnl_mutex);
}
+EXPORT_SYMBOL(rtnl_lock);
void __rtnl_unlock(void)
{
@@ -76,16 +75,19 @@ void rtnl_unlock(void)
/* This fellow will unlock it for us. */
netdev_run_todo();
}
+EXPORT_SYMBOL(rtnl_unlock);
int rtnl_trylock(void)
{
return mutex_trylock(&rtnl_mutex);
}
+EXPORT_SYMBOL(rtnl_trylock);
int rtnl_is_locked(void)
{
return mutex_is_locked(&rtnl_mutex);
}
+EXPORT_SYMBOL(rtnl_is_locked);
static struct rtnl_link *rtnl_msg_handlers[NPROTO];
@@ -168,7 +170,6 @@ int __rtnl_register(int protocol, int msgtype,
return 0;
}
-
EXPORT_SYMBOL_GPL(__rtnl_register);
/**
@@ -188,7 +189,6 @@ void rtnl_register(int protocol, int msgtype,
"protocol = %d, message type = %d\n",
protocol, msgtype);
}
-
EXPORT_SYMBOL_GPL(rtnl_register);
/**
@@ -213,7 +213,6 @@ int rtnl_unregister(int protocol, int msgtype)
return 0;
}
-
EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
@@ -230,7 +229,6 @@ void rtnl_unregister_all(int protocol)
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
}
-
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);
@@ -253,7 +251,6 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
list_add_tail(&ops->list, &link_ops);
return 0;
}
-
EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
@@ -271,7 +268,6 @@ int rtnl_link_register(struct rtnl_link_ops *ops)
rtnl_unlock();
return err;
}
-
EXPORT_SYMBOL_GPL(rtnl_link_register);
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
@@ -309,7 +305,6 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
}
list_del(&ops->list);
}
-
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/**
@@ -322,7 +317,6 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops)
__rtnl_link_unregister(ops);
rtnl_unlock();
}
-
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
@@ -427,12 +421,13 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
struct rtattr *rta;
int size = RTA_LENGTH(attrlen);
- rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
+ rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
rta->rta_type = attrtype;
rta->rta_len = size;
memcpy(RTA_DATA(rta), data, attrlen);
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
+EXPORT_SYMBOL(__rta_fill);
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
{
@@ -454,6 +449,7 @@ int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
return nlmsg_unicast(rtnl, skb, pid);
}
+EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
struct nlmsghdr *nlh, gfp_t flags)
@@ -466,6 +462,7 @@ void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
+EXPORT_SYMBOL(rtnl_notify);
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
@@ -473,6 +470,7 @@ void rtnl_set_sk_err(struct net *net, u32 group, int error)
netlink_set_err(rtnl, 0, group, error);
}
+EXPORT_SYMBOL(rtnl_set_sk_err);
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
@@ -501,6 +499,7 @@ nla_put_failure:
nla_nest_cancel(skb, mx);
return -EMSGSIZE;
}
+EXPORT_SYMBOL(rtnetlink_put_metrics);
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
u32 ts, u32 tsage, long expires, u32 error)
@@ -520,14 +519,13 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
-
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static void set_operstate(struct net_device *dev, unsigned char transition)
{
unsigned char operstate = dev->operstate;
- switch(transition) {
+ switch (transition) {
case IF_OPER_UP:
if ((operstate == IF_OPER_DORMANT ||
operstate == IF_OPER_UNKNOWN) &&
@@ -728,12 +726,27 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
};
+EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
+{
+ struct net *net;
+ /* Examine the link attributes and figure out which
+ * network namespace we are talking about.
+ */
+ if (tb[IFLA_NET_NS_PID])
+ net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+ else
+ net = get_net(src_net);
+ return net;
+}
+EXPORT_SYMBOL(rtnl_link_get_net);
+
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
@@ -757,8 +770,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
int err;
if (tb[IFLA_NET_NS_PID]) {
- struct net *net;
- net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+ struct net *net = rtnl_link_get_net(dev_net(dev), tb);
if (IS_ERR(net)) {
err = PTR_ERR(net);
goto errout;
@@ -932,7 +944,8 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
goto errout;
}
- if ((err = validate_linkmsg(dev, tb)) < 0)
+ err = validate_linkmsg(dev, tb);
+ if (err < 0)
goto errout;
err = do_setlink(dev, ifm, tb, ifname, 0);
@@ -976,8 +989,8 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return 0;
}
-struct net_device *rtnl_create_link(struct net *net, char *ifname,
- const struct rtnl_link_ops *ops, struct nlattr *tb[])
+struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
+ char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
int err;
struct net_device *dev;
@@ -985,7 +998,8 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
unsigned int real_num_queues = 1;
if (ops->get_tx_queues) {
- err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues);
+ err = ops->get_tx_queues(src_net, tb, &num_queues,
+ &real_num_queues);
if (err)
goto err;
}
@@ -994,16 +1008,16 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
if (!dev)
goto err;
+ dev_net_set(dev, net);
+ dev->rtnl_link_ops = ops;
dev->real_num_tx_queues = real_num_queues;
+
if (strchr(dev->name, '%')) {
err = dev_alloc_name(dev, dev->name);
if (err < 0)
goto err_free;
}
- dev_net_set(dev, net);
- dev->rtnl_link_ops = ops;
-
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
if (tb[IFLA_ADDRESS])
@@ -1026,6 +1040,7 @@ err_free:
err:
return ERR_PTR(err);
}
+EXPORT_SYMBOL(rtnl_create_link);
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
@@ -1059,7 +1074,8 @@ replay:
else
dev = NULL;
- if ((err = validate_linkmsg(dev, tb)) < 0)
+ err = validate_linkmsg(dev, tb);
+ if (err < 0)
return err;
if (tb[IFLA_LINKINFO]) {
@@ -1080,6 +1096,7 @@ replay:
if (1) {
struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
+ struct net *dest_net;
if (ops) {
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
@@ -1144,17 +1161,19 @@ replay:
if (!ifname[0])
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
- dev = rtnl_create_link(net, ifname, ops, tb);
+ dest_net = rtnl_link_get_net(net, tb);
+ dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
if (IS_ERR(dev))
err = PTR_ERR(dev);
else if (ops->newlink)
- err = ops->newlink(dev, tb, data);
+ err = ops->newlink(net, dev, tb, data);
else
err = register_netdevice(dev);
-
if (err < 0 && !IS_ERR(dev))
free_netdev(dev);
+
+ put_net(dest_net);
return err;
}
}
@@ -1210,7 +1229,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
if (s_idx == 0)
s_idx = 1;
- for (idx=1; idx<NPROTO; idx++) {
+ for (idx = 1; idx < NPROTO; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
if (idx < s_idx || idx == PF_PACKET)
continue;
@@ -1277,7 +1296,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
return 0;
- family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
+ family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
if (family >= NPROTO)
return -EAFNOSUPPORT;
@@ -1310,7 +1329,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlh->nlmsg_len > min_len) {
int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
- struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);
+ struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
while (RTA_OK(attr, attrlen)) {
unsigned flavor = attr->rta_type;
@@ -1416,14 +1435,3 @@ void __init rtnetlink_init(void)
rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
}
-EXPORT_SYMBOL(__rta_fill);
-EXPORT_SYMBOL(rtnetlink_put_metrics);
-EXPORT_SYMBOL(rtnl_lock);
-EXPORT_SYMBOL(rtnl_trylock);
-EXPORT_SYMBOL(rtnl_unlock);
-EXPORT_SYMBOL(rtnl_is_locked);
-EXPORT_SYMBOL(rtnl_unicast);
-EXPORT_SYMBOL(rtnl_notify);
-EXPORT_SYMBOL(rtnl_set_sk_err);
-EXPORT_SYMBOL(rtnl_create_link);
-EXPORT_SYMBOL(ifla_policy);
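
For context, the calling convention established above — rtnl_link_get_net() picking the destination namespace, rtnl_create_link() taking both namespaces — looks roughly like this from a caller's side. This is a sketch only: create_link_example() is a made-up wrapper and the RTNL lock is assumed to be held, as it is in the rtnl_newlink() path.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>

/* Sketch of the post-patch creation path: src_net is the namespace the
 * netlink request arrived on, dest_net is whatever IFLA_NET_NS_PID selects
 * (or src_net again).  Must be called with the RTNL lock held. */
static int create_link_example(struct net *src_net, char *ifname,
			       const struct rtnl_link_ops *ops,
			       struct nlattr *tb[])
{
	struct net *dest_net;
	struct net_device *dev;
	int err;

	dest_net = rtnl_link_get_net(src_net, tb);
	if (IS_ERR(dest_net))
		return PTR_ERR(dest_net);

	dev = rtnl_create_link(src_net, dest_net, ifname, ops, tb);
	if (IS_ERR(dev))
		err = PTR_ERR(dev);
	else
		err = register_netdevice(dev);

	if (err < 0 && !IS_ERR(dev))
		free_netdev(dev);

	put_net(dest_net);
	return err;
}
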
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
deleted file mode 100644
index 79687dfd6957..000000000000
--- a/net/core/skb_dma_map.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/* skb_dma_map.c: DMA mapping helpers for socket buffers.
- *
- * Copyright (C) David S. Miller <davem@davemloft.net>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/skbuff.h>
-
-int skb_dma_map(struct device *dev, struct sk_buff *skb,
- enum dma_data_direction dir)
-{
- struct skb_shared_info *sp = skb_shinfo(skb);
- dma_addr_t map;
- int i;
-
- map = dma_map_single(dev, skb->data,
- skb_headlen(skb), dir);
- if (dma_mapping_error(dev, map))
- goto out_err;
-
- sp->dma_head = map;
- for (i = 0; i < sp->nr_frags; i++) {
- skb_frag_t *fp = &sp->frags[i];
-
- map = dma_map_page(dev, fp->page, fp->page_offset,
- fp->size, dir);
- if (dma_mapping_error(dev, map))
- goto unwind;
- sp->dma_maps[i] = map;
- }
-
- return 0;
-
-unwind:
- while (--i >= 0) {
- skb_frag_t *fp = &sp->frags[i];
-
- dma_unmap_page(dev, sp->dma_maps[i],
- fp->size, dir);
- }
- dma_unmap_single(dev, sp->dma_head,
- skb_headlen(skb), dir);
-out_err:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(skb_dma_map);
-
-void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
- enum dma_data_direction dir)
-{
- struct skb_shared_info *sp = skb_shinfo(skb);
- int i;
-
- dma_unmap_single(dev, sp->dma_head,
- skb_headlen(skb), dir);
- for (i = 0; i < sp->nr_frags; i++) {
- skb_frag_t *fp = &sp->frags[i];
-
- dma_unmap_page(dev, sp->dma_maps[i],
- fp->size, dir);
- }
-}
-EXPORT_SYMBOL(skb_dma_unmap);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 80a96166df39..bfa3e7865a8c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -493,6 +493,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
struct skb_shared_info *shinfo;
+ if (irqs_disabled())
+ return 0;
+
if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
return 0;
@@ -546,7 +549,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
new->protocol = old->protocol;
new->mark = old->mark;
- new->iif = old->iif;
+ new->skb_iif = old->skb_iif;
__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
@@ -2701,7 +2704,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
NAPI_GRO_CB(skb)->free = 1;
goto done;
- }
+ } else if (skb_gro_len(p) != pinfo->gso_size)
+ return -E2BIG;
headroom = skb_headroom(p);
nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
diff --git a/net/core/sock.c b/net/core/sock.c
index 5a51512f638a..76ff58d43e26 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -417,17 +417,18 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
if (copy_from_user(devname, optval, optlen))
goto out;
- if (devname[0] == '\0') {
- index = 0;
- } else {
- struct net_device *dev = dev_get_by_name(net, devname);
-
+ index = 0;
+ if (devname[0] != '\0') {
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(net, devname);
+ if (dev)
+ index = dev->ifindex;
+ rcu_read_unlock();
ret = -ENODEV;
if (!dev)
goto out;
-
- index = dev->ifindex;
- dev_put(dev);
}
lock_sock(sk);
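
The SO_BINDTODEVICE hunk above is one instance of a pattern this series introduces: resolve a name to an ifindex under rcu_read_lock() with dev_get_by_name_rcu() instead of taking and dropping a device reference. A standalone sketch (name_to_ifindex() is a made-up helper):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Look up an ifindex by name without holding a device reference; the
 * struct net_device is only dereferenced inside the RCU read section. */
static int name_to_ifindex(struct net *net, const char *devname)
{
	struct net_device *dev;
	int index = 0;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, devname);
	if (dev)
		index = dev->ifindex;
	rcu_read_unlock();

	return index ? index : -ENODEV;
}
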
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7db1de0497c6..fcfc5458c399 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -134,7 +134,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
net->core.sysctl_somaxconn = SOMAXCONN;
tbl = netns_core_table;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
if (tbl == NULL)
goto err_dup;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index ac1205df6c86..db9f5b39388f 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1085,8 +1085,8 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
u8 value_byte;
u32 value_int;
- if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg
- || !netdev->dcbnl_ops->setbcnrp)
+ if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
+ !netdev->dcbnl_ops->setbcnrp)
return ret;
ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
@@ -1126,7 +1126,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = -EINVAL;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 00028d4b09d9..efbcfdc12796 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -477,7 +477,8 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
return &rt->u.dst;
}
-static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
+ struct request_values *rv_unused)
{
int err = -1;
struct sk_buff *skb;
@@ -626,7 +627,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
dreq->dreq_iss = dccp_v4_init_sequence(skb);
dreq->dreq_service = service;
- if (dccp_v4_send_response(sk, req))
+ if (dccp_v4_send_response(sk, req, NULL))
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -991,7 +992,6 @@ static struct inet_protosw dccp_v4_protosw = {
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
- .capability = -1,
.no_check = 0,
.flags = INET_PROTOSW_ICSK,
};
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6d89f9f7d5d8..6574215a1f51 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -241,7 +241,8 @@ out:
}
-static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
+ struct request_values *rv_unused)
{
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -468,7 +469,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
dreq->dreq_iss = dccp_v6_init_sequence(skb);
dreq->dreq_service = service;
- if (dccp_v6_send_response(sk, req))
+ if (dccp_v6_send_response(sk, req, NULL))
goto drop_and_free;
inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
@@ -1185,7 +1186,6 @@ static struct inet_protosw dccp_v6_protosw = {
.protocol = IPPROTO_DCCP,
.prot = &dccp_v6_prot,
.ops = &inet6_dccp_ops,
- .capability = -1,
.flags = INET_PROTOSW_ICSK,
};
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5ca49cec95f5..af226a063141 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -184,7 +184,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
* counter (backoff, monitored by dccp_response_timer).
*/
req->retrans++;
- req->rsk_ops->rtx_syn_ack(sk, req);
+ req->rsk_ops->rtx_syn_ack(sk, req, NULL);
}
/* Network Duplicate, discard packet */
return NULL;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 664965c87e16..2b494fac9468 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -675,11 +675,12 @@ char *dn_addr2asc(__u16 addr, char *buf)
-static int dn_create(struct net *net, struct socket *sock, int protocol)
+static int dn_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch(sock->type) {
@@ -749,9 +750,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!(saddr->sdn_flags & SDF_WILD)) {
if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
- read_lock(&dev_base_lock);
+ rcu_read_lock();
ldev = NULL;
- for_each_netdev(&init_net, dev) {
+ for_each_netdev_rcu(&init_net, dev) {
if (!dev->dn_ptr)
continue;
if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
@@ -759,7 +760,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
break;
}
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
if (ldev == NULL)
return -EADDRNOTAVAIL;
}
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 6e1f085db06a..f20dec9cfa06 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -68,7 +68,7 @@ extern struct neigh_table dn_neigh_table;
*/
__le16 decnet_address = 0;
-static DEFINE_RWLOCK(dndev_lock);
+static DEFINE_SPINLOCK(dndev_lock);
static struct net_device *decnet_default_device;
static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
@@ -557,7 +557,8 @@ rarok:
struct net_device *dn_dev_get_default(void)
{
struct net_device *dev;
- read_lock(&dndev_lock);
+
+ spin_lock(&dndev_lock);
dev = decnet_default_device;
if (dev) {
if (dev->dn_ptr)
@@ -565,7 +566,8 @@ struct net_device *dn_dev_get_default(void)
else
dev = NULL;
}
- read_unlock(&dndev_lock);
+ spin_unlock(&dndev_lock);
+
return dev;
}
@@ -575,13 +577,15 @@ int dn_dev_set_default(struct net_device *dev, int force)
int rv = -EBUSY;
if (!dev->dn_ptr)
return -ENODEV;
- write_lock(&dndev_lock);
+
+ spin_lock(&dndev_lock);
if (force || decnet_default_device == NULL) {
old = decnet_default_device;
decnet_default_device = dev;
rv = 0;
}
- write_unlock(&dndev_lock);
+ spin_unlock(&dndev_lock);
+
if (old)
dev_put(old);
return rv;
@@ -589,26 +593,29 @@ int dn_dev_set_default(struct net_device *dev, int force)
static void dn_dev_check_default(struct net_device *dev)
{
- write_lock(&dndev_lock);
+ spin_lock(&dndev_lock);
if (dev == decnet_default_device) {
decnet_default_device = NULL;
} else {
dev = NULL;
}
- write_unlock(&dndev_lock);
+ spin_unlock(&dndev_lock);
+
if (dev)
dev_put(dev);
}
+/*
+ * Called with RTNL
+ */
static struct dn_dev *dn_dev_by_index(int ifindex)
{
struct net_device *dev;
struct dn_dev *dn_dev = NULL;
- dev = dev_get_by_index(&init_net, ifindex);
- if (dev) {
+
+ dev = __dev_get_by_index(&init_net, ifindex);
+ if (dev)
dn_dev = dev->dn_ptr;
- dev_put(dev);
- }
return dn_dev;
}
@@ -629,7 +636,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct dn_ifaddr *ifa, **ifap;
int err = -EINVAL;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
goto errout;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
@@ -668,7 +675,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct dn_ifaddr *ifa;
int err;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
@@ -782,7 +789,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
struct dn_dev *dn_db;
struct dn_ifaddr *ifa;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
skip_ndevs = cb->args[0];
@@ -826,13 +833,17 @@ static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
struct dn_ifaddr *ifa;
int rv = -ENODEV;
+
if (dn_db == NULL)
goto out;
+
+ rtnl_lock();
ifa = dn_db->ifa_list;
if (ifa != NULL) {
*addr = ifa->ifa_local;
rv = 0;
}
+ rtnl_unlock();
out:
return rv;
}
@@ -854,9 +865,7 @@ int dn_dev_bind_default(__le16 *addr)
dev = dn_dev_get_default();
last_chance:
if (dev) {
- read_lock(&dev_base_lock);
rv = dn_dev_get_first(dev, addr);
- read_unlock(&dev_base_lock);
dev_put(dev);
if (rv == 0 || dev == init_net.loopback_dev)
return rv;
@@ -1321,18 +1330,18 @@ static inline int is_dn_dev(struct net_device *dev)
}
static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(&dev_base_lock)
+ __acquires(rcu)
{
int i;
struct net_device *dev;
- read_lock(&dev_base_lock);
+ rcu_read_lock();
if (*pos == 0)
return SEQ_START_TOKEN;
i = 1;
- for_each_netdev(&init_net, dev) {
+ for_each_netdev_rcu(&init_net, dev) {
if (!is_dn_dev(dev))
continue;
@@ -1353,7 +1362,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (v == SEQ_START_TOKEN)
dev = net_device_entry(&init_net.dev_base_head);
- for_each_netdev_continue(&init_net, dev) {
+ for_each_netdev_continue_rcu(&init_net, dev) {
if (!is_dn_dev(dev))
continue;
@@ -1364,9 +1373,9 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void dn_dev_seq_stop(struct seq_file *seq, void *v)
- __releases(&dev_base_lock)
+ __releases(rcu)
{
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static char *dn_type2asc(char type)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 27ea2e9b080a..e9d48700e83a 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -509,7 +509,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
struct rtattr **rta = arg;
struct rtmsg *r = NLMSG_DATA(nlh);
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
if (dn_fib_check_attr(r, rta))
@@ -529,7 +529,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
struct rtattr **rta = arg;
struct rtmsg *r = NLMSG_DATA(nlh);
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
if (dn_fib_check_attr(r, rta))
@@ -607,8 +607,8 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
ASSERT_RTNL();
/* Scan device list */
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
dn_db = dev->dn_ptr;
if (dn_db == NULL)
continue;
@@ -619,7 +619,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
}
}
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
if (found_it == 0) {
fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 57662cabaf9b..a03284061a31 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -908,8 +908,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
dev_put(dev_out);
goto out;
}
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if (!dev->dn_ptr)
continue;
if (!dn_dev_islocal(dev, oldflp->fld_src))
@@ -922,7 +922,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
dev_out = dev;
break;
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
if (dev_out == NULL)
goto out;
dev_hold(dev_out);
@@ -1517,7 +1517,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
struct sk_buff *skb;
struct flowi fl;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
memset(&fl, 0, sizeof(fl));
@@ -1602,7 +1602,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
int h, s_h;
int idx, s_idx;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 72495f25269f..7466c546f286 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -33,7 +33,7 @@
#include <net/dn_dev.h>
#include <net/dn_route.h>
-static struct fib_rules_ops dn_fib_rules_ops;
+static struct fib_rules_ops *dn_fib_rules_ops;
struct dn_fib_rule
{
@@ -56,7 +56,7 @@ int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
};
int err;
- err = fib_rules_lookup(&dn_fib_rules_ops, flp, 0, &arg);
+ err = fib_rules_lookup(dn_fib_rules_ops, flp, 0, &arg);
res->r = arg.rule;
return err;
@@ -217,9 +217,9 @@ static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
struct list_head *pos;
struct fib_rule *rule;
- if (!list_empty(&dn_fib_rules_ops.rules_list)) {
- pos = dn_fib_rules_ops.rules_list.next;
- if (pos->next != &dn_fib_rules_ops.rules_list) {
+ if (!list_empty(&dn_fib_rules_ops->rules_list)) {
+ pos = dn_fib_rules_ops->rules_list.next;
+ if (pos->next != &dn_fib_rules_ops->rules_list) {
rule = list_entry(pos->next, struct fib_rule, list);
if (rule->pref)
return rule->pref - 1;
@@ -234,7 +234,7 @@ static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
dn_rt_cache_flush(-1);
}
-static struct fib_rules_ops dn_fib_rules_ops = {
+static struct fib_rules_ops dn_fib_rules_ops_template = {
.family = AF_DECnet,
.rule_size = sizeof(struct dn_fib_rule),
.addr_size = sizeof(u16),
@@ -247,21 +247,23 @@ static struct fib_rules_ops dn_fib_rules_ops = {
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
.policy = dn_fib_rule_policy,
- .rules_list = LIST_HEAD_INIT(dn_fib_rules_ops.rules_list),
.owner = THIS_MODULE,
.fro_net = &init_net,
};
void __init dn_fib_rules_init(void)
{
- BUG_ON(fib_default_rule_add(&dn_fib_rules_ops, 0x7fff,
+ dn_fib_rules_ops =
+ fib_rules_register(&dn_fib_rules_ops_template, &init_net);
+ BUG_ON(IS_ERR(dn_fib_rules_ops));
+ BUG_ON(fib_default_rule_add(dn_fib_rules_ops, 0x7fff,
RT_TABLE_MAIN, 0));
- fib_rules_register(&dn_fib_rules_ops);
}
void __exit dn_fib_rules_cleanup(void)
{
- fib_rules_unregister(&dn_fib_rules_ops);
+ fib_rules_unregister(dn_fib_rules_ops);
+ rcu_barrier();
}
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 67054b0d550f..b9a33bb5e9cc 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -471,7 +471,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_node *node;
int dumped = 0;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
@@ -581,8 +581,9 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
DN_FIB_SCAN_KEY(f, fp, key) {
if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
break;
- if (f->fn_type == type && f->fn_scope == r->rtm_scope
- && DN_FIB_INFO(f) == fi)
+ if (f->fn_type == type &&
+ f->fn_scope == r->rtm_scope &&
+ DN_FIB_INFO(f) == fi)
goto out;
}
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 26b0ab1e9f56..2036568beea9 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -263,11 +263,10 @@ static int dn_def_dev_strategy(ctl_table *table,
return -ENODEV;
rv = -ENODEV;
- if (dev->dn_ptr != NULL) {
+ if (dev->dn_ptr != NULL)
rv = dn_dev_set_default(dev, 1);
- if (rv)
- dev_put(dev);
- }
+ if (rv)
+ dev_put(dev);
}
return rv;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 5e9426a11c3e..29b4931aae52 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -605,13 +605,14 @@ static struct proto econet_proto = {
* Create an Econet socket
*/
-static int econet_create(struct net *net, struct socket *sock, int protocol)
+static int econet_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct econet_sock *eo;
int err;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/* Econet only provides datagram services. */
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5a883affecd3..dd3db88f8f0a 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -393,10 +393,3 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
return ((ssize_t) l);
}
EXPORT_SYMBOL(sysfs_format_mac);
-
-char *print_mac(char *buf, const unsigned char *addr)
-{
- _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
- return buf;
-}
-EXPORT_SYMBOL(print_mac);
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 4068a9f5113e..ce2d33582859 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,5 @@
-obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o
-nl802154-y := netlink.o nl_policy.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
ccflags-y += -Wall -DDEBUG
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 309348fba72b..bad1c49fd960 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -234,14 +234,14 @@ static const struct proto_ops ieee802154_dgram_ops = {
* set the state.
*/
static int ieee802154_create(struct net *net, struct socket *sock,
- int protocol)
+ int protocol, int kern)
{
struct sock *sk;
int rc;
struct proto *proto;
const struct proto_ops *ops;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
switch (sock->type) {
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
new file mode 100644
index 000000000000..aadec428e6ec
--- /dev/null
+++ b/net/ieee802154/ieee802154.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef IEEE_802154_LOCAL_H
+#define IEEE_802154_LOCAL_H
+
+int __init ieee802154_nl_init(void);
+void __exit ieee802154_nl_exit(void);
+
+#define IEEE802154_OP(_cmd, _func) \
+ { \
+ .cmd = _cmd, \
+ .policy = ieee802154_policy, \
+ .doit = _func, \
+ .dumpit = NULL, \
+ .flags = GENL_ADMIN_PERM, \
+ }
+
+#define IEEE802154_DUMP(_cmd, _func, _dump) \
+ { \
+ .cmd = _cmd, \
+ .policy = ieee802154_policy, \
+ .doit = _func, \
+ .dumpit = _dump, \
+ }
+
+struct genl_info;
+
+struct sk_buff *ieee802154_nl_create(int flags, u8 req);
+int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group);
+struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
+ int flags, u8 req);
+int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
+
+extern struct genl_family nl802154_family;
+int nl802154_mac_register(void);
+int nl802154_phy_register(void);
+
+#endif
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index ca767bde17a4..33137b99e471 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -23,21 +23,15 @@
*/
#include <linux/kernel.h>
-#include <linux/if_arp.h>
-#include <linux/netdevice.h>
-#include <net/netlink.h>
#include <net/genetlink.h>
-#include <net/sock.h>
#include <linux/nl802154.h>
-#include <net/af_ieee802154.h>
-#include <net/nl802154.h>
-#include <net/ieee802154.h>
-#include <net/ieee802154_netdev.h>
+
+#include "ieee802154.h"
static unsigned int ieee802154_seq_num;
static DEFINE_SPINLOCK(ieee802154_seq_lock);
-static struct genl_family ieee802154_coordinator_family = {
+struct genl_family nl802154_family = {
.id = GENL_ID_GENERATE,
.hdrsize = 0,
.name = IEEE802154_NL_NAME,
@@ -45,16 +39,8 @@ static struct genl_family ieee802154_coordinator_family = {
.maxattr = IEEE802154_ATTR_MAX,
};
-static struct genl_multicast_group ieee802154_coord_mcgrp = {
- .name = IEEE802154_MCAST_COORD_NAME,
-};
-
-static struct genl_multicast_group ieee802154_beacon_mcgrp = {
- .name = IEEE802154_MCAST_BEACON_NAME,
-};
-
/* Requests to userspace */
-static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
+struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -65,7 +51,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
spin_lock_irqsave(&ieee802154_seq_lock, f);
hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
- &ieee802154_coordinator_family, flags, req);
+ &nl802154_family, flags, req);
spin_unlock_irqrestore(&ieee802154_seq_lock, f);
if (!hdr) {
nlmsg_free(msg);
@@ -75,7 +61,7 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
return msg;
}
-static int ieee802154_nl_finish(struct sk_buff *msg)
+int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
{
/* XXX: nlh is right at the start of msg */
void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
@@ -83,607 +69,70 @@ static int ieee802154_nl_finish(struct sk_buff *msg)
if (genlmsg_end(msg, hdr) < 0)
goto out;
- return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id,
- GFP_ATOMIC);
+ return genlmsg_multicast(msg, 0, group, GFP_ATOMIC);
out:
nlmsg_free(msg);
return -ENOBUFS;
}
-int ieee802154_nl_assoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 cap)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- if (addr->addr_type != IEEE802154_ADDR_LONG) {
- pr_err("%s: received non-long source address!\n", __func__);
- return -EINVAL;
- }
-
- msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
- addr->hwaddr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
-
-int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
- u8 status)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
-
-int ieee802154_nl_disassoc_indic(struct net_device *dev,
- struct ieee802154_addr *addr, u8 reason)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- if (addr->addr_type == IEEE802154_ADDR_LONG)
- NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
- addr->hwaddr);
- else
- NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
- addr->short_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
-
-int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
-
-int ieee802154_nl_beacon_indic(struct net_device *dev,
- u16 panid, u16 coord_addr)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
-
-int ieee802154_nl_scan_confirm(struct net_device *dev,
- u8 status, u8 scan_type, u32 unscanned, u8 page,
- u8 *edl/* , struct list_head *pan_desc_list */)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
- NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
- NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
- NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
-
- if (edl)
- NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
-
-int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
-{
- struct sk_buff *msg;
-
- pr_debug("%s\n", __func__);
-
- msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
- if (!msg)
- return -ENOBUFS;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
-
- NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
-
- return ieee802154_nl_finish(msg);
-
-nla_put_failure:
- nlmsg_free(msg);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(ieee802154_nl_start_confirm);
-
-static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
- u32 seq, int flags, struct net_device *dev)
+struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
+ int flags, u8 req)
{
void *hdr;
+ struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
- pr_debug("%s\n", __func__);
-
- hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags,
- IEEE802154_LIST_IFACE);
- if (!hdr)
- goto out;
-
- NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
- NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
-
- NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
- dev->dev_addr);
- NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
- ieee802154_mlme_ops(dev)->get_short_addr(dev));
- NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
- ieee802154_mlme_ops(dev)->get_pan_id(dev));
- return genlmsg_end(msg, hdr);
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
-out:
- return -EMSGSIZE;
-}
-
-/* Requests from userspace */
-static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
-{
- struct net_device *dev;
-
- if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
- char name[IFNAMSIZ + 1];
- nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
- sizeof(name));
- dev = dev_get_by_name(&init_net, name);
- } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
- dev = dev_get_by_index(&init_net,
- nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
- else
- return NULL;
-
- if (!dev)
+ if (!msg)
return NULL;
- if (dev->type != ARPHRD_IEEE802154) {
- dev_put(dev);
+ hdr = genlmsg_put_reply(msg, info,
+ &nl802154_family, flags, req);
+ if (!hdr) {
+ nlmsg_free(msg);
return NULL;
}
- return dev;
-}
-
-static int ieee802154_associate_req(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct net_device *dev;
- struct ieee802154_addr addr;
- u8 page;
- int ret = -EINVAL;
-
- if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
- !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
- (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
- !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
- !info->attrs[IEEE802154_ATTR_CAPABILITY])
- return -EINVAL;
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
- addr.addr_type = IEEE802154_ADDR_LONG;
- nla_memcpy(addr.hwaddr,
- info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
- IEEE802154_ADDR_LEN);
- } else {
- addr.addr_type = IEEE802154_ADDR_SHORT;
- addr.short_addr = nla_get_u16(
- info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
- }
- addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
-
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
-
- ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
- nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
- page,
- nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
-
- dev_put(dev);
- return ret;
-}
-
-static int ieee802154_associate_resp(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct net_device *dev;
- struct ieee802154_addr addr;
- int ret = -EINVAL;
-
- if (!info->attrs[IEEE802154_ATTR_STATUS] ||
- !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
- !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
- return -EINVAL;
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- addr.addr_type = IEEE802154_ADDR_LONG;
- nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
- IEEE802154_ADDR_LEN);
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-
-
- ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
- nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
- nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
-
- dev_put(dev);
- return ret;
-}
-
-static int ieee802154_disassociate_req(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct net_device *dev;
- struct ieee802154_addr addr;
- int ret = -EINVAL;
-
- if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
- !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
- !info->attrs[IEEE802154_ATTR_REASON])
- return -EINVAL;
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
- addr.addr_type = IEEE802154_ADDR_LONG;
- nla_memcpy(addr.hwaddr,
- info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
- IEEE802154_ADDR_LEN);
- } else {
- addr.addr_type = IEEE802154_ADDR_SHORT;
- addr.short_addr = nla_get_u16(
- info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
- }
- addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-
- ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
- nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
-
- dev_put(dev);
- return ret;
-}
-
-/*
- * PANid, channel, beacon_order = 15, superframe_order = 15,
- * PAN_coordinator, battery_life_extension = 0,
- * coord_realignment = 0, security_enable = 0
-*/
-static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
-{
- struct net_device *dev;
- struct ieee802154_addr addr;
-
- u8 channel, bcn_ord, sf_ord;
- u8 page;
- int pan_coord, blx, coord_realign;
- int ret;
-
- if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
- !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
- !info->attrs[IEEE802154_ATTR_CHANNEL] ||
- !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
- !info->attrs[IEEE802154_ATTR_SF_ORD] ||
- !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
- !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
- !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
- )
- return -EINVAL;
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- addr.addr_type = IEEE802154_ADDR_SHORT;
- addr.short_addr = nla_get_u16(
- info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
- addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
-
- channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
- bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
- sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
- pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
- blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
- coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
-
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
-
-
- if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
- ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
- dev_put(dev);
- return -EINVAL;
- }
-
- ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
- bcn_ord, sf_ord, pan_coord, blx, coord_realign);
-
- dev_put(dev);
- return ret;
-}
-
-static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
-{
- struct net_device *dev;
- int ret;
- u8 type;
- u32 channels;
- u8 duration;
- u8 page;
-
- if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
- !info->attrs[IEEE802154_ATTR_CHANNELS] ||
- !info->attrs[IEEE802154_ATTR_DURATION])
- return -EINVAL;
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
- channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
- duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
-
- if (info->attrs[IEEE802154_ATTR_PAGE])
- page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
- else
- page = 0;
-
-
- ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
- duration);
-
- dev_put(dev);
- return ret;
+ return msg;
}
-static int ieee802154_list_iface(struct sk_buff *skb,
- struct genl_info *info)
+int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
{
- /* Request for interface name, index, type, IEEE address,
- PAN Id, short address */
- struct sk_buff *msg;
- struct net_device *dev = NULL;
- int rc = -ENOBUFS;
-
- pr_debug("%s\n", __func__);
-
- dev = ieee802154_nl_get_dev(info);
- if (!dev)
- return -ENODEV;
-
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!msg)
- goto out_dev;
-
- rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
- 0, dev);
- if (rc < 0)
- goto out_free;
+ /* XXX: nlh is right at the start of msg */
+ void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
- dev_put(dev);
+ if (genlmsg_end(msg, hdr) < 0)
+ goto out;
- return genlmsg_unicast(&init_net, msg, info->snd_pid);
-out_free:
+ return genlmsg_reply(msg, info);
+out:
nlmsg_free(msg);
-out_dev:
- dev_put(dev);
- return rc;
-
-}
-
-static int ieee802154_dump_iface(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- struct net_device *dev;
- int idx;
- int s_idx = cb->args[0];
-
- pr_debug("%s\n", __func__);
-
- idx = 0;
- for_each_netdev(net, dev) {
- if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
- goto cont;
-
- if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
- break;
-cont:
- idx++;
- }
- cb->args[0] = idx;
-
- return skb->len;
+ return -ENOBUFS;
}
-#define IEEE802154_OP(_cmd, _func) \
- { \
- .cmd = _cmd, \
- .policy = ieee802154_policy, \
- .doit = _func, \
- .dumpit = NULL, \
- .flags = GENL_ADMIN_PERM, \
- }
-
-#define IEEE802154_DUMP(_cmd, _func, _dump) \
- { \
- .cmd = _cmd, \
- .policy = ieee802154_policy, \
- .doit = _func, \
- .dumpit = _dump, \
- }
-
-static struct genl_ops ieee802154_coordinator_ops[] = {
- IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
- IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
- IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
- IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
- IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
- IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
- ieee802154_dump_iface),
-};
-
-static int __init ieee802154_nl_init(void)
+int __init ieee802154_nl_init(void)
{
int rc;
- int i;
- rc = genl_register_family(&ieee802154_coordinator_family);
+ rc = genl_register_family(&nl802154_family);
if (rc)
goto fail;
- rc = genl_register_mc_group(&ieee802154_coordinator_family,
- &ieee802154_coord_mcgrp);
+ rc = nl802154_mac_register();
if (rc)
goto fail;
- rc = genl_register_mc_group(&ieee802154_coordinator_family,
- &ieee802154_beacon_mcgrp);
+ rc = nl802154_phy_register();
if (rc)
goto fail;
-
- for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
- rc = genl_register_ops(&ieee802154_coordinator_family,
- &ieee802154_coordinator_ops[i]);
- if (rc)
- goto fail;
- }
-
return 0;
fail:
- genl_unregister_family(&ieee802154_coordinator_family);
+ genl_unregister_family(&nl802154_family);
return rc;
}
-module_init(ieee802154_nl_init);
-static void __exit ieee802154_nl_exit(void)
+void __exit ieee802154_nl_exit(void)
{
- genl_unregister_family(&ieee802154_coordinator_family);
+ genl_unregister_family(&nl802154_family);
}
-module_exit(ieee802154_nl_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("ieee 802.15.4 configuration interface");
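
The two helpers added above, ieee802154_nl_new_reply() and ieee802154_nl_reply(), are what the split-out nl-mac.c/nl-phy.c files use to answer requests. A hypothetical doit handler using them might look like the following sketch (example_list_cmd() and the attribute value are made up; the NLA_PUT_* macros jump to nla_put_failure on error):

#include <linux/errno.h>
#include <net/genetlink.h>
#include <linux/nl802154.h>

#include "ieee802154.h"

/* Hypothetical GENL doit handler: build a reply addressed to the
 * requester, fill in attributes, and unicast it back. */
static int example_list_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;

	msg = ieee802154_nl_new_reply(info, 0, IEEE802154_LIST_IFACE);
	if (!msg)
		return -ENOBUFS;

	NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, 0);	/* example value */

	return ieee802154_nl_reply(msg, info);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}
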
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
new file mode 100644
index 000000000000..135c1678fb11
--- /dev/null
+++ b/net/ieee802154/nl-mac.c
@@ -0,0 +1,617 @@
+/*
+ * Netlink interface for IEEE 802.15.4 stack
+ *
+ * Copyright 2007, 2008 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Maxim Osipov <maxim.osipov@siemens.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+#include <linux/nl802154.h>
+#include <net/af_ieee802154.h>
+#include <net/nl802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/wpan-phy.h>
+
+#include "ieee802154.h"
+
+static struct genl_multicast_group ieee802154_coord_mcgrp = {
+ .name = IEEE802154_MCAST_COORD_NAME,
+};
+
+static struct genl_multicast_group ieee802154_beacon_mcgrp = {
+ .name = IEEE802154_MCAST_BEACON_NAME,
+};
+
+int ieee802154_nl_assoc_indic(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 cap)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ if (addr->addr_type != IEEE802154_ADDR_LONG) {
+ pr_err("%s: received non-long source address!\n", __func__);
+ return -EINVAL;
+ }
+
+ msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+ addr->hwaddr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
+
+int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
+ u8 status)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr);
+ NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
+
+int ieee802154_nl_disassoc_indic(struct net_device *dev,
+ struct ieee802154_addr *addr, u8 reason)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ if (addr->addr_type == IEEE802154_ADDR_LONG)
+ NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
+ addr->hwaddr);
+ else
+ NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+ addr->short_addr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_disassoc_indic);
+
+int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
+
+int ieee802154_nl_beacon_indic(struct net_device *dev,
+ u16 panid, u16 coord_addr)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+ NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr);
+ NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
+
+int ieee802154_nl_scan_confirm(struct net_device *dev,
+ u8 status, u8 scan_type, u32 unscanned, u8 page,
+ u8 *edl/* , struct list_head *pan_desc_list */)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
+ NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
+ NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
+
+ if (edl)
+ NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
+
+int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
+
+ return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_start_confirm);
+
+static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
+ u32 seq, int flags, struct net_device *dev)
+{
+ void *hdr;
+ struct wpan_phy *phy;
+
+ pr_debug("%s\n", __func__);
+
+ hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
+ IEEE802154_LIST_IFACE);
+ if (!hdr)
+ goto out;
+
+ phy = ieee802154_mlme_ops(dev)->get_phy(dev);
+ BUG_ON(!phy);
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+ NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
+ ieee802154_mlme_ops(dev)->get_short_addr(dev));
+ NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
+ ieee802154_mlme_ops(dev)->get_pan_id(dev));
+ wpan_phy_put(phy);
+ return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+ wpan_phy_put(phy);
+ genlmsg_cancel(msg, hdr);
+out:
+ return -EMSGSIZE;
+}
+
+/* Requests from userspace */
+static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
+{
+ struct net_device *dev;
+
+ if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
+ char name[IFNAMSIZ + 1];
+ nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
+ sizeof(name));
+ dev = dev_get_by_name(&init_net, name);
+ } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
+ dev = dev_get_by_index(&init_net,
+ nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
+ else
+ return NULL;
+
+ if (!dev)
+ return NULL;
+
+ if (dev->type != ARPHRD_IEEE802154) {
+ dev_put(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+static int ieee802154_associate_req(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net_device *dev;
+ struct ieee802154_addr addr;
+ u8 page;
+ int ret = -EINVAL;
+
+ if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
+ !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
+ (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] &&
+ !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) ||
+ !info->attrs[IEEE802154_ATTR_CAPABILITY])
+ return -EINVAL;
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
+ addr.addr_type = IEEE802154_ADDR_LONG;
+ nla_memcpy(addr.hwaddr,
+ info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
+ IEEE802154_ADDR_LEN);
+ } else {
+ addr.addr_type = IEEE802154_ADDR_SHORT;
+ addr.short_addr = nla_get_u16(
+ info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
+ }
+ addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
+
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
+ ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
+ nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
+ page,
+ nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
+
+ dev_put(dev);
+ return ret;
+}
+
+static int ieee802154_associate_resp(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net_device *dev;
+ struct ieee802154_addr addr;
+ int ret = -EINVAL;
+
+ if (!info->attrs[IEEE802154_ATTR_STATUS] ||
+ !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] ||
+ !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR])
+ return -EINVAL;
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ addr.addr_type = IEEE802154_ADDR_LONG;
+ nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
+ IEEE802154_ADDR_LEN);
+ addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+
+
+ ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
+ nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
+ nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
+
+ dev_put(dev);
+ return ret;
+}
+
+static int ieee802154_disassociate_req(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net_device *dev;
+ struct ieee802154_addr addr;
+ int ret = -EINVAL;
+
+ if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
+ !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
+ !info->attrs[IEEE802154_ATTR_REASON])
+ return -EINVAL;
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
+ addr.addr_type = IEEE802154_ADDR_LONG;
+ nla_memcpy(addr.hwaddr,
+ info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
+ IEEE802154_ADDR_LEN);
+ } else {
+ addr.addr_type = IEEE802154_ADDR_SHORT;
+ addr.short_addr = nla_get_u16(
+ info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
+ }
+ addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+
+ ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
+ nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
+
+ dev_put(dev);
+ return ret;
+}
+
+/*
+ * PANid, channel, beacon_order = 15, superframe_order = 15,
+ * PAN_coordinator, battery_life_extension = 0,
+ * coord_realignment = 0, security_enable = 0
+ */
+static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct ieee802154_addr addr;
+
+ u8 channel, bcn_ord, sf_ord;
+ u8 page;
+ int pan_coord, blx, coord_realign;
+ int ret;
+
+ if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] ||
+ !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] ||
+ !info->attrs[IEEE802154_ATTR_CHANNEL] ||
+ !info->attrs[IEEE802154_ATTR_BCN_ORD] ||
+ !info->attrs[IEEE802154_ATTR_SF_ORD] ||
+ !info->attrs[IEEE802154_ATTR_PAN_COORD] ||
+ !info->attrs[IEEE802154_ATTR_BAT_EXT] ||
+ !info->attrs[IEEE802154_ATTR_COORD_REALIGN]
+ )
+ return -EINVAL;
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ addr.addr_type = IEEE802154_ADDR_SHORT;
+ addr.short_addr = nla_get_u16(
+ info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
+ addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
+
+ channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
+ bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
+ sf_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]);
+ pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]);
+ blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
+ coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
+
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
+
+ if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
+ ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
+ dev_put(dev);
+ return -EINVAL;
+ }
+
+ ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
+ bcn_ord, sf_ord, pan_coord, blx, coord_realign);
+
+ dev_put(dev);
+ return ret;
+}
+
+static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ int ret;
+ u8 type;
+ u32 channels;
+ u8 duration;
+ u8 page;
+
+ if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
+ !info->attrs[IEEE802154_ATTR_CHANNELS] ||
+ !info->attrs[IEEE802154_ATTR_DURATION])
+ return -EINVAL;
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]);
+ channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
+ duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
+
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
+
+ ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
+ duration);
+
+ dev_put(dev);
+ return ret;
+}
+
+static int ieee802154_list_iface(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ /* Request for interface name, index, type, IEEE address,
+ PAN Id, short address */
+ struct sk_buff *msg;
+ struct net_device *dev = NULL;
+ int rc = -ENOBUFS;
+
+ pr_debug("%s\n", __func__);
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ goto out_dev;
+
+ rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
+ 0, dev);
+ if (rc < 0)
+ goto out_free;
+
+ dev_put(dev);
+
+ return genlmsg_reply(msg, info);
+out_free:
+ nlmsg_free(msg);
+out_dev:
+ dev_put(dev);
+ return rc;
+
+}
+
+static int ieee802154_dump_iface(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int idx;
+ int s_idx = cb->args[0];
+
+ pr_debug("%s\n", __func__);
+
+ idx = 0;
+ for_each_netdev(net, dev) {
+ if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
+ goto cont;
+
+ if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
+ break;
+cont:
+ idx++;
+ }
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+static struct genl_ops ieee802154_coordinator_ops[] = {
+ IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
+ IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
+ IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
+ IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
+ IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
+ IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
+ ieee802154_dump_iface),
+};
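/*
 * The table above builds struct genl_ops entries through the helpers from
 * ieee802154.h (added by this series).  Roughly, they expand to the following
 * initializers, all sharing ieee802154_policy (paraphrased sketch):
 */
#define IEEE802154_OP(_cmd, _func)			\
	{						\
		.cmd	= _cmd,				\
		.policy	= ieee802154_policy,		\
		.doit	= _func,			\
		.dumpit	= NULL,				\
		.flags	= GENL_ADMIN_PERM,		\
	}

#define IEEE802154_DUMP(_cmd, _func, _dump)		\
	{						\
		.cmd	= _cmd,				\
		.policy	= ieee802154_policy,		\
		.doit	= _func,			\
		.dumpit	= _dump,			\
	}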
+
+/*
+ * No need to unregister as family unregistration will do it.
+ */
+int nl802154_mac_register(void)
+{
+ int i;
+ int rc;
+
+ rc = genl_register_mc_group(&nl802154_family,
+ &ieee802154_coord_mcgrp);
+ if (rc)
+ return rc;
+
+ rc = genl_register_mc_group(&nl802154_family,
+ &ieee802154_beacon_mcgrp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) {
+ rc = genl_register_ops(&nl802154_family,
+ &ieee802154_coordinator_ops[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
new file mode 100644
index 000000000000..199a2d9d12f9
--- /dev/null
+++ b/net/ieee802154/nl-phy.c
@@ -0,0 +1,344 @@
+/*
+ * Netlink interface for IEEE 802.15.4 stack
+ *
+ * Copyright 2007, 2008 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Sergey Lapin <slapin@ossfans.org>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Maxim Osipov <maxim.osipov@siemens.com>
+ */
+
+#include <linux/kernel.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/wpan-phy.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/rtnetlink.h> /* for rtnl_{un,}lock */
+#include <linux/nl802154.h>
+
+#include "ieee802154.h"
+
+static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
+ u32 seq, int flags, struct wpan_phy *phy)
+{
+ void *hdr;
+ int i, pages = 0;
+ uint32_t *buf = kzalloc(32 * sizeof(uint32_t), GFP_KERNEL);
+
+ pr_debug("%s\n", __func__);
+
+ if (!buf)
+ goto out;
+
+ hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
+ IEEE802154_LIST_PHY);
+ if (!hdr)
+ goto out;
+
+ mutex_lock(&phy->pib_lock);
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page);
+ NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel);
+ for (i = 0; i < 32; i++) {
+ if (phy->channels_supported[i])
+ buf[pages++] = phy->channels_supported[i] | (i << 27);
+ }
+ if (pages)
+ NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+ pages * sizeof(uint32_t), buf);
+
+ mutex_unlock(&phy->pib_lock);
+ return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+ mutex_unlock(&phy->pib_lock);
+ genlmsg_cancel(msg, hdr);
+out:
+ kfree(buf);
+ return -EMSGSIZE;
+}
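/*
 * Each word placed in IEEE802154_ATTR_CHANNEL_PAGE_LIST above packs the page
 * number into the top five bits and the per-page channel bitmap into the low
 * 27 bits.  A hypothetical consumer would unpack it like this (sketch, not
 * part of this patch):
 */
static void example_decode_page(u32 word, u8 *page, u32 *channel_mask)
{
	*page = word >> 27;			/* pages 0..31 */
	*channel_mask = word & 0x07ffffff;	/* bit n set => channel n supported */
}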
+
+static int ieee802154_list_phy(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ /* Request for a PHY description: name, current page and channel,
+ list of supported channel pages */
+ struct sk_buff *msg;
+ struct wpan_phy *phy;
+ const char *name;
+ int rc = -ENOBUFS;
+
+ pr_debug("%s\n", __func__);
+
+ if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+ if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
+ return -EINVAL; /* phy name should be null-terminated */
+
+
+ phy = wpan_phy_find(name);
+ if (!phy)
+ return -ENODEV;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ goto out_dev;
+
+ rc = ieee802154_nl_fill_phy(msg, info->snd_pid, info->snd_seq,
+ 0, phy);
+ if (rc < 0)
+ goto out_free;
+
+ wpan_phy_put(phy);
+
+ return genlmsg_reply(msg, info);
+out_free:
+ nlmsg_free(msg);
+out_dev:
+ wpan_phy_put(phy);
+ return rc;
+
+}
+
+struct dump_phy_data {
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
+ int idx, s_idx;
+};
+
+static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
+{
+ int rc;
+ struct dump_phy_data *data = _data;
+
+ pr_debug("%s\n", __func__);
+
+ if (data->idx++ < data->s_idx)
+ return 0;
+
+ rc = ieee802154_nl_fill_phy(data->skb,
+ NETLINK_CB(data->cb->skb).pid,
+ data->cb->nlh->nlmsg_seq,
+ NLM_F_MULTI,
+ phy);
+
+ if (rc < 0) {
+ data->idx--;
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ieee802154_dump_phy(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct dump_phy_data data = {
+ .cb = cb,
+ .skb = skb,
+ .s_idx = cb->args[0],
+ .idx = 0,
+ };
+
+ pr_debug("%s\n", __func__);
+
+ wpan_phy_for_each(ieee802154_dump_phy_iter, &data);
+
+ cb->args[0] = data.idx;
+
+ return skb->len;
+}
+
+static int ieee802154_add_iface(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct sk_buff *msg;
+ struct wpan_phy *phy;
+ const char *name;
+ const char *devname;
+ int rc = -ENOBUFS;
+ struct net_device *dev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!info->attrs[IEEE802154_ATTR_PHY_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+ if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
+ return -EINVAL; /* phy name should be null-terminated */
+
+ if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
+ devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
+ if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
+ != '\0')
+ return -EINVAL; /* dev name should be null-terminated */
+ } else {
+ devname = "wpan%d";
+ }
+
+ if (strlen(devname) >= IFNAMSIZ)
+ return -ENAMETOOLONG;
+
+ phy = wpan_phy_find(name);
+ if (!phy)
+ return -ENODEV;
+
+ msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE);
+ if (!msg)
+ goto out_dev;
+
+ if (!phy->add_iface) {
+ rc = -EINVAL;
+ goto nla_put_failure;
+ }
+
+ dev = phy->add_iface(phy, devname);
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ goto nla_put_failure;
+ }
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+
+ dev_put(dev);
+
+ wpan_phy_put(phy);
+
+ return ieee802154_nl_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+out_dev:
+ wpan_phy_put(phy);
+ return rc;
+}
+
+static int ieee802154_del_iface(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct sk_buff *msg;
+ struct wpan_phy *phy;
+ const char *name;
+ int rc;
+ struct net_device *dev;
+
+ pr_debug("%s\n", __func__);
+
+ if (!info->attrs[IEEE802154_ATTR_DEV_NAME])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]);
+ if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0')
+ return -EINVAL; /* name should be null-terminated */
+
+ dev = dev_get_by_name(genl_info_net(info), name);
+ if (!dev)
+ return -ENODEV;
+
+ phy = ieee802154_mlme_ops(dev)->get_phy(dev);
+ BUG_ON(!phy);
+
+ rc = -EINVAL;
+ /* phy name is optional, but should be checked if it's given */
+ if (info->attrs[IEEE802154_ATTR_PHY_NAME]) {
+ struct wpan_phy *phy2;
+
+ const char *pname =
+ nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+ if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1]
+ != '\0')
+ /* name should be null-terminated */
+ goto out_dev;
+
+ phy2 = wpan_phy_find(pname);
+ if (!phy2)
+ goto out_dev;
+
+ if (phy != phy2) {
+ wpan_phy_put(phy2);
+ goto out_dev;
+ }
+ }
+
+ rc = -ENOBUFS;
+
+ msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE);
+ if (!msg)
+ goto out_dev;
+
+ if (!phy->del_iface) {
+ rc = -EINVAL;
+ goto nla_put_failure;
+ }
+
+ rtnl_lock();
+ phy->del_iface(phy, dev);
+
+ /* We don't have the device anymore */
+ dev_put(dev);
+ dev = NULL;
+
+ rtnl_unlock();
+
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name);
+
+ wpan_phy_put(phy);
+
+ return ieee802154_nl_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+out_dev:
+ wpan_phy_put(phy);
+ if (dev)
+ dev_put(dev);
+
+ return rc;
+}
+
+static struct genl_ops ieee802154_phy_ops[] = {
+ IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
+ ieee802154_dump_phy),
+ IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
+ IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
+};
+
+/*
+ * No need to unregister as family unregistration will do it.
+ */
+int nl802154_phy_register(void)
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) {
+ rc = genl_register_ops(&nl802154_family,
+ &ieee802154_phy_ops[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 2363ebee02e7..6adda4d46f95 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -27,6 +27,7 @@
const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
[IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
+ [IEEE802154_ATTR_PHY_NAME] = { .type = NLA_STRING, },
[IEEE802154_ATTR_STATUS] = { .type = NLA_U8, },
[IEEE802154_ATTR_SHORT_ADDR] = { .type = NLA_U16, },
@@ -50,5 +51,6 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_CHANNELS] = { .type = NLA_U32, },
[IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
[IEEE802154_ATTR_ED_LIST] = { .len = 27 },
+ [IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
};
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index f306604da67a..268691256a6d 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -22,6 +22,8 @@
#include <net/wpan-phy.h>
+#include "ieee802154.h"
+
#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
static ssize_t name ## _show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -30,7 +32,7 @@ static ssize_t name ## _show(struct device *dev, \
int ret; \
\
mutex_lock(&phy->pib_lock); \
- ret = sprintf(buf, format_string "\n", args); \
+ ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
mutex_unlock(&phy->pib_lock); \
return ret; \
}
@@ -40,12 +42,30 @@ static ssize_t name ## _show(struct device *dev, \
MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
-MASTER_SHOW(channels_supported, "%#x");
MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
((signed char) (phy->transmit_power << 2)) >> 2,
(phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1 );
MASTER_SHOW(cca_mode, "%d");
+static ssize_t channels_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+ int ret;
+ int i, len = 0;
+
+ mutex_lock(&phy->pib_lock);
+ for (i = 0; i < 32; i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len,
+ "%#09x\n", phy->channels_supported[i]);
+ if (ret < 0)
+ break;
+ len += ret;
+ }
+ mutex_unlock(&phy->pib_lock);
+ return len;
+}
+
static struct device_attribute pmib_attrs[] = {
__ATTR_RO(current_channel),
__ATTR_RO(current_page),
@@ -91,6 +111,31 @@ struct wpan_phy *wpan_phy_find(const char *str)
}
EXPORT_SYMBOL(wpan_phy_find);
+struct wpan_phy_iter_data {
+ int (*fn)(struct wpan_phy *phy, void *data);
+ void *data;
+};
+
+static int wpan_phy_iter(struct device *dev, void *_data)
+{
+ struct wpan_phy_iter_data *wpid = _data;
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+ return wpid->fn(phy, wpid->data);
+}
+
+int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
+ void *data)
+{
+ struct wpan_phy_iter_data wpid = {
+ .fn = fn,
+ .data = data,
+ };
+
+ return class_for_each_device(&wpan_phy_class, NULL,
+ &wpid, wpan_phy_iter);
+}
+EXPORT_SYMBOL(wpan_phy_for_each);
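/*
 * wpan_phy_for_each() visits every registered phy and stops as soon as the
 * callback returns non-zero (class_for_each_device() semantics); this is how
 * ieee802154_dump_phy() walks the list in nl-phy.c above.  A trivial
 * illustration with a hypothetical counting callback:
 */
static int example_count_one_phy(struct wpan_phy *phy, void *data)
{
	(*(int *)data)++;
	return 0;		/* keep iterating */
}

static int example_count_phys(void)
{
	int count = 0;

	wpan_phy_for_each(example_count_one_phy, &count);
	return count;
}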
+
static int wpan_phy_idx_valid(int idx)
{
return idx >= 0;
@@ -118,14 +163,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
phy->dev.class = &wpan_phy_class;
+ phy->current_channel = -1; /* not initialised */
+ phy->current_page = 0; /* for compatibility */
+
return phy;
}
EXPORT_SYMBOL(wpan_phy_alloc);
-int wpan_phy_register(struct device *parent, struct wpan_phy *phy)
+int wpan_phy_register(struct wpan_phy *phy)
{
- phy->dev.parent = parent;
-
return device_add(&phy->dev);
}
EXPORT_SYMBOL(wpan_phy_register);
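/*
 * Note the signature change above: wpan_phy_register() no longer takes the
 * parent device, so a driver is expected to set it on the phy before
 * registering.  A rough probe-path sketch (hypothetical driver; the direct
 * phy->dev.parent assignment is an assumption, a wrapper helper may exist):
 */
struct example_priv {
	int dummy;
};

static int example_probe(struct platform_device *pdev)
{
	struct wpan_phy *phy;
	int rc;

	phy = wpan_phy_alloc(sizeof(struct example_priv));
	if (!phy)
		return -ENOMEM;

	phy->dev.parent = &pdev->dev;	/* formerly the 1st wpan_phy_register() arg */

	/* ... fill in phy->channels_supported, transmit_power, etc. ... */

	rc = wpan_phy_register(phy);
	if (rc)
		wpan_phy_free(phy);
	return rc;
}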
@@ -144,16 +190,31 @@ EXPORT_SYMBOL(wpan_phy_free);
static int __init wpan_phy_class_init(void)
{
- return class_register(&wpan_phy_class);
+ int rc;
+ rc = class_register(&wpan_phy_class);
+ if (rc)
+ goto err;
+
+ rc = ieee802154_nl_init();
+ if (rc)
+ goto err_nl;
+
+ return 0;
+err_nl:
+ class_unregister(&wpan_phy_class);
+err:
+ return rc;
}
subsys_initcall(wpan_phy_class_init);
static void __exit wpan_phy_class_exit(void)
{
+ ieee802154_nl_exit();
class_unregister(&wpan_phy_class);
}
module_exit(wpan_phy_class_exit);
-MODULE_DESCRIPTION("IEEE 802.15.4 device class");
MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface");
+MODULE_AUTHOR("Dmitry Eremin-Solenikov");
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 04a14b1600ac..7d12c6a9b19b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -262,7 +262,8 @@ static inline int inet_netns_ok(struct net *net, int protocol)
* Create an inet socket.
*/
-static int inet_create(struct net *net, struct socket *sock, int protocol)
+static int inet_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct inet_protosw *answer;
@@ -325,7 +326,7 @@ lookup_protocol:
}
err = -EPERM;
- if (answer->capability > 0 && !capable(answer->capability))
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
err = -EAFNOSUPPORT;
@@ -685,7 +686,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
{
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
- struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
+ DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
sin->sin_family = AF_INET;
if (peer) {
@@ -947,7 +948,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_TCP,
.prot = &tcp_prot,
.ops = &inet_stream_ops,
- .capability = -1,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
@@ -958,7 +958,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_UDP,
.prot = &udp_prot,
.ops = &inet_dgram_ops,
- .capability = -1,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
},
@@ -969,7 +968,6 @@ static struct inet_protosw inetsw_array[] =
.protocol = IPPROTO_IP, /* wild card */
.prot = &raw_prot,
.ops = &inet_sockraw_ops,
- .capability = CAP_NET_RAW,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
}
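/*
 * The per-protocol .capability field is gone from inetsw_array above; raw
 * sockets are instead gated by the explicit CAP_NET_RAW test in inet_create(),
 * and the new "kern" argument exempts in-kernel callers.  Roughly, a kernel
 * user opening a raw socket goes through sock_create_kern(), which passes
 * kern=1 down to the create hook (hypothetical wrapper, for illustration;
 * the caller would later release the socket with sock_release()):
 */
static int example_kernel_icmp_socket(struct socket **sockp)
{
	/* no CAP_NET_RAW needed here: inet_create() sees kern == 1 */
	return sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, sockp);
}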
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index d07b0c1dd350..7ed3e4ae93ae 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -444,7 +444,7 @@ static int ah_init_state(struct xfrm_state *x)
}
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
- ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
+ ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5df2f6a0b0f0..e3126612fcbb 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -140,11 +140,11 @@ void in_dev_finish_destroy(struct in_device *idev)
#endif
dev_put(dev);
if (!idev->dead)
- printk("Freeing alive in_device %p\n", idev);
- else {
+ pr_err("Freeing alive in_device %p\n", idev);
+ else
kfree(idev);
- }
}
+EXPORT_SYMBOL(in_dev_finish_destroy);
static struct in_device *inetdev_init(struct net_device *dev)
{
@@ -159,7 +159,8 @@ static struct in_device *inetdev_init(struct net_device *dev)
sizeof(in_dev->cnf));
in_dev->cnf.sysctl = NULL;
in_dev->dev = dev;
- if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
+ in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
+ if (!in_dev->arp_parms)
goto out_kfree;
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
dev_disable_lro(dev);
@@ -405,13 +406,15 @@ struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
struct in_device *in_dev = NULL;
- read_lock(&dev_base_lock);
- dev = __dev_get_by_index(net, ifindex);
+
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
in_dev = in_dev_get(dev);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return in_dev;
}
+EXPORT_SYMBOL(inetdev_by_index);
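/*
 * inetdev_by_index() now resolves the device under RCU, but it still returns
 * the in_device with a reference held (via in_dev_get()), so callers keep the
 * usual get/put pattern.  A hypothetical caller:
 */
static bool example_has_in_device(struct net *net, int ifindex)
{
	struct in_device *in_dev = inetdev_by_index(net, ifindex);

	if (!in_dev)
		return false;
	/* ... use in_dev under the reference we hold ... */
	in_dev_put(in_dev);
	return true;
}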
/* Called only from RTNL semaphored context. No locks. */
@@ -557,7 +560,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
* Determine a default network mask, based on the IP address.
*/
-static __inline__ int inet_abc_len(__be32 addr)
+static inline int inet_abc_len(__be32 addr)
{
int rc = -1; /* Something else, probably a multicast. */
@@ -646,13 +649,15 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
rtnl_lock();
ret = -ENODEV;
- if ((dev = __dev_get_by_name(net, ifr.ifr_name)) == NULL)
+ dev = __dev_get_by_name(net, ifr.ifr_name);
+ if (!dev)
goto done;
if (colon)
*colon = ':';
- if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev) {
if (tryaddrmatch) {
/* Matthias Andree */
/* compare label and address (4.4BSD style) */
@@ -720,7 +725,8 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ifa) {
ret = -ENOBUFS;
- if ((ifa = inet_alloc_ifa()) == NULL)
+ ifa = inet_alloc_ifa();
+ if (!ifa)
break;
if (colon)
memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
@@ -822,10 +828,10 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
struct ifreq ifr;
int done = 0;
- if (!in_dev || (ifa = in_dev->ifa_list) == NULL)
+ if (!in_dev)
goto out;
- for (; ifa; ifa = ifa->ifa_next) {
+ for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (!buf) {
done += sizeof(ifr);
continue;
@@ -875,36 +881,33 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
if (!addr)
addr = ifa->ifa_local;
} endfor_ifa(in_dev);
-no_in_dev:
- rcu_read_unlock();
if (addr)
- goto out;
+ goto out_unlock;
+no_in_dev:
/* Not loopback addresses on loopback should be preferred
in this case. It is important that lo is the first interface
in dev_base list.
*/
- read_lock(&dev_base_lock);
- rcu_read_lock();
- for_each_netdev(net, dev) {
- if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
+ for_each_netdev_rcu(net, dev) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
continue;
for_primary_ifa(in_dev) {
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope) {
addr = ifa->ifa_local;
- goto out_unlock_both;
+ goto out_unlock;
}
} endfor_ifa(in_dev);
}
-out_unlock_both:
- read_unlock(&dev_base_lock);
+out_unlock:
rcu_read_unlock();
-out:
return addr;
}
+EXPORT_SYMBOL(inet_select_addr);
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
__be32 local, int scope)
@@ -940,7 +943,7 @@ static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
}
} endfor_ifa(in_dev);
- return same? addr : 0;
+ return same ? addr : 0;
}
/*
@@ -961,17 +964,16 @@ __be32 inet_confirm_addr(struct in_device *in_dev,
return confirm_addr_indev(in_dev, dst, local, scope);
net = dev_net(in_dev->dev);
- read_lock(&dev_base_lock);
rcu_read_lock();
- for_each_netdev(net, dev) {
- if ((in_dev = __in_dev_get_rcu(dev))) {
+ for_each_netdev_rcu(net, dev) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev) {
addr = confirm_addr_indev(in_dev, dst, local, scope);
if (addr)
break;
}
}
rcu_read_unlock();
- read_unlock(&dev_base_lock);
return addr;
}
@@ -984,14 +986,16 @@ int register_inetaddr_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
+EXPORT_SYMBOL(register_inetaddr_notifier);
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
+EXPORT_SYMBOL(unregister_inetaddr_notifier);
-/* Rename ifa_labels for a device name change. Make some effort to preserve existing
- * alias numbering and to create unique labels if possible.
+/* Rename ifa_labels for a device name change. Make some effort to preserve
+ * existing alias numbering and to create unique labels if possible.
*/
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
@@ -1010,11 +1014,10 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
sprintf(old, ":%d", named);
dot = old;
}
- if (strlen(dot) + strlen(dev->name) < IFNAMSIZ) {
+ if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
strcat(ifa->ifa_label, dot);
- } else {
+ else
strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
- }
skip:
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
@@ -1061,8 +1064,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (!inetdev_valid_mtu(dev->mtu))
break;
if (dev->flags & IFF_LOOPBACK) {
- struct in_ifaddr *ifa;
- if ((ifa = inet_alloc_ifa()) != NULL) {
+ struct in_ifaddr *ifa = inet_alloc_ifa();
+
+ if (ifa) {
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
@@ -1170,38 +1174,54 @@ nla_put_failure:
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int idx, ip_idx;
+ int h, s_h;
+ int idx, s_idx;
+ int ip_idx, s_ip_idx;
struct net_device *dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
- int s_ip_idx, s_idx = cb->args[0];
+ struct hlist_head *head;
+ struct hlist_node *node;
- s_ip_idx = ip_idx = cb->args[1];
- idx = 0;
- for_each_netdev(net, dev) {
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- s_ip_idx = 0;
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
- goto cont;
-
- for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
- ifa = ifa->ifa_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+ s_h = cb->args[0];
+ s_idx = idx = cb->args[1];
+ s_ip_idx = ip_idx = cb->args[2];
+
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ s_ip_idx = 0;
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev)
+ goto cont;
+
+ for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
+ ifa = ifa->ifa_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+ if (inet_fill_ifaddr(skb, ifa,
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
- RTM_NEWADDR, NLM_F_MULTI) <= 0)
- goto done;
- }
+ RTM_NEWADDR, NLM_F_MULTI) <= 0) {
+ rcu_read_unlock();
+ goto done;
+ }
+ }
cont:
- idx++;
+ idx++;
+ }
+ rcu_read_unlock();
}
done:
- cb->args[0] = idx;
- cb->args[1] = ip_idx;
+ cb->args[0] = h;
+ cb->args[1] = idx;
+ cb->args[2] = ip_idx;
return skb->len;
}
@@ -1239,18 +1259,18 @@ static void devinet_copy_dflt_conf(struct net *net, int i)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- for_each_netdev(net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
struct in_device *in_dev;
- rcu_read_lock();
+
in_dev = __in_dev_get_rcu(dev);
if (in_dev && !test_bit(i, in_dev->cnf.state))
in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
- rcu_read_unlock();
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
+/* called with RTNL locked */
static void inet_forward_change(struct net *net)
{
struct net_device *dev;
@@ -1259,7 +1279,6 @@ static void inet_forward_change(struct net *net)
IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
- read_lock(&dev_base_lock);
for_each_netdev(net, dev) {
struct in_device *in_dev;
if (on)
@@ -1270,7 +1289,6 @@ static void inet_forward_change(struct net *net)
IN_DEV_CONF_SET(in_dev, FORWARDING, on);
rcu_read_unlock();
}
- read_unlock(&dev_base_lock);
}
static int devinet_conf_proc(ctl_table *ctl, int write,
@@ -1450,6 +1468,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
"accept_source_route"),
+ DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
@@ -1587,7 +1606,7 @@ static __net_init int devinet_init_net(struct net *net)
all = &ipv4_devconf;
dflt = &ipv4_devconf_dflt;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
if (all == NULL)
goto err_alloc_all;
@@ -1680,8 +1699,3 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
}
-EXPORT_SYMBOL(in_dev_finish_destroy);
-EXPORT_SYMBOL(inet_select_addr);
-EXPORT_SYMBOL(inetdev_by_index);
-EXPORT_SYMBOL(register_inetaddr_notifier);
-EXPORT_SYMBOL(unregister_inetaddr_notifier);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 12f7287e902d..1948895beb6d 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -530,7 +530,7 @@ static int esp_init_authenc(struct xfrm_state *x)
}
err = crypto_aead_setauthsize(
- aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+ aead, x->aalg->alg_trunc_len / 8);
if (err)
goto free_key;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f73dbed0f0d7..3323168ee52d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -229,25 +229,29 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
*/
int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
- struct net_device *dev, __be32 *spec_dst, u32 *itag)
+ struct net_device *dev, __be32 *spec_dst,
+ u32 *itag, u32 mark)
{
struct in_device *in_dev;
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = src,
.saddr = dst,
.tos = tos } },
+ .mark = mark,
.iif = oif };
+
struct fib_result res;
- int no_addr, rpf;
+ int no_addr, rpf, accept_local;
int ret;
struct net *net;
- no_addr = rpf = 0;
+ no_addr = rpf = accept_local = 0;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
no_addr = in_dev->ifa_list == NULL;
rpf = IN_DEV_RPFILTER(in_dev);
+ accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
}
rcu_read_unlock();
@@ -257,8 +261,10 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
net = dev_net(dev);
if (fib_lookup(net, &fl, &res))
goto last_resort;
- if (res.type != RTN_UNICAST)
- goto e_inval_res;
+ if (res.type != RTN_UNICAST) {
+ if (res.type != RTN_LOCAL || !accept_local)
+ goto e_inval_res;
+ }
*spec_dst = FIB_RES_PREFSRC(res);
fib_combine_itag(itag, &res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -892,11 +898,11 @@ static void nl_fib_lookup_exit(struct net *net)
net->ipv4.fibnl = NULL;
}
-static void fib_disable_ip(struct net_device *dev, int force)
+static void fib_disable_ip(struct net_device *dev, int force, int delay)
{
if (fib_sync_down_dev(dev, force))
fib_flush(dev_net(dev));
- rt_cache_flush(dev_net(dev), 0);
+ rt_cache_flush(dev_net(dev), delay);
arp_ifdown(dev);
}
@@ -919,7 +925,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
/* Last address was deleted from this interface.
Disable IP.
*/
- fib_disable_ip(dev, 1);
+ fib_disable_ip(dev, 1, 0);
} else {
rt_cache_flush(dev_net(dev), -1);
}
@@ -934,7 +940,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (event == NETDEV_UNREGISTER) {
- fib_disable_ip(dev, 2);
+ fib_disable_ip(dev, 2, -1);
return NOTIFY_DONE;
}
@@ -952,12 +958,15 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), -1);
break;
case NETDEV_DOWN:
- fib_disable_ip(dev, 0);
+ fib_disable_ip(dev, 0, 0);
break;
case NETDEV_CHANGEMTU:
case NETDEV_CHANGE:
rt_cache_flush(dev_net(dev), 0);
break;
+ case NETDEV_UNREGISTER_BATCH:
+ rt_cache_flush_batch();
+ break;
}
return NOTIFY_DONE;
}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 835262c2b867..ca2d07b1c706 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -284,7 +284,7 @@ static int fib_default_rules_init(struct fib_rules_ops *ops)
{
int err;
- err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, FIB_RULE_PERMANENT);
+ err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
if (err < 0)
return err;
err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
@@ -301,13 +301,9 @@ int __net_init fib4_rules_init(struct net *net)
int err;
struct fib_rules_ops *ops;
- ops = kmemdup(&fib4_rules_ops_template, sizeof(*ops), GFP_KERNEL);
- if (ops == NULL)
- return -ENOMEM;
- INIT_LIST_HEAD(&ops->rules_list);
- ops->fro_net = net;
-
- fib_rules_register(ops);
+ ops = fib_rules_register(&fib4_rules_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
err = fib_default_rules_init(ops);
if (err < 0)
@@ -318,12 +314,10 @@ int __net_init fib4_rules_init(struct net *net)
fail:
/* also cleans all rules already added */
fib_rules_unregister(ops);
- kfree(ops);
return err;
}
void __net_exit fib4_rules_exit(struct net *net)
{
fib_rules_unregister(net->ipv4.rules_ops);
- kfree(net->ipv4.rules_ops);
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9b096d6ff3f2..ed19aa6919c2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -228,7 +228,7 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
head = &fib_info_hash[hash];
hlist_for_each_entry(fi, node, head, fib_hash) {
- if (fi->fib_net != nfi->fib_net)
+ if (!net_eq(fi->fib_net, nfi->fib_net))
continue;
if (fi->fib_nhs != nfi->fib_nhs)
continue;
@@ -1047,7 +1047,7 @@ int fib_sync_down_addr(struct net *net, __be32 local)
return 0;
hlist_for_each_entry(fi, node, head, fib_lhash) {
- if (fi->fib_net != net)
+ if (!net_eq(fi->fib_net, net))
continue;
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 84adb5754c96..fe11f60ce41b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -501,15 +501,16 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
if (!(rt->rt_flags & RTCF_LOCAL)) {
struct net_device *dev = NULL;
+ rcu_read_lock();
if (rt->fl.iif &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
- dev = dev_get_by_index(net, rt->fl.iif);
+ dev = dev_get_by_index_rcu(net, rt->fl.iif);
- if (dev) {
+ if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
- dev_put(dev);
- } else
+ else
saddr = 0;
+ rcu_read_unlock();
}
tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
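/*
 * The hunk above switches icmp_send() to the new dev_get_by_index_rcu():
 * no reference is taken on the device, so it may only be touched inside the
 * RCU read-side section and the old dev_put() goes away.  The same pattern
 * as a stand-alone sketch (hypothetical helper):
 */
static __be32 example_link_scope_addr(struct net *net, int ifindex)
{
	struct net_device *dev;
	__be32 addr = 0;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		addr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	rcu_read_unlock();

	return addr;
}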
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d41e5de79a82..76c08402c933 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1899,8 +1899,9 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
err = -EADDRNOTAVAIL;
for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
- if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
- && pmc->multi.imr_ifindex == imr.imr_ifindex)
+ if ((pmc->multi.imr_multiaddr.s_addr ==
+ imr.imr_multiaddr.s_addr) &&
+ (pmc->multi.imr_ifindex == imr.imr_ifindex))
break;
}
if (!pmc) { /* must have a prior join */
@@ -2311,9 +2312,10 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
- for_each_netdev(net, state->dev) {
+ for_each_netdev_rcu(net, state->dev) {
struct in_device *in_dev;
- in_dev = in_dev_get(state->dev);
+
+ in_dev = __in_dev_get_rcu(state->dev);
if (!in_dev)
continue;
read_lock(&in_dev->mc_list_lock);
@@ -2323,7 +2325,6 @@ static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
break;
}
read_unlock(&in_dev->mc_list_lock);
- in_dev_put(in_dev);
}
return im;
}
@@ -2333,16 +2334,15 @@ static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_li
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
im = im->next;
while (!im) {
- if (likely(state->in_dev != NULL)) {
+ if (likely(state->in_dev != NULL))
read_unlock(&state->in_dev->mc_list_lock);
- in_dev_put(state->in_dev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->in_dev = NULL;
break;
}
- state->in_dev = in_dev_get(state->dev);
+ state->in_dev = __in_dev_get_rcu(state->dev);
if (!state->in_dev)
continue;
read_lock(&state->in_dev->mc_list_lock);
@@ -2361,9 +2361,9 @@ static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
}
static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(rcu)
{
- read_lock(&dev_base_lock);
+ rcu_read_lock();
return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
@@ -2379,16 +2379,15 @@ static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(rcu)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
if (likely(state->in_dev != NULL)) {
read_unlock(&state->in_dev->mc_list_lock);
- in_dev_put(state->in_dev);
state->in_dev = NULL;
}
state->dev = NULL;
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
@@ -2462,9 +2461,9 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
state->idev = NULL;
state->im = NULL;
- for_each_netdev(net, state->dev) {
+ for_each_netdev_rcu(net, state->dev) {
struct in_device *idev;
- idev = in_dev_get(state->dev);
+ idev = __in_dev_get_rcu(state->dev);
if (unlikely(idev == NULL))
continue;
read_lock(&idev->mc_list_lock);
@@ -2480,7 +2479,6 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
spin_unlock_bh(&im->lock);
}
read_unlock(&idev->mc_list_lock);
- in_dev_put(idev);
}
return psf;
}
@@ -2494,16 +2492,15 @@ static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_l
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev != NULL))
read_unlock(&state->idev->mc_list_lock);
- in_dev_put(state->idev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
- state->idev = in_dev_get(state->dev);
+ state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
read_lock(&state->idev->mc_list_lock);
@@ -2528,8 +2525,9 @@ static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
}
static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
{
- read_lock(&dev_base_lock);
+ rcu_read_lock();
return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
@@ -2545,6 +2543,7 @@ static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (likely(state->im != NULL)) {
@@ -2553,11 +2552,10 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
}
if (likely(state->idev != NULL)) {
read_unlock(&state->idev->mc_list_lock);
- in_dev_put(state->idev);
state->idev = NULL;
}
state->dev = NULL;
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 26fb50e91311..ee16475f8fc3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -112,7 +112,7 @@ again:
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
- if (ib_net(tb) == net && tb->port == rover) {
+ if (net_eq(ib_net(tb), net) && tb->port == rover) {
if (tb->fastreuse > 0 &&
sk->sk_reuse &&
sk->sk_state != TCP_LISTEN &&
@@ -158,7 +158,7 @@ have_snum:
hashinfo->bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
- if (ib_net(tb) == net && tb->port == snum)
+ if (net_eq(ib_net(tb), net) && tb->port == snum)
goto tb_found;
}
tb = NULL;
@@ -531,7 +531,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
&expire, &resend);
if (!expire &&
(!resend ||
- !req->rsk_ops->rtx_syn_ack(parent, req) ||
+ !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
inet_rsk(req)->acked)) {
unsigned long timeo;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 47ad7aab51e3..94ef51aa5bc9 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -454,7 +454,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
* unique enough.
*/
inet_bind_bucket_for_each(tb, node, &head->chain) {
- if (ib_net(tb) == net && tb->port == port) {
+ if (net_eq(ib_net(tb), net) &&
+ tb->port == port) {
if (tb->fastreuse >= 0)
goto next_port;
WARN_ON(hlist_empty(&tb->owners));
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 6a667dae315e..47038cb6c138 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -64,15 +64,15 @@ static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
if (iph->ihl != IPH_LEN_WO_OPTIONS)
return -1;
- if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
- || tcph->rst || tcph->syn || tcph->fin)
+ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack ||
+ tcph->rst || tcph->syn || tcph->fin)
return -1;
if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
return -1;
- if (tcph->doff != TCPH_LEN_WO_OPTIONS
- && tcph->doff != TCPH_LEN_W_TIMESTAMP)
+ if (tcph->doff != TCPH_LEN_WO_OPTIONS &&
+ tcph->doff != TCPH_LEN_W_TIMESTAMP)
return -1;
/* check tcp options (only timestamp allowed) */
@@ -262,10 +262,10 @@ static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
struct iphdr *iph,
struct tcphdr *tcph)
{
- if ((lro_desc->iph->saddr != iph->saddr)
- || (lro_desc->iph->daddr != iph->daddr)
- || (lro_desc->tcph->source != tcph->source)
- || (lro_desc->tcph->dest != tcph->dest))
+ if ((lro_desc->iph->saddr != iph->saddr) ||
+ (lro_desc->iph->daddr != iph->daddr) ||
+ (lro_desc->tcph->source != tcph->source) ||
+ (lro_desc->tcph->dest != tcph->dest))
return -1;
return 0;
}
@@ -339,9 +339,9 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
u64 flags;
int vlan_hdr_len = 0;
- if (!lro_mgr->get_skb_header
- || lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
- &flags, priv))
+ if (!lro_mgr->get_skb_header ||
+ lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph,
+ &flags, priv))
goto out;
if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
@@ -351,8 +351,8 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
if (!lro_desc)
goto out;
- if ((skb->protocol == htons(ETH_P_8021Q))
- && !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
+ if ((skb->protocol == htons(ETH_P_8021Q)) &&
+ !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
vlan_hdr_len = VLAN_HLEN;
if (!lro_desc->active) { /* start new lro session */
@@ -446,9 +446,9 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
int hdr_len = LRO_MAX_PG_HLEN;
int vlan_hdr_len = 0;
- if (!lro_mgr->get_frag_header
- || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
- (void *)&tcph, &flags, priv)) {
+ if (!lro_mgr->get_frag_header ||
+ lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
+ (void *)&tcph, &flags, priv)) {
mac_hdr = page_address(frags->page) + frags->page_offset;
goto out1;
}
@@ -472,8 +472,8 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
if (!skb)
goto out;
- if ((skb->protocol == htons(ETH_P_8021Q))
- && !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
+ if ((skb->protocol == htons(ETH_P_8021Q)) &&
+ !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
vlan_hdr_len = VLAN_HLEN;
iph = (void *)(skb->data + vlan_hdr_len);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 1f5d508bb18b..31f931ef3daf 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -421,37 +421,46 @@ out:
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
-void inet_twsk_purge(struct net *net, struct inet_hashinfo *hashinfo,
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family)
{
struct inet_timewait_sock *tw;
struct sock *sk;
struct hlist_nulls_node *node;
- int h;
+ unsigned int slot;
- local_bh_disable();
- for (h = 0; h <= hashinfo->ehash_mask; h++) {
- struct inet_ehash_bucket *head =
- inet_ehash_bucket(hashinfo, h);
- spinlock_t *lock = inet_ehash_lockp(hashinfo, h);
+ for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+ struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+restart_rcu:
+ rcu_read_lock();
restart:
- spin_lock(lock);
- sk_nulls_for_each(sk, node, &head->twchain) {
-
+ sk_nulls_for_each_rcu(sk, node, &head->twchain) {
tw = inet_twsk(sk);
- if (!net_eq(twsk_net(tw), net) ||
- tw->tw_family != family)
+ if ((tw->tw_family != family) ||
+ atomic_read(&twsk_net(tw)->count))
+ continue;
+
+ if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
continue;
- atomic_inc(&tw->tw_refcnt);
- spin_unlock(lock);
+ if (unlikely((tw->tw_family != family) ||
+ atomic_read(&twsk_net(tw)->count))) {
+ inet_twsk_put(tw);
+ goto restart;
+ }
+
+ rcu_read_unlock();
inet_twsk_deschedule(tw, twdr);
inet_twsk_put(tw);
-
- goto restart;
+ goto restart_rcu;
}
- spin_unlock(lock);
+ /* If the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot)
+ goto restart;
+ rcu_read_unlock();
}
- local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index b1fbe18feb5a..6bcfe52a9c87 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -67,9 +67,6 @@
* ip_id_count: idlock
*/
-/* Exported for inet_getid inline function. */
-DEFINE_SPINLOCK(inet_peer_idlock);
-
static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
@@ -390,7 +387,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
n->v4daddr = daddr;
atomic_set(&n->refcnt, 1);
atomic_set(&n->rid, 0);
- n->ip_id_count = secure_ip_id(daddr);
+ atomic_set(&n->ip_id_count, secure_ip_id(daddr));
n->tcp_ts_stamp = 0;
write_lock_bh(&peer_pool_lock);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd51ccd..c4735310a923 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -206,10 +206,11 @@ static void ip_expire(unsigned long arg)
struct sk_buff *head = qp->q.fragments;
/* Send an ICMP "Fragment Reassembly Timeout" message. */
- if ((head->dev = dev_get_by_index(net, qp->iif)) != NULL) {
+ rcu_read_lock();
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (head->dev)
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
- dev_put(head->dev);
- }
+ rcu_read_unlock();
}
out:
spin_unlock(&qp->q.lock);
@@ -563,7 +564,7 @@ out_oversize:
printk(KERN_INFO "Oversized IP packet from %pI4.\n",
&qp->saddr);
out_fail:
- IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
}
@@ -657,7 +658,7 @@ static int ip4_frags_ns_ctl_register(struct net *net)
struct ctl_table_header *hdr;
table = ip4_frags_ns_ctl_table;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
@@ -675,7 +676,7 @@ static int ip4_frags_ns_ctl_register(struct net *net)
return 0;
err_reg:
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index a77807d449e3..f36ce156cac6 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -125,7 +125,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
#define HASH_SIZE 16
-static int ipgre_net_id;
+static int ipgre_net_id __read_mostly;
struct ipgre_net {
struct ip_tunnel *tunnels[4][HASH_SIZE];
@@ -1309,17 +1309,8 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
static int ipgre_init_net(struct net *net)
{
+ struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int err;
- struct ipgre_net *ign;
-
- err = -ENOMEM;
- ign = kzalloc(sizeof(struct ipgre_net), GFP_KERNEL);
- if (ign == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, ipgre_net_id, ign);
- if (err < 0)
- goto err_assign;
ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
ipgre_tunnel_setup);
@@ -1340,10 +1331,6 @@ static int ipgre_init_net(struct net *net)
err_reg_dev:
free_netdev(ign->fb_tunnel_dev);
err_alloc_dev:
- /* nothing */
-err_assign:
- kfree(ign);
-err_alloc:
return err;
}
@@ -1357,12 +1344,13 @@ static void ipgre_exit_net(struct net *net)
ipgre_destroy_tunnels(ign, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(ign);
}
static struct pernet_operations ipgre_net_ops = {
.init = ipgre_init_net,
.exit = ipgre_exit_net,
+ .id = &ipgre_net_id,
+ .size = sizeof(struct ipgre_net),
};
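/*
 * With .id and .size filled in as above, register_pernet_device() allocates
 * and zeroes the per-namespace struct itself and net_generic() retrieves it,
 * so the init/exit hooks no longer kmalloc/kfree it by hand.  A minimal
 * sketch with hypothetical names (example_net_id, struct example_net):
 */
static int example_net_id __read_mostly;

struct example_net {
	int counter;
};

static int example_init_net(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	en->counter = 0;	/* memory already zeroed; shown for clarity */
	return 0;
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};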
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1476,14 +1464,14 @@ static void ipgre_tap_setup(struct net_device *dev)
ether_setup(dev);
- dev->netdev_ops = &ipgre_netdev_ops;
+ dev->netdev_ops = &ipgre_tap_netdev_ops;
dev->destructor = free_netdev;
dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
}
-static int ipgre_newlink(struct net_device *dev, struct nlattr *tb[],
+static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip_tunnel *nt;
@@ -1537,25 +1525,29 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
if (t->dev != dev)
return -EEXIST;
} else {
- unsigned nflags = 0;
-
t = nt;
- if (ipv4_is_multicast(p.iph.daddr))
- nflags = IFF_BROADCAST;
- else if (p.iph.daddr)
- nflags = IFF_POINTOPOINT;
+ if (dev->type != ARPHRD_ETHER) {
+ unsigned nflags = 0;
- if ((dev->flags ^ nflags) &
- (IFF_POINTOPOINT | IFF_BROADCAST))
- return -EINVAL;
+ if (ipv4_is_multicast(p.iph.daddr))
+ nflags = IFF_BROADCAST;
+ else if (p.iph.daddr)
+ nflags = IFF_POINTOPOINT;
+
+ if ((dev->flags ^ nflags) &
+ (IFF_POINTOPOINT | IFF_BROADCAST))
+ return -EINVAL;
+ }
ipgre_tunnel_unlink(ign, t);
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
t->parms.i_key = p.i_key;
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
+ if (dev->type != ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ }
ipgre_tunnel_link(ign, t);
netdev_state_change(dev);
}
@@ -1678,7 +1670,7 @@ static int __init ipgre_init(void)
return -EAGAIN;
}
- err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
+ err = register_pernet_device(&ipgre_net_ops);
if (err < 0)
goto gen_device_failed;
@@ -1696,7 +1688,7 @@ out:
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ unregister_pernet_device(&ipgre_net_ops);
gen_device_failed:
inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
goto out;
@@ -1706,7 +1698,7 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ unregister_pernet_device(&ipgre_net_ops);
if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
printk(KERN_INFO "ipgre close: can't remove protocol\n");
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fdf51badc8e5..c29de9879fda 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -164,7 +164,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
if (sk && inet_sk(sk)->inet_num == protocol &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == dev->ifindex) &&
- sock_net(sk) == dev_net(dev)) {
+ net_eq(sock_net(sk), dev_net(dev))) {
if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
read_unlock(&ip_ra_lock);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 322b40864ac0..e34013a78ef4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -264,9 +264,11 @@ int ip_mc_output(struct sk_buff *skb)
This check is duplicated in ip_mr_input at the moment.
*/
- && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
+ &&
+ ((rt->rt_flags & RTCF_LOCAL) ||
+ !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
- ) {
+ ) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
@@ -501,8 +503,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
- truesizes += frag->truesize;
}
+ truesizes += frag->truesize;
}
/* Everything is OK. Generate! */
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index f8d04c256454..4e08b7f2331c 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1172,10 +1172,9 @@ static int __init ic_dynamic(void)
schedule_timeout_uninterruptible(1);
#ifdef IPCONFIG_DHCP
/* DHCP isn't done until we get a DHCPACK. */
- if ((ic_got_reply & IC_BOOTP)
- && (ic_proto_enabled & IC_USE_DHCP)
- && ic_dhcp_msgtype != DHCPACK)
- {
+ if ((ic_got_reply & IC_BOOTP) &&
+ (ic_proto_enabled & IC_USE_DHCP) &&
+ ic_dhcp_msgtype != DHCPACK) {
ic_got_reply = 0;
printk(",");
continue;
@@ -1344,9 +1343,9 @@ static int __init ip_auto_config(void)
*/
if (ic_myaddr == NONE ||
#ifdef CONFIG_ROOT_NFS
- (root_server_addr == NONE
- && ic_servaddr == NONE
- && ROOT_DEV == Root_NFS) ||
+ (root_server_addr == NONE &&
+ ic_servaddr == NONE &&
+ ROOT_DEV == Root_NFS) ||
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index a2ca53da4372..eda04fed3379 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -119,7 +119,7 @@
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
-static int ipip_net_id;
+static int ipip_net_id __read_mostly;
struct ipip_net {
struct ip_tunnel *tunnels_r_l[HASH_SIZE];
struct ip_tunnel *tunnels_r[HASH_SIZE];
@@ -446,25 +446,27 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_error;
}
- if (tiph->frag_off)
+ df |= old_iph->frag_off & htons(IP_DF);
+
+ if (df) {
mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
- else
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- if (mtu < 68) {
- stats->collisions++;
- ip_rt_put(rt);
- goto tx_error;
- }
- if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ if (mtu < 68) {
+ stats->collisions++;
+ ip_rt_put(rt);
+ goto tx_error;
+ }
- df |= (old_iph->frag_off&htons(IP_DF));
+ if (skb_dst(skb))
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
- if ((old_iph->frag_off&htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- ip_rt_put(rt);
- goto tx_error;
+ if ((old_iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(old_iph->tot_len)) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ ip_rt_put(rt);
+ goto tx_error;
+ }
}
if (tunnel->err_count > 0) {
@@ -773,17 +775,8 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
static int ipip_init_net(struct net *net)
{
+ struct ipip_net *ipn = net_generic(net, ipip_net_id);
int err;
- struct ipip_net *ipn;
-
- err = -ENOMEM;
- ipn = kzalloc(sizeof(struct ipip_net), GFP_KERNEL);
- if (ipn == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, ipip_net_id, ipn);
- if (err < 0)
- goto err_assign;
ipn->tunnels[0] = ipn->tunnels_wc;
ipn->tunnels[1] = ipn->tunnels_l;
@@ -810,29 +803,26 @@ err_reg_dev:
free_netdev(ipn->fb_tunnel_dev);
err_alloc_dev:
/* nothing */
-err_assign:
- kfree(ipn);
-err_alloc:
return err;
}
static void ipip_exit_net(struct net *net)
{
- struct ipip_net *ipn;
+ struct ipip_net *ipn = net_generic(net, ipip_net_id);
LIST_HEAD(list);
- ipn = net_generic(net, ipip_net_id);
rtnl_lock();
ipip_destroy_tunnels(ipn, &list);
unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(ipn);
}
static struct pernet_operations ipip_net_ops = {
.init = ipip_init_net,
.exit = ipip_exit_net,
+ .id = &ipip_net_id,
+ .size = sizeof(struct ipip_net),
};
static int __init ipip_init(void)
@@ -846,7 +836,7 @@ static int __init ipip_init(void)
return -EAGAIN;
}
- err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops);
+ err = register_pernet_device(&ipip_net_ops);
if (err)
xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
@@ -858,7 +848,7 @@ static void __exit ipip_fini(void)
if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET))
printk(KERN_INFO "ipip close: can't deregister tunnel\n");
- unregister_pernet_gen_device(ipip_net_id, &ipip_net_ops);
+ unregister_pernet_device(&ipip_net_ops);
}
module_init(ipip_init);
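/*
 * Illustrative sketch only (user-space model, not kernel code): the ipip
 * hunks above drop the hand-rolled kzalloc()/net_assign_generic() pair in
 * ipip_init_net() and instead declare .id and .size in pernet_operations,
 * so the registration core allocates and frees struct ipip_net per
 * namespace.  The model below mimics that "declare id and size, let the
 * registry allocate" contract; pernet_ops, fake_net, pernet_register and
 * fake_net_setup are made-up names, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PERNET 8

struct fake_net {
    void *gen[MAX_PERNET];          /* per-namespace private slots */
};

struct pernet_ops {
    int *id;                        /* filled in at registration time */
    size_t size;                    /* bytes to allocate per namespace */
    int (*init)(struct fake_net *net, void *priv);
};

static struct pernet_ops *registered[MAX_PERNET];
static int next_id;

static int pernet_register(struct pernet_ops *ops)
{
    if (next_id >= MAX_PERNET)
        return -1;
    *ops->id = next_id;
    registered[next_id++] = ops;
    return 0;
}

/* Called once per new namespace: the core, not the driver, allocates. */
static int fake_net_setup(struct fake_net *net)
{
    for (int i = 0; i < next_id; i++) {
        struct pernet_ops *ops = registered[i];

        net->gen[i] = calloc(1, ops->size);
        if (!net->gen[i])
            return -1;
        if (ops->init && ops->init(net, net->gen[i]))
            return -1;
    }
    return 0;
}

/* Stand-in for struct ipip_net: only driver-specific fields remain. */
struct ipip_state { int tunnel_count; };
static int ipip_id;

static int ipip_model_init(struct fake_net *net, void *priv)
{
    struct ipip_state *st = priv;   /* already zeroed by the core */

    (void)net;
    st->tunnel_count = 0;
    return 0;
}

static struct pernet_ops ipip_model_ops = {
    .id   = &ipip_id,
    .size = sizeof(struct ipip_state),
    .init = ipip_model_init,
};

int main(void)
{
    struct fake_net net = { .gen = { 0 } };

    pernet_register(&ipip_model_ops);
    fake_net_setup(&net);
    printf("ipip state for this net lives in slot %d\n", ipip_id);
    for (int i = 0; i < next_id; i++)
        free(net.gen[i]);
    return 0;
}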
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ef4ee45b928f..54596f73eff5 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -494,8 +494,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
return -EINVAL;
}
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
+ if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+ dev_put(dev);
return -EADDRNOTAVAIL;
+ }
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
ip_rt_multicast_event(in_dev);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 1725dc0ef688..f53cb8df4182 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -155,10 +155,10 @@ static int nf_ip_reroute(struct sk_buff *skb,
if (entry->hook == NF_INET_LOCAL_OUT) {
const struct iphdr *iph = ip_hdr(skb);
- if (!(iph->tos == rt_info->tos
- && skb->mark == rt_info->mark
- && iph->daddr == rt_info->daddr
- && iph->saddr == rt_info->saddr))
+ if (!(iph->tos == rt_info->tos &&
+ skb->mark == rt_info->mark &&
+ iph->daddr == rt_info->daddr &&
+ iph->saddr == rt_info->saddr))
return ip_route_me_harder(skb, RTN_UNSPEC);
}
return 0;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 9f0787091951..49ad44712f46 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -499,7 +499,7 @@ ipq_rcv_nl_event(struct notifier_block *this,
if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
write_lock_bh(&queue_lock);
- if ((n->net == &init_net) && (n->pid == peer_pid))
+ if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
write_unlock_bh(&queue_lock);
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 68afc6ecd343..fe1a64479dd0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -750,6 +750,8 @@ static int __init nf_nat_init(void)
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
+ BUG_ON(nf_ct_nat_offset != NULL);
+ rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
return 0;
cleanup_extend:
@@ -764,6 +766,7 @@ static void __exit nf_nat_cleanup(void)
nf_ct_extend_unregister(&nat_extend);
rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
+ rcu_assign_pointer(nf_ct_nat_offset, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 5bf6a92cc551..7f10a6be0191 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -69,6 +69,28 @@ adjust_tcp_sequence(u32 seq,
DUMP_OFFSET(this_way);
}
+/* Get the offset value, for conntrack */
+s16 nf_nat_get_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ struct nf_conn_nat *nat = nfct_nat(ct);
+ struct nf_nat_seq *this_way;
+ s16 offset;
+
+ if (!nat)
+ return 0;
+
+ this_way = &nat->seq[dir];
+ spin_lock_bh(&nf_nat_seqofs_lock);
+ offset = after(seq, this_way->correction_pos)
+ ? this_way->offset_after : this_way->offset_before;
+ spin_unlock_bh(&nf_nat_seqofs_lock);
+
+ return offset;
+}
+EXPORT_SYMBOL_GPL(nf_nat_get_offset);
+
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
unsigned int dataoff,
@@ -185,11 +207,6 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
adjust_tcp_sequence(ntohl(tcph->seq),
(int)rep_len - (int)match_len,
ct, ctinfo);
- /* Tell TCP window tracking about seq change */
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb),
- ct, CTINFO2DIR(ctinfo),
- (int)rep_len - (int)match_len);
-
nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
return 1;
@@ -411,12 +428,7 @@ nf_nat_seq_adjust(struct sk_buff *skb,
tcph->seq = newseq;
tcph->ack_seq = newack;
- if (!nf_nat_sack_adjust(skb, tcph, ct, ctinfo))
- return 0;
-
- nf_conntrack_tcp_update(skb, ip_hdrlen(skb), ct, dir, seqoff);
-
- return 1;
+ return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
/* Setup NAT on this expected conntrack so it follows master. */
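/*
 * Illustrative sketch only (user-space model): nf_nat_get_offset() above
 * exposes the per-direction TCP sequence correction that NAT mangling has
 * accumulated, so conntrack can ask "how far has this flow been shifted at
 * sequence number X?" instead of being notified of every rewrite.  The
 * struct and helper names here (seq_adjust, seq_after, get_offset) are
 * made up, the locking is omitted; only the field layout and the wrap-safe
 * comparison mirror the code above.
 */
#include <stdint.h>
#include <stdio.h>

struct seq_adjust {
    uint32_t correction_pos;    /* seq where the last resize happened */
    int16_t  offset_before;     /* correction for older segments */
    int16_t  offset_after;      /* correction from that point on */
};

/* Wrap-safe "a is after b" on the 32-bit sequence space. */
static int seq_after(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

static int16_t get_offset(const struct seq_adjust *adj, uint32_t seq)
{
    return seq_after(seq, adj->correction_pos) ? adj->offset_after
                                               : adj->offset_before;
}

int main(void)
{
    /* e.g. an ALG grew a payload by 3 bytes at sequence 1000 */
    struct seq_adjust adj = { .correction_pos = 1000,
                              .offset_before = 0, .offset_after = 3 };

    printf("offset at 900:  %d\n", get_offset(&adj, 900));   /* 0 */
    printf("offset at 1500: %d\n", get_offset(&adj, 1500));  /* 3 */
    return 0;
}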
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 9ef8c0829a77..ce154b47f1da 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -351,13 +351,24 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
- err = memcpy_fromiovecend((void *)iph, from, 0, length);
- if (err)
- goto error_fault;
+ err = -EFAULT;
+ if (memcpy_fromiovecend((void *)iph, from, 0, length))
+ goto error_free;
- /* We don't modify invalid header */
iphlen = iph->ihl * 4;
- if (iphlen >= sizeof(*iph) && iphlen <= length) {
+
+ /*
+ * We don't want to modify the ip header, but we do need to
+ * be sure that it won't cause problems later along the network
+ * stack. Specifically we want to make sure that iph->ihl is a
+ * sane value. If ihl points beyond the length of the buffer passed
+ * in, reject the frame as invalid
+ */
+ err = -EINVAL;
+ if (iphlen > length)
+ goto error_free;
+
+ if (iphlen >= sizeof(*iph)) {
if (!iph->saddr)
iph->saddr = rt->rt_src;
iph->check = 0;
@@ -380,8 +391,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
out:
return 0;
-error_fault:
- err = -EFAULT;
+error_free:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
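/*
 * Illustrative sketch only: the raw.c hunk above rejects IP_HDRINCL frames
 * whose header-length field points past the buffer the caller supplied,
 * and only rewrites saddr/checksum when the header is at least a minimal
 * IPv4 header.  hdrincl_classify is a made-up helper that models just that
 * decision.
 */
#include <stddef.h>
#include <stdio.h>

#define MIN_IPHDR 20    /* minimal IPv4 header, no options */

/* ihl is the header length in 32-bit words, length the user buffer size. */
static int hdrincl_classify(unsigned int ihl, size_t length)
{
    size_t iphlen = ihl * 4;

    if (iphlen > length)
        return -1;      /* invalid: reject the frame (-EINVAL) */
    if (iphlen >= MIN_IPHDR)
        return 1;       /* sane header: saddr/checksum may be filled in */
    return 0;           /* too short to touch: passed through unmodified */
}

int main(void)
{
    printf("%d\n", hdrincl_classify(5, 40));   /*  1: 20-byte header fits */
    printf("%d\n", hdrincl_classify(2, 40));   /*  0: shorter than a header */
    printf("%d\n", hdrincl_classify(15, 40));  /* -1: 60-byte header, 40 buf */
    return 0;
}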
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 68fb22702051..90cdcfc32937 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -513,43 +513,42 @@ static const struct file_operations rt_cpu_seq_fops = {
};
#ifdef CONFIG_NET_CLS_ROUTE
-static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
-{
- unsigned int i;
-
- if ((offset & 3) || (length & 3))
- return -EIO;
-
- if (offset >= sizeof(struct ip_rt_acct) * 256) {
- *eof = 1;
- return 0;
- }
-
- if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
- length = sizeof(struct ip_rt_acct) * 256 - offset;
- *eof = 1;
+static int rt_acct_proc_show(struct seq_file *m, void *v)
+{
+ struct ip_rt_acct *dst, *src;
+ unsigned int i, j;
+
+ dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
+ for (j = 0; j < 256; j++) {
+ dst[j].o_bytes += src[j].o_bytes;
+ dst[j].o_packets += src[j].o_packets;
+ dst[j].i_bytes += src[j].i_bytes;
+ dst[j].i_packets += src[j].i_packets;
+ }
}
- offset /= sizeof(u32);
-
- if (length > 0) {
- u32 *dst = (u32 *) buffer;
-
- *start = buffer;
- memset(dst, 0, length);
-
- for_each_possible_cpu(i) {
- unsigned int j;
- u32 *src;
+ seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
+ kfree(dst);
+ return 0;
+}
- src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
- for (j = 0; j < length/4; j++)
- dst[j] += src[j];
- }
- }
- return length;
+static int rt_acct_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rt_acct_proc_show, NULL);
}
+
+static const struct file_operations rt_acct_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = rt_acct_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
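/*
 * Illustrative sketch only (user-space model): rt_acct_proc_show() above
 * replaces the old offset/length procfs read handler with a seq_file that
 * folds every CPU's 256-entry accounting table into one snapshot and writes
 * it out in a single seq_write().  The model keeps just the folding step;
 * NR_CPUS is an arbitrary illustrative value and struct rt_acct mirrors the
 * four counters shown in the hunk.
 */
#include <stdio.h>
#include <string.h>

#define NR_CPUS    4
#define RT_CLASSES 256

struct rt_acct {
    unsigned long long o_bytes, o_packets, i_bytes, i_packets;
};

/* per-CPU tables, normally updated locklessly on each CPU */
static struct rt_acct percpu_acct[NR_CPUS][RT_CLASSES];

static void fold_acct(struct rt_acct *dst)
{
    memset(dst, 0, RT_CLASSES * sizeof(*dst));
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        for (int cls = 0; cls < RT_CLASSES; cls++) {
            dst[cls].o_bytes   += percpu_acct[cpu][cls].o_bytes;
            dst[cls].o_packets += percpu_acct[cpu][cls].o_packets;
            dst[cls].i_bytes   += percpu_acct[cpu][cls].i_bytes;
            dst[cls].i_packets += percpu_acct[cpu][cls].i_packets;
        }
    }
}

int main(void)
{
    struct rt_acct snap[RT_CLASSES];

    percpu_acct[0][7].o_packets = 2;
    percpu_acct[3][7].o_packets = 5;
    fold_acct(snap);
    printf("class 7: %llu packets out\n", snap[7].o_packets);  /* 7 */
    return 0;
}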
@@ -567,8 +566,7 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
goto err2;
#ifdef CONFIG_NET_CLS_ROUTE
- pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
- ip_rt_acct_read, NULL);
+ pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
@@ -703,7 +701,7 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
- return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
+ return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
}
static inline int rt_is_expired(struct rtable *rth)
@@ -902,6 +900,12 @@ void rt_cache_flush(struct net *net, int delay)
rt_do_flush(!in_softirq());
}
+/* Flush previous cache invalidated entries from the cache */
+void rt_cache_flush_batch(void)
+{
+ rt_do_flush(!in_softirq());
+}
+
/*
* We change rt_genid and let gc do the cleanup
*/
@@ -1346,9 +1350,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
return;
net = dev_net(dev);
- if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
- || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
- || ipv4_is_zeronet(new_gw))
+ if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
+ ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
+ ipv4_is_zeronet(new_gw))
goto reject_redirect;
if (!rt_caching(net))
@@ -1851,7 +1855,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto e_inval;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else if (fib_validate_source(saddr, 0, tos, 0,
- dev, &spec_dst, &itag) < 0)
+ dev, &spec_dst, &itag, 0) < 0)
goto e_inval;
rth = dst_alloc(&ipv4_dst_ops);
@@ -1964,7 +1968,7 @@ static int __mkroute_input(struct sk_buff *skb,
err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
- in_dev->dev, &spec_dst, &itag);
+ in_dev->dev, &spec_dst, &itag, skb->mark);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
@@ -2138,7 +2142,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int result;
result = fib_validate_source(saddr, daddr, tos,
net->loopback_dev->ifindex,
- dev, &spec_dst, &itag);
+ dev, &spec_dst, &itag, skb->mark);
if (result < 0)
goto martian_source;
if (result)
@@ -2167,7 +2171,7 @@ brd_input:
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else {
err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
- &itag);
+ &itag, skb->mark);
if (err < 0)
goto martian_source;
if (err)
@@ -2311,10 +2315,11 @@ skip_cache:
ip_hdr(skb)->protocol);
if (our
#ifdef CONFIG_IP_MROUTE
- || (!ipv4_is_local_multicast(daddr) &&
- IN_DEV_MFORWARD(in_dev))
+ ||
+ (!ipv4_is_local_multicast(daddr) &&
+ IN_DEV_MFORWARD(in_dev))
#endif
- ) {
+ ) {
rcu_read_unlock();
return ip_route_input_mc(skb, daddr, saddr,
tos, dev, our);
@@ -2511,9 +2516,9 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
of another iface. --ANK
*/
- if (oldflp->oif == 0
- && (ipv4_is_multicast(oldflp->fl4_dst) ||
- oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
+ if (oldflp->oif == 0 &&
+ (ipv4_is_multicast(oldflp->fl4_dst) ||
+ oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
dev_out = ip_dev_find(net, oldflp->fl4_src);
if (dev_out == NULL)
@@ -2852,7 +2857,7 @@ static int rt_fill_info(struct net *net,
error = rt->u.dst.error;
expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
if (rt->peer) {
- id = rt->peer->ip_id_count;
+ id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
if (rt->peer->tcp_ts_stamp) {
ts = rt->peer->tcp_ts;
tsage = get_seconds() - rt->peer->tcp_ts_stamp;
@@ -3309,7 +3314,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
struct ctl_table *tbl;
tbl = ipv4_route_flush_table;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
if (tbl == NULL)
goto err_dup;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3146cc401748..26399ad2a289 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -253,6 +253,8 @@ EXPORT_SYMBOL(cookie_check_timestamp);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
+ struct tcp_options_received tcp_opt;
+ u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
@@ -263,7 +265,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
int mss;
struct rtable *rt;
__u8 rcv_wscale;
- struct tcp_options_received tcp_opt;
if (!sysctl_tcp_syncookies || !th->ack)
goto out;
@@ -341,7 +342,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, 0, &rt->u.dst);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, &rt->u.dst);
if (tcp_opt.saw_tstamp)
cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 2dcf04d9b005..13f7ab6ad6a0 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -714,6 +714,14 @@ static struct ctl_table ipv4_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
+ .procname = "tcp_cookie_size",
+ .data = &sysctl_tcp_cookie_size,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -818,7 +826,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
struct ctl_table *table;
table = ipv4_net_table;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
@@ -849,7 +857,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
return 0;
err_reg:
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e0cfa633680a..c8666b70cde0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -264,6 +264,7 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
+#include <linux/time.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -1183,7 +1184,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+ WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+ KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif
if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1433,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* Now that we have two receive queues this
* shouldn't happen.
*/
- if (before(*seq, TCP_SKB_CB(skb)->seq)) {
- printk(KERN_INFO "recvmsg bug: copied %X "
- "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+ KERN_INFO "recvmsg bug: copied %X "
+ "seq %X rcvnxt %X fl %X\n", *seq,
+ TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+ flags))
break;
- }
+
offset = *seq - TCP_SKB_CB(skb)->seq;
if (tcp_hdr(skb)->syn)
offset--;
@@ -1443,8 +1448,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (tcp_hdr(skb)->fin)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
- "copied %X seq %X\n", *seq,
- TCP_SKB_CB(skb)->seq);
+ "copied %X seq %X rcvnxt %X fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq,
+ tp->rcv_nxt, flags);
}
/* Well, if we have backlog, try to process it now yet. */
@@ -2054,6 +2060,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->bytes_acked = 0;
+ tp->window_clamp = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
@@ -2078,8 +2085,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
int val;
int err = 0;
- /* This is a string value all the others are int's */
- if (optname == TCP_CONGESTION) {
+ /* These are data/string values, all the others are ints */
+ switch (optname) {
+ case TCP_CONGESTION: {
char name[TCP_CA_NAME_MAX];
if (optlen < 1)
@@ -2096,6 +2104,93 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
release_sock(sk);
return err;
}
+ case TCP_COOKIE_TRANSACTIONS: {
+ struct tcp_cookie_transactions ctd;
+ struct tcp_cookie_values *cvp = NULL;
+
+ if (sizeof(ctd) > optlen)
+ return -EINVAL;
+ if (copy_from_user(&ctd, optval, sizeof(ctd)))
+ return -EFAULT;
+
+ if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
+ ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
+ return -EINVAL;
+
+ if (ctd.tcpct_cookie_desired == 0) {
+ /* default to global value */
+ } else if ((0x1 & ctd.tcpct_cookie_desired) ||
+ ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
+ ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
+ return -EINVAL;
+ }
+
+ if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
+ /* Supercedes all other values */
+ lock_sock(sk);
+ if (tp->cookie_values != NULL) {
+ kref_put(&tp->cookie_values->kref,
+ tcp_cookie_values_release);
+ tp->cookie_values = NULL;
+ }
+ tp->rx_opt.cookie_in_always = 0; /* false */
+ tp->rx_opt.cookie_out_never = 1; /* true */
+ release_sock(sk);
+ return err;
+ }
+
+ /* Allocate ancillary memory before locking.
+ */
+ if (ctd.tcpct_used > 0 ||
+ (tp->cookie_values == NULL &&
+ (sysctl_tcp_cookie_size > 0 ||
+ ctd.tcpct_cookie_desired > 0 ||
+ ctd.tcpct_s_data_desired > 0))) {
+ cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
+ GFP_KERNEL);
+ if (cvp == NULL)
+ return -ENOMEM;
+ }
+ lock_sock(sk);
+ tp->rx_opt.cookie_in_always =
+ (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
+ tp->rx_opt.cookie_out_never = 0; /* false */
+
+ if (tp->cookie_values != NULL) {
+ if (cvp != NULL) {
+ /* Changed values are recorded by a changed
+ * pointer, ensuring the cookie will differ,
+ * without separately hashing each value later.
+ */
+ kref_put(&tp->cookie_values->kref,
+ tcp_cookie_values_release);
+ kref_init(&cvp->kref);
+ tp->cookie_values = cvp;
+ } else {
+ cvp = tp->cookie_values;
+ }
+ }
+ if (cvp != NULL) {
+ cvp->cookie_desired = ctd.tcpct_cookie_desired;
+
+ if (ctd.tcpct_used > 0) {
+ memcpy(cvp->s_data_payload, ctd.tcpct_value,
+ ctd.tcpct_used);
+ cvp->s_data_desired = ctd.tcpct_used;
+ cvp->s_data_constant = 1; /* true */
+ } else {
+ /* No constant payload data. */
+ cvp->s_data_desired = ctd.tcpct_s_data_desired;
+ cvp->s_data_constant = 0; /* false */
+ }
+ }
+ release_sock(sk);
+ return err;
+ }
+ default:
+ /* fallthru */
+ break;
+ };
if (optlen < sizeof(int))
return -EINVAL;
@@ -2420,6 +2515,47 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
return -EFAULT;
return 0;
+
+ case TCP_COOKIE_TRANSACTIONS: {
+ struct tcp_cookie_transactions ctd;
+ struct tcp_cookie_values *cvp = tp->cookie_values;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < sizeof(ctd))
+ return -EINVAL;
+
+ memset(&ctd, 0, sizeof(ctd));
+ ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
+ TCP_COOKIE_IN_ALWAYS : 0)
+ | (tp->rx_opt.cookie_out_never ?
+ TCP_COOKIE_OUT_NEVER : 0);
+
+ if (cvp != NULL) {
+ ctd.tcpct_flags |= (cvp->s_data_in ?
+ TCP_S_DATA_IN : 0)
+ | (cvp->s_data_out ?
+ TCP_S_DATA_OUT : 0);
+
+ ctd.tcpct_cookie_desired = cvp->cookie_desired;
+ ctd.tcpct_s_data_desired = cvp->s_data_desired;
+
+ /* Cookie(s) saved, return as nonce */
+ if (sizeof(ctd.tcpct_value) < cvp->cookie_pair_size) {
+ /* impossible? */
+ return -EINVAL;
+ }
+ memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
+ cvp->cookie_pair_size);
+ ctd.tcpct_used = cvp->cookie_pair_size;
+ }
+
+ if (put_user(sizeof(ctd), optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &ctd, sizeof(ctd)))
+ return -EFAULT;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -2842,6 +2978,135 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
+/**
+ * Each Responder maintains up to two secret values concurrently for
+ * efficient secret rollover. Each secret value has 4 states:
+ *
+ * Generating. (tcp_secret_generating != tcp_secret_primary)
+ * Generates new Responder-Cookies, but not yet used for primary
+ * verification. This is a short-term state, typically lasting only
+ * one round trip time (RTT).
+ *
+ * Primary. (tcp_secret_generating == tcp_secret_primary)
+ * Used both for generation and primary verification.
+ *
+ * Retiring. (tcp_secret_retiring != tcp_secret_secondary)
+ * Used for verification, until the first failure that can be
+ * verified by the newer Generating secret. At that time, this
+ * cookie's state is changed to Secondary, and the Generating
+ * cookie's state is changed to Primary. This is a short-term state,
+ * typically lasting only one round trip time (RTT).
+ *
+ * Secondary. (tcp_secret_retiring == tcp_secret_secondary)
+ * Used for secondary verification, after primary verification
+ * failures. This state lasts no more than twice the Maximum Segment
+ * Lifetime (2MSL). Then, the secret is discarded.
+ */
+struct tcp_cookie_secret {
+ /* The secret is divided into two parts. The digest part is the
+ * equivalent of previously hashing a secret and saving the state,
+ * and serves as an initialization vector (IV). The message part
+ * serves as the trailing secret.
+ */
+ u32 secrets[COOKIE_WORKSPACE_WORDS];
+ unsigned long expires;
+};
+
+#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
+#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
+#define TCP_SECRET_LIFE (HZ * 600)
+
+static struct tcp_cookie_secret tcp_secret_one;
+static struct tcp_cookie_secret tcp_secret_two;
+
+/* Essentially a circular list, without dynamic allocation. */
+static struct tcp_cookie_secret *tcp_secret_generating;
+static struct tcp_cookie_secret *tcp_secret_primary;
+static struct tcp_cookie_secret *tcp_secret_retiring;
+static struct tcp_cookie_secret *tcp_secret_secondary;
+
+static DEFINE_SPINLOCK(tcp_secret_locker);
+
+/* Select a pseudo-random word in the cookie workspace.
+ */
+static inline u32 tcp_cookie_work(const u32 *ws, const int n)
+{
+ return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
+}
+
+/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
+ * Called in softirq context.
+ * Returns: 0 for success.
+ */
+int tcp_cookie_generator(u32 *bakery)
+{
+ unsigned long jiffy = jiffies;
+
+ if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
+ spin_lock_bh(&tcp_secret_locker);
+ if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
+ /* refreshed by another */
+ memcpy(bakery,
+ &tcp_secret_generating->secrets[0],
+ COOKIE_WORKSPACE_WORDS);
+ } else {
+ /* still needs refreshing */
+ get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
+
+ /* The first time, paranoia assumes that the
+ * randomization function isn't as strong. But,
+ * this secret initialization is delayed until
+ * the last possible moment (packet arrival).
+ * Although that time is observable, it is
+ * unpredictably variable. Mash in the most
+ * volatile clock bits available, and expire the
+ * secret extra quickly.
+ */
+ if (unlikely(tcp_secret_primary->expires ==
+ tcp_secret_secondary->expires)) {
+ struct timespec tv;
+
+ getnstimeofday(&tv);
+ bakery[COOKIE_DIGEST_WORDS+0] ^=
+ (u32)tv.tv_nsec;
+
+ tcp_secret_secondary->expires = jiffy
+ + TCP_SECRET_1MSL
+ + (0x0f & tcp_cookie_work(bakery, 0));
+ } else {
+ tcp_secret_secondary->expires = jiffy
+ + TCP_SECRET_LIFE
+ + (0xff & tcp_cookie_work(bakery, 1));
+ tcp_secret_primary->expires = jiffy
+ + TCP_SECRET_2MSL
+ + (0x1f & tcp_cookie_work(bakery, 2));
+ }
+ memcpy(&tcp_secret_secondary->secrets[0],
+ bakery, COOKIE_WORKSPACE_WORDS);
+
+ rcu_assign_pointer(tcp_secret_generating,
+ tcp_secret_secondary);
+ rcu_assign_pointer(tcp_secret_retiring,
+ tcp_secret_primary);
+ /*
+ * Neither call_rcu() nor synchronize_rcu() needed.
+ * Retiring data is not freed. It is replaced after
+ * further (locked) pointer updates, and a quiet time
+ * (minimum 1MSL, maximum LIFE - 2MSL).
+ */
+ }
+ spin_unlock_bh(&tcp_secret_locker);
+ } else {
+ rcu_read_lock_bh();
+ memcpy(bakery,
+ &rcu_dereference(tcp_secret_generating)->secrets[0],
+ COOKIE_WORKSPACE_WORDS);
+ rcu_read_unlock_bh();
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tcp_cookie_generator);
+
void tcp_done(struct sock *sk)
{
if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
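/*
 * Illustrative sketch only (user-space model): tcp_cookie_generator() above
 * keeps two secrets and rotates them by pointer swap rather than by freeing
 * anything, so old cookies can still be verified against the retiring
 * secret while new ones are minted with the fresh one.  This model drops
 * the RCU, locking and staggered-expiry details described in the comment
 * block above and keeps only the rotation shape; all names are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SECRET_WORDS 4
#define SECRET_LIFE  600    /* seconds, stand-in for TCP_SECRET_LIFE */

struct cookie_secret {
    unsigned int words[SECRET_WORDS];
    time_t expires;
};

static struct cookie_secret secret_a, secret_b;
static struct cookie_secret *generating = &secret_a;  /* mints new cookies */
static struct cookie_secret *retiring   = &secret_b;  /* still verifies old */

static void refresh_if_expired(time_t now)
{
    struct cookie_secret *old = generating;

    if (now < generating->expires)
        return;                     /* still fresh */

    /* Fill the other slot with new material and make it current. */
    for (int i = 0; i < SECRET_WORDS; i++)
        retiring->words[i] = (unsigned int)rand();
    retiring->expires = now + SECRET_LIFE;

    generating = retiring;          /* new secret mints cookies */
    retiring = old;                 /* old one only verifies */
}

int main(void)
{
    time_t now = time(NULL);

    srand((unsigned int)now);
    secret_a.expires = now;         /* past due: forces a refresh */
    refresh_if_expired(now);
    printf("generating=%s retiring=%s\n",
           generating == &secret_a ? "a" : "b",
           retiring   == &secret_a ? "a" : "b");
    return 0;
}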
@@ -2876,6 +3141,7 @@ void __init tcp_init(void)
struct sk_buff *skb = NULL;
unsigned long nr_pages, limit;
int order, i, max_share;
+ unsigned long jiffy = jiffies;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -2969,6 +3235,15 @@ void __init tcp_init(void)
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
+
+ memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
+ memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
+ tcp_secret_one.expires = jiffy; /* past due */
+ tcp_secret_two.expires = jiffy; /* past due */
+ tcp_secret_generating = &tcp_secret_one;
+ tcp_secret_primary = &tcp_secret_one;
+ tcp_secret_retiring = &tcp_secret_two;
+ tcp_secret_secondary = &tcp_secret_two;
}
EXPORT_SYMBOL(tcp_close);
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 26d5c7fc7de5..7c94a4955416 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -92,8 +92,8 @@ static inline void measure_rtt(struct sock *sk, u32 srtt)
if (icsk->icsk_ca_state == TCP_CA_Open) {
if (ca->maxRTT < ca->minRTT)
ca->maxRTT = ca->minRTT;
- if (ca->maxRTT < srtt
- && srtt <= ca->maxRTT + msecs_to_jiffies(20))
+ if (ca->maxRTT < srtt &&
+ srtt <= ca->maxRTT + msecs_to_jiffies(20))
ca->maxRTT = srtt;
}
}
@@ -123,9 +123,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt
ca->packetcount += pkts_acked;
- if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1)
- && now - ca->lasttime >= ca->minRTT
- && ca->minRTT > 0) {
+ if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
+ now - ca->lasttime >= ca->minRTT &&
+ ca->minRTT > 0) {
__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
if (htcp_ccount(ca) <= 3) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ba0eab65fe80..57ae96a04220 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
* "len" is invariant segment length, including TCP header.
*/
len += skb->data - skb_transport_header(skb);
- if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
+ if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
/* If PSH is not set, packet should be
* full sized, provided peer TCP is not badly broken.
* This observation (if it is correct 8)) allows
@@ -411,7 +411,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
hint = min(hint, tp->rcv_wnd / 2);
- hint = min(hint, TCP_MIN_RCVMSS);
+ hint = min(hint, TCP_MSS_DEFAULT);
hint = max(hint, TCP_MIN_MSS);
inet_csk(sk)->icsk_ack.rcv_mss = hint;
@@ -3698,14 +3698,12 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- int estab, struct dst_entry *dst)
+ u8 **hvpp, int estab, struct dst_entry *dst)
{
unsigned char *ptr;
struct tcphdr *th = tcp_hdr(skb);
int length = (th->doff * 4) - sizeof(struct tcphdr);
- BUG_ON(!estab && !dst);
-
ptr = (unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
@@ -3787,7 +3785,30 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
*/
break;
#endif
- }
+ case TCPOPT_COOKIE:
+ /* This option is variable length.
+ */
+ switch (opsize) {
+ case TCPOLEN_COOKIE_BASE:
+ /* not yet implemented */
+ break;
+ case TCPOLEN_COOKIE_PAIR:
+ /* not yet implemented */
+ break;
+ case TCPOLEN_COOKIE_MIN+0:
+ case TCPOLEN_COOKIE_MIN+2:
+ case TCPOLEN_COOKIE_MIN+4:
+ case TCPOLEN_COOKIE_MIN+6:
+ case TCPOLEN_COOKIE_MAX:
+ /* 16-bit multiple */
+ opt_rx->cookie_plus = opsize;
+ *hvpp = ptr;
+ default:
+ /* ignore option */
+ break;
+ };
+ break;
+ };
ptr += opsize-2;
length -= opsize;
@@ -3815,17 +3836,20 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
* If it is wrong it falls back on tcp_parse_options().
*/
static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
- struct tcp_sock *tp)
+ struct tcp_sock *tp, u8 **hvpp)
{
- if (th->doff == sizeof(struct tcphdr) >> 2) {
+ /* In the spirit of fast parsing, compare doff directly to constant
+ * values. Because equality is used, short doff can be ignored here.
+ */
+ if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
return 0;
} else if (tp->rx_opt.tstamp_ok &&
- th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
+ th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
- tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
return 1;
}
@@ -4854,11 +4878,11 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss
+ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
- && __tcp_select_window(sk) >= tp->rcv_wnd) ||
+ __tcp_select_window(sk) >= tp->rcv_wnd) ||
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
@@ -5079,10 +5103,12 @@ out:
static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
struct tcphdr *th, int syn_inerr)
{
+ u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
/* RFC1323: H1. Apply PAWS check first. */
- if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
+ if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
+ tp->rx_opt.saw_tstamp &&
tcp_paws_discard(sk, skb)) {
if (!th->rst) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
@@ -5370,12 +5396,14 @@ discard:
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct tcphdr *th, unsigned len)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
- int saved_clamp = tp->rx_opt.mss_clamp;
+ struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_cookie_values *cvp = tp->cookie_values;
+ int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, 0, dst);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, dst);
if (th->ack) {
/* rfc793:
@@ -5472,6 +5500,31 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* Change state from SYN-SENT only after copied_seq
* is initialized. */
tp->copied_seq = tp->rcv_nxt;
+
+ if (cvp != NULL &&
+ cvp->cookie_pair_size > 0 &&
+ tp->rx_opt.cookie_plus > 0) {
+ int cookie_size = tp->rx_opt.cookie_plus
+ - TCPOLEN_COOKIE_BASE;
+ int cookie_pair_size = cookie_size
+ + cvp->cookie_desired;
+
+ /* A cookie extension option was sent and returned.
+ * Note that each incoming SYNACK replaces the
+ * Responder cookie. The initial exchange is most
+ * fragile, as protection against spoofing relies
+ * entirely upon the sequence and timestamp (above).
+ * This replacement strategy allows the correct pair to
+ * pass through, while any others will be filtered via
+ * Responder verification later.
+ */
+ if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
+ memcpy(&cvp->cookie_pair[cvp->cookie_desired],
+ hash_location, cookie_size);
+ cvp->cookie_pair_size = cookie_pair_size;
+ }
+ }
+
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 657ae334f125..fee9aabd5aa1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -204,7 +204,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
* when trying new connection.
*/
if (peer != NULL &&
- peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
+ (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
tp->rx_opt.ts_recent = peer->tcp_ts;
}
@@ -217,7 +217,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (inet->opt)
inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
- tp->rx_opt.mss_clamp = 536;
+ tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
/* Socket identity is still unknown (sport may be zero).
* However we set state to SYN-SENT and not releasing socket
@@ -742,8 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
* This still operates on a request_sock only, not on a big
* socket.
*/
-static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
- struct dst_entry *dst)
+static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct request_values *rvp)
{
const struct inet_request_sock *ireq = inet_rsk(req);
int err = -1;
@@ -753,7 +754,7 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
return -1;
- skb = tcp_make_synack(sk, dst, req);
+ skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
struct tcphdr *th = tcp_hdr(skb);
@@ -774,9 +775,10 @@ static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
return err;
}
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+ struct request_values *rvp)
{
- return __tcp_v4_send_synack(sk, req, NULL);
+ return __tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
@@ -1211,13 +1213,16 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
- struct inet_request_sock *ireq;
+ struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
+ u8 *hash_location;
struct request_sock *req;
+ struct inet_request_sock *ireq;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct dst_entry *dst = NULL;
__be32 saddr = ip_hdr(skb)->saddr;
__be32 daddr = ip_hdr(skb)->daddr;
__u32 isn = TCP_SKB_CB(skb)->when;
- struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
#else
@@ -1268,16 +1273,50 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
tcp_clear_options(&tmp_opt);
- tmp_opt.mss_clamp = 536;
- tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
+ tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
+ tmp_opt.user_mss = tp->rx_opt.user_mss;
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+
+ if (tmp_opt.cookie_plus > 0 &&
+ tmp_opt.saw_tstamp &&
+ !tp->rx_opt.cookie_out_never &&
+ (sysctl_tcp_cookie_size > 0 ||
+ (tp->cookie_values != NULL &&
+ tp->cookie_values->cookie_desired > 0))) {
+ u8 *c;
+ u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
+ int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
+
+ if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
+ goto drop_and_release;
+
+ /* Secret recipe starts with IP addresses */
+ *mess++ ^= daddr;
+ *mess++ ^= saddr;
+
+ /* plus variable length Initiator Cookie */
+ c = (u8 *)mess;
+ while (l-- > 0)
+ *c++ ^= *hash_location++;
- tcp_parse_options(skb, &tmp_opt, 0, dst);
+#ifdef CONFIG_SYN_COOKIES
+ want_cookie = 0; /* not our kind of cookie */
+#endif
+ tmp_ext.cookie_out_never = 0; /* false */
+ tmp_ext.cookie_plus = tmp_opt.cookie_plus;
+ } else if (!tp->rx_opt.cookie_in_always) {
+ /* redundant indications, but ensure initialization. */
+ tmp_ext.cookie_out_never = 1; /* true */
+ tmp_ext.cookie_plus = 0;
+ } else {
+ goto drop_and_release;
+ }
+ tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-
tcp_openreq_init(req, &tmp_opt, skb);
if (security_inet_conn_request(sk, skb, req))
@@ -1308,7 +1347,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) {
- if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
+ if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
@@ -1337,7 +1376,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
}
tcp_rsk(req)->snt_isn = isn;
- if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
+ if (__tcp_v4_send_synack(sk, dst, req,
+ (struct request_values *)&tmp_ext) ||
+ want_cookie)
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
@@ -1727,9 +1768,9 @@ int tcp_v4_remember_stamp(struct sock *sk)
if (peer) {
if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
- (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
- peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
- peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
peer->tcp_ts = tp->rx_opt.ts_recent;
}
if (release_it)
@@ -1748,9 +1789,9 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
- (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
- peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
- peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
+ ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+ peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+ peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
peer->tcp_ts = tcptw->tw_ts_recent;
}
inet_putpeer(peer);
@@ -1815,7 +1856,7 @@ static int tcp_v4_init_sock(struct sock *sk)
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
- tp->mss_cache = 536;
+ tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
@@ -1831,6 +1872,19 @@ static int tcp_v4_init_sock(struct sock *sk)
tp->af_specific = &tcp_sock_ipv4_specific;
#endif
+ /* TCP Cookie Transactions */
+ if (sysctl_tcp_cookie_size > 0) {
+ /* Default, cookies without s_data_payload. */
+ tp->cookie_values =
+ kzalloc(sizeof(*tp->cookie_values),
+ sk->sk_allocation);
+ if (tp->cookie_values != NULL)
+ kref_init(&tp->cookie_values->kref);
+ }
+ /* Presumed zeroed, in order of appearance:
+ * cookie_in_always, cookie_out_never,
+ * s_data_constant, s_data_in, s_data_out
+ */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1884,6 +1938,13 @@ void tcp_v4_destroy_sock(struct sock *sk)
sk->sk_sndmsg_page = NULL;
}
+ /* TCP Cookie Transactions */
+ if (tp->cookie_values != NULL) {
+ kref_put(&tp->cookie_values->kref,
+ tcp_cookie_values_release);
+ tp->cookie_values = NULL;
+ }
+
percpu_counter_dec(&tcp_sockets_allocated);
}
@@ -2468,12 +2529,17 @@ static int __net_init tcp_sk_init(struct net *net)
static void __net_exit tcp_sk_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv4.tcp_sock);
- inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
+}
+
+static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}
static struct pernet_operations __net_initdata tcp_sk_ops = {
- .init = tcp_sk_init,
- .exit = tcp_sk_exit,
+ .init = tcp_sk_init,
+ .exit = tcp_sk_exit,
+ .exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
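/*
 * Illustrative sketch only: several tcp_ipv4.c hunks above rewrite
 * "stamp + TCP_PAWS_MSL >= now" as "(u32)now - stamp <= TCP_PAWS_MSL".
 * Both ask "is the stamp at most MSL seconds old?", but only the
 * subtraction form stays correct when the 32-bit seconds counter wraps.
 * recent_add/recent_sub are made-up helper names for the two forms.
 */
#include <stdint.h>
#include <stdio.h>

#define PAWS_MSL 60u

static int recent_add(uint32_t stamp, uint32_t now)
{
    return (uint32_t)(stamp + PAWS_MSL) >= now;   /* breaks across a wrap */
}

static int recent_sub(uint32_t stamp, uint32_t now)
{
    return (uint32_t)(now - stamp) <= PAWS_MSL;   /* wrap-safe */
}

int main(void)
{
    uint32_t stamp = 0xFFFFFFF0u;   /* stamped just before the wrap */
    uint32_t now   = 0xFFFFFFF8u;   /* 8 "seconds" later, clearly recent */

    printf("addition form:    %d\n", recent_add(stamp, now)); /* 0: wrongly stale */
    printf("subtraction form: %d\n", recent_sub(stamp, now)); /* 1: correct */
    return 0;
}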
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index ce3c41ff50b2..de870377fbba 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -143,8 +143,8 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
goto out;
/* we can't calc remote HZ with no different!! */
- if (tp->rx_opt.rcv_tsval == lp->remote_ref_time
- || tp->rx_opt.rcv_tsecr == lp->local_ref_time)
+ if (tp->rx_opt.rcv_tsval == lp->remote_ref_time ||
+ tp->rx_opt.rcv_tsecr == lp->local_ref_time)
goto out;
m = HZ * (tp->rx_opt.rcv_tsval -
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 463d51b53d37..87accec8d097 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -26,13 +26,7 @@
#include <net/inet_common.h>
#include <net/xfrm.h>
-#ifdef CONFIG_SYSCTL
-#define SYNC_INIT 0 /* let the user enable it */
-#else
-#define SYNC_INIT 1
-#endif
-
-int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
+int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);
int sysctl_tcp_abort_on_overflow __read_mostly;
@@ -96,13 +90,14 @@ enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th)
{
- struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
struct tcp_options_received tmp_opt;
+ u8 *hash_location;
+ struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
tmp_opt.tstamp_ok = 1;
- tcp_parse_options(skb, &tmp_opt, 1, NULL);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -389,14 +384,43 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
const struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_connection_sock *newicsk = inet_csk(newsk);
- struct tcp_sock *newtp;
+ struct tcp_sock *newtp = tcp_sk(newsk);
+ struct tcp_sock *oldtp = tcp_sk(sk);
+ struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+
+ /* TCP Cookie Transactions require space for the cookie pair,
+ * as it differs for each connection. There is no need to
+ * copy any s_data_payload stored at the original socket.
+ * Failure will prevent resuming the connection.
+ *
+ * Presumed copied, in order of appearance:
+ * cookie_in_always, cookie_out_never
+ */
+ if (oldcvp != NULL) {
+ struct tcp_cookie_values *newcvp =
+ kzalloc(sizeof(*newtp->cookie_values),
+ GFP_ATOMIC);
+
+ if (newcvp != NULL) {
+ kref_init(&newcvp->kref);
+ newcvp->cookie_desired =
+ oldcvp->cookie_desired;
+ newtp->cookie_values = newcvp;
+ } else {
+ /* Not Yet Implemented */
+ newtp->cookie_values = NULL;
+ }
+ }
/* Now setup tcp_sock */
- newtp = tcp_sk(newsk);
newtp->pred_flags = 0;
- newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
- newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
- newtp->snd_up = treq->snt_isn + 1;
+
+ newtp->rcv_wup = newtp->copied_seq =
+ newtp->rcv_nxt = treq->rcv_isn + 1;
+
+ newtp->snd_sml = newtp->snd_una =
+ newtp->snd_nxt = newtp->snd_up =
+ treq->snt_isn + 1 + tcp_s_data_size(oldtp);
tcp_prequeue_init(newtp);
@@ -429,8 +453,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
skb_queue_head_init(&newtp->out_of_order_queue);
- newtp->write_seq = treq->snt_isn + 1;
- newtp->pushed_seq = newtp->write_seq;
+ newtp->write_seq = newtp->pushed_seq =
+ treq->snt_isn + 1 + tcp_s_data_size(oldtp);
newtp->rx_opt.saw_tstamp = 0;
@@ -476,7 +500,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
if (newtp->af_specific->md5_lookup(sk, newsk))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
- if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
+ if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
@@ -495,16 +519,16 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct request_sock **prev)
{
+ struct tcp_options_received tmp_opt;
+ u8 *hash_location;
+ struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
- struct tcp_options_received tmp_opt;
- struct sock *child;
- struct dst_entry *dst = inet_csk_route_req(sk, req);
- tmp_opt.saw_tstamp = 0;
- if (th->doff > (sizeof(struct tcphdr)>>2)) {
- tcp_parse_options(skb, &tmp_opt, 0, dst);
+ if ((th->doff > (sizeof(*th) >> 2)) && (req->ts_recent)) {
+ tmp_opt.tstamp_ok = 1;
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 1, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
@@ -517,8 +541,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
}
}
- dst_release(dst);
-
/* Check for pure retransmitted SYN. */
if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
flg == TCP_FLAG_SYN &&
@@ -540,7 +562,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* Enforce "SYN-ACK" according to figure 8, figure 6
* of RFC793, fixed by RFC1122.
*/
- req->rsk_ops->rtx_syn_ack(sk, req);
+ req->rsk_ops->rtx_syn_ack(sk, req, NULL);
return NULL;
}
@@ -599,7 +621,8 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* Invalid ACK: reset will be sent by listening socket
*/
if ((flg & TCP_FLAG_ACK) &&
- (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
+ (TCP_SKB_CB(skb)->ack_seq !=
+ tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
return sk;
/* Also, it would be not so bad idea to check rcv_tsecr, which
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 616c686ca253..93316a96d820 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -59,6 +59,10 @@ int sysctl_tcp_base_mss __read_mostly = 512;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
+EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
+
+
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
@@ -362,15 +366,45 @@ static inline int tcp_urg_mode(const struct tcp_sock *tp)
#define OPTION_TS (1 << 1)
#define OPTION_MD5 (1 << 2)
#define OPTION_WSCALE (1 << 3)
+#define OPTION_COOKIE_EXTENSION (1 << 4)
struct tcp_out_options {
u8 options; /* bit field of OPTION_* */
u8 ws; /* window scale, 0 to disable */
u8 num_sack_blocks; /* number of SACK blocks to include */
+ u8 hash_size; /* bytes in hash_location */
u16 mss; /* 0 to disable */
__u32 tsval, tsecr; /* need to include OPTION_TS */
+ __u8 *hash_location; /* temporary pointer, overloaded */
};
+/* The sysctl int routines are generic, so check consistency here.
+ */
+static u8 tcp_cookie_size_check(u8 desired)
+{
+ if (desired > 0) {
+ /* previously specified */
+ return desired;
+ }
+ if (sysctl_tcp_cookie_size <= 0) {
+ /* no default specified */
+ return 0;
+ }
+ if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+ /* value too small, specify minimum */
+ return TCP_COOKIE_MIN;
+ }
+ if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+ /* value too large, specify maximum */
+ return TCP_COOKIE_MAX;
+ }
+ if (0x1 & sysctl_tcp_cookie_size) {
+ /* 8-bit multiple, illegal, fix it */
+ return (u8)(sysctl_tcp_cookie_size + 0x1);
+ }
+ return (u8)sysctl_tcp_cookie_size;
+}
+
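/*
 * Illustrative sketch only (user-space copy of the clamping rule that
 * tcp_cookie_size_check() above applies to the tcp_cookie_size sysctl):
 * an explicit per-socket request wins, otherwise the global value is
 * clamped into [min, max] and an odd byte count is rounded up to an even
 * one.  The bounds 8 and 16 below follow the cookie option sizes accepted
 * by the parser earlier in this series, but are assumptions of this model,
 * as is the helper name cookie_size_check.
 */
#include <assert.h>
#include <stdint.h>

#define COOKIE_MIN 8
#define COOKIE_MAX 16

static uint8_t cookie_size_check(uint8_t desired, int sysctl_value)
{
    if (desired > 0)
        return desired;                       /* previously specified */
    if (sysctl_value <= 0)
        return 0;                             /* cookies disabled */
    if (sysctl_value <= COOKIE_MIN)
        return COOKIE_MIN;                    /* too small: raise to minimum */
    if (sysctl_value >= COOKIE_MAX)
        return COOKIE_MAX;                    /* too large: cap at maximum */
    if (sysctl_value & 0x1)
        return (uint8_t)(sysctl_value + 1);   /* odd: round up to even */
    return (uint8_t)sysctl_value;
}

int main(void)
{
    assert(cookie_size_check(12, 0) == 12);         /* explicit request wins */
    assert(cookie_size_check(0, 0)  == 0);          /* disabled by default */
    assert(cookie_size_check(0, 4)  == COOKIE_MIN);
    assert(cookie_size_check(0, 11) == 12);         /* odd rounded up */
    assert(cookie_size_check(0, 99) == COOKIE_MAX);
    return 0;
}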
/* Write previously computed TCP options to the packet.
*
* Beware: Something in the Internet is very sensitive to the ordering of
@@ -385,17 +419,34 @@ struct tcp_out_options {
* (but it may well be that other scenarios fail similarly).
*/
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
- const struct tcp_out_options *opts,
- __u8 **md5_hash) {
- if (unlikely(OPTION_MD5 & opts->options)) {
- *ptr++ = htonl((TCPOPT_NOP << 24) |
- (TCPOPT_NOP << 16) |
- (TCPOPT_MD5SIG << 8) |
- TCPOLEN_MD5SIG);
- *md5_hash = (__u8 *)ptr;
+ struct tcp_out_options *opts)
+{
+ u8 options = opts->options; /* mungable copy */
+
+ /* Having both authentication and cookies for security is redundant,
+ * and there's certainly not enough room. Instead, the cookie-less
+ * extension variant is proposed.
+ *
+ * Consider the pessimal case with authentication. The options
+ * could look like:
+ * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
+ */
+ if (unlikely(OPTION_MD5 & options)) {
+ if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
+ *ptr++ = htonl((TCPOPT_COOKIE << 24) |
+ (TCPOLEN_COOKIE_BASE << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ } else {
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ }
+ options &= ~OPTION_COOKIE_EXTENSION;
+ /* overload cookie hash location */
+ opts->hash_location = (__u8 *)ptr;
ptr += 4;
- } else {
- *md5_hash = NULL;
}
if (unlikely(opts->mss)) {
@@ -404,12 +455,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
opts->mss);
}
- if (likely(OPTION_TS & opts->options)) {
- if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) {
+ if (likely(OPTION_TS & options)) {
+ if (unlikely(OPTION_SACK_ADVERTISE & options)) {
*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
(TCPOLEN_SACK_PERM << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
+ options &= ~OPTION_SACK_ADVERTISE;
} else {
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
@@ -420,15 +472,52 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
*ptr++ = htonl(opts->tsecr);
}
- if (unlikely(OPTION_SACK_ADVERTISE & opts->options &&
- !(OPTION_TS & opts->options))) {
+ /* Specification requires after timestamp, so do it now.
+ *
+ * Consider the pessimal case without authentication. The options
+ * could look like:
+ * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
+ */
+ if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
+ __u8 *cookie_copy = opts->hash_location;
+ u8 cookie_size = opts->hash_size;
+
+ /* 8-bit multiple handled in tcp_cookie_size_check() above,
+ * and elsewhere.
+ */
+ if (0x2 & cookie_size) {
+ __u8 *p = (__u8 *)ptr;
+
+ /* 16-bit multiple */
+ *p++ = TCPOPT_COOKIE;
+ *p++ = TCPOLEN_COOKIE_BASE + cookie_size;
+ *p++ = *cookie_copy++;
+ *p++ = *cookie_copy++;
+ ptr++;
+ cookie_size -= 2;
+ } else {
+ /* 32-bit multiple */
+ *ptr++ = htonl(((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_COOKIE << 8) |
+ TCPOLEN_COOKIE_BASE) +
+ cookie_size);
+ }
+
+ if (cookie_size > 0) {
+ memcpy(ptr, cookie_copy, cookie_size);
+ ptr += (cookie_size / 4);
+ }
+ }
+
+ if (unlikely(OPTION_SACK_ADVERTISE & options)) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_SACK_PERM << 8) |
TCPOLEN_SACK_PERM);
}
- if (unlikely(OPTION_WSCALE & opts->options)) {
+ if (unlikely(OPTION_WSCALE & options)) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
@@ -463,14 +552,18 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5) {
struct tcp_sock *tp = tcp_sk(sk);
- unsigned size = 0;
+ struct tcp_cookie_values *cvp = tp->cookie_values;
struct dst_entry *dst = __sk_dst_get(sk);
+ unsigned remaining = MAX_TCP_OPTION_SPACE;
+ u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
+ tcp_cookie_size_check(cvp->cookie_desired) :
+ 0;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (*md5) {
opts->options |= OPTION_MD5;
- size += TCPOLEN_MD5SIG_ALIGNED;
+ remaining -= TCPOLEN_MD5SIG_ALIGNED;
}
#else
*md5 = NULL;
@@ -486,7 +579,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
* SACKs don't matter, we never delay an ACK when we have any of those
* going out. */
opts->mss = tcp_advertise_mss(sk);
- size += TCPOLEN_MSS_ALIGNED;
+ remaining -= TCPOLEN_MSS_ALIGNED;
if (likely(sysctl_tcp_timestamps &&
!dst_feature(dst, RTAX_FEATURE_NO_TSTAMP) &&
@@ -494,22 +587,68 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = tp->rx_opt.ts_recent;
- size += TCPOLEN_TSTAMP_ALIGNED;
+ remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
if (likely(sysctl_tcp_window_scaling &&
!dst_feature(dst, RTAX_FEATURE_NO_WSCALE))) {
opts->ws = tp->rx_opt.rcv_wscale;
opts->options |= OPTION_WSCALE;
- size += TCPOLEN_WSCALE_ALIGNED;
+ remaining -= TCPOLEN_WSCALE_ALIGNED;
}
if (likely(sysctl_tcp_sack &&
!dst_feature(dst, RTAX_FEATURE_NO_SACK))) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!(OPTION_TS & opts->options)))
- size += TCPOLEN_SACKPERM_ALIGNED;
+ remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
- return size;
+ /* Note that timestamps are required by the specification.
+ *
+ * Odd numbers of bytes are prohibited by the specification, ensuring
+ * that the cookie is 16-bit aligned, and the resulting cookie pair is
+ * 32-bit aligned.
+ */
+ if (*md5 == NULL &&
+ (OPTION_TS & opts->options) &&
+ cookie_size > 0) {
+ int need = TCPOLEN_COOKIE_BASE + cookie_size;
+
+ if (0x2 & need) {
+ /* 32-bit multiple */
+ need += 2; /* NOPs */
+
+ if (need > remaining) {
+ /* try shrinking cookie to fit */
+ cookie_size -= 2;
+ need -= 4;
+ }
+ }
+ while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
+ cookie_size -= 4;
+ need -= 4;
+ }
+ if (TCP_COOKIE_MIN <= cookie_size) {
+ opts->options |= OPTION_COOKIE_EXTENSION;
+ opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
+ opts->hash_size = cookie_size;
+
+ /* Remember for future incarnations. */
+ cvp->cookie_desired = cookie_size;
+
+ if (cvp->cookie_desired != cvp->cookie_pair_size) {
+ /* Currently use random bytes as a nonce,
+ * assuming these are completely unpredictable
+ * by hostile users of the same system.
+ */
+ get_random_bytes(&cvp->cookie_pair[0],
+ cookie_size);
+ cvp->cookie_pair_size = cookie_size;
+ }
+
+ remaining -= need;
+ }
+ }
+ return MAX_TCP_OPTION_SPACE - remaining;
}
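
The fitting logic above pads the cookie option to a 32-bit multiple with NOPs and then shrinks the cookie in 4-byte steps until it fits the remaining option space. A minimal userspace sketch of that arithmetic; the helper name is hypothetical and the constant values (base length 2, minimum cookie 8) are assumed here, not quoted from the kernel headers:

    #include <stdio.h>

    #define TCPOLEN_COOKIE_BASE 2   /* kind + length, as in the patch (assumed) */
    #define TCP_COOKIE_MIN      8   /* assumed minimum cookie length            */

    /* Bytes consumed by a cookie option carrying 'cookie_size' data bytes,
     * padded with two NOPs when only 16-bit aligned, or 0 if it cannot fit
     * into 'remaining' bytes even after shrinking (illustrative helper). */
    static int cookie_option_need(int cookie_size, int remaining)
    {
        int need = TCPOLEN_COOKIE_BASE + cookie_size;

        if (need & 0x2) {           /* 16-bit aligned only: pad with two NOPs */
            need += 2;
            if (need > remaining) { /* try shrinking the cookie instead       */
                cookie_size -= 2;
                need -= 4;
            }
        }
        while (need > remaining && cookie_size >= TCP_COOKIE_MIN) {
            cookie_size -= 4;
            need -= 4;
        }
        return (cookie_size >= TCP_COOKIE_MIN) ? need : 0;
    }

    int main(void)
    {
        printf("%d\n", cookie_option_need(8, 12));   /* 2+8 pads to 12           */
        printf("%d\n", cookie_option_need(16, 12));  /* shrinks until it fits: 12 */
        return 0;
    }
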
/* Set up TCP options for SYN-ACKs. */
@@ -517,48 +656,77 @@ static unsigned tcp_synack_options(struct sock *sk,
struct request_sock *req,
unsigned mss, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5) {
- unsigned size = 0;
+ struct tcp_md5sig_key **md5,
+ struct tcp_extend_values *xvp)
+{
struct inet_request_sock *ireq = inet_rsk(req);
- char doing_ts;
+ unsigned remaining = MAX_TCP_OPTION_SPACE;
+ u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
+ xvp->cookie_plus :
+ 0;
+ bool doing_ts = ireq->tstamp_ok;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
if (*md5) {
opts->options |= OPTION_MD5;
- size += TCPOLEN_MD5SIG_ALIGNED;
+ remaining -= TCPOLEN_MD5SIG_ALIGNED;
+
+ /* We can't fit any SACK blocks in a packet with MD5 + TS
+ * options. There was discussion about disabling SACK
+ * rather than TS in order to fit in better with old,
+ * buggy kernels, but that was deemed to be unnecessary.
+ */
+ doing_ts &= !ireq->sack_ok;
}
#else
*md5 = NULL;
#endif
- /* we can't fit any SACK blocks in a packet with MD5 + TS
- options. There was discussion about disabling SACK rather than TS in
- order to fit in better with old, buggy kernels, but that was deemed
- to be unnecessary. */
- doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok);
-
+ /* We always send an MSS option. */
opts->mss = mss;
- size += TCPOLEN_MSS_ALIGNED;
+ remaining -= TCPOLEN_MSS_ALIGNED;
if (likely(ireq->wscale_ok)) {
opts->ws = ireq->rcv_wscale;
opts->options |= OPTION_WSCALE;
- size += TCPOLEN_WSCALE_ALIGNED;
+ remaining -= TCPOLEN_WSCALE_ALIGNED;
}
if (likely(doing_ts)) {
opts->options |= OPTION_TS;
opts->tsval = TCP_SKB_CB(skb)->when;
opts->tsecr = req->ts_recent;
- size += TCPOLEN_TSTAMP_ALIGNED;
+ remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
if (likely(ireq->sack_ok)) {
opts->options |= OPTION_SACK_ADVERTISE;
if (unlikely(!doing_ts))
- size += TCPOLEN_SACKPERM_ALIGNED;
+ remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
- return size;
+ /* Similar rationale to tcp_syn_options() applies here, too.
+ * If the <SYN> options fit, the same options should fit now!
+ */
+ if (*md5 == NULL &&
+ doing_ts &&
+ cookie_plus > TCPOLEN_COOKIE_BASE) {
+ int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
+
+ if (0x2 & need) {
+ /* 32-bit multiple */
+ need += 2; /* NOPs */
+ }
+ if (need <= remaining) {
+ opts->options |= OPTION_COOKIE_EXTENSION;
+ opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
+ remaining -= need;
+ } else {
+ /* There's no error return, so flag it. */
+ xvp->cookie_out_never = 1; /* true */
+ opts->hash_size = 0;
+ }
+ }
+ return MAX_TCP_OPTION_SPACE - remaining;
}
/* Compute TCP options for ESTABLISHED sockets. This is not the
@@ -624,7 +792,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_out_options opts;
unsigned tcp_options_size, tcp_header_size;
struct tcp_md5sig_key *md5;
- __u8 *md5_hash_location;
struct tcphdr *th;
int err;
@@ -695,7 +862,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
}
- tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
+ tcp_options_write((__be32 *)(th + 1), tp, &opts);
if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
@@ -703,7 +870,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
- tp->af_specific->calc_md5_hash(md5_hash_location,
+ tp->af_specific->calc_md5_hash(opts.hash_location,
md5, sk, NULL, skb);
}
#endif
@@ -1923,8 +2090,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* case, when window is shrunk to zero. In this case
* our retransmit serves as a zero window probe.
*/
- if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))
- && TCP_SKB_CB(skb)->seq != tp->snd_una)
+ if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
+ TCP_SKB_CB(skb)->seq != tp->snd_una)
return -EAGAIN;
if (skb->len > cur_mss) {
@@ -2224,16 +2391,17 @@ int tcp_send_synack(struct sock *sk)
/* Prepare a SYN-ACK. */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
- struct request_sock *req)
+ struct request_sock *req,
+ struct request_values *rvp)
{
+ struct tcp_out_options opts;
+ struct tcp_extend_values *xvp = tcp_xv(rvp);
struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_sock *tp = tcp_sk(sk);
struct tcphdr *th;
- int tcp_header_size;
- struct tcp_out_options opts;
struct sk_buff *skb;
struct tcp_md5sig_key *md5;
- __u8 *md5_hash_location;
+ int tcp_header_size;
int mss;
skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
@@ -2271,8 +2439,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
#endif
TCP_SKB_CB(skb)->when = tcp_time_stamp;
tcp_header_size = tcp_synack_options(sk, req, mss,
- skb, &opts, &md5) +
- sizeof(struct tcphdr);
+ skb, &opts, &md5, xvp)
+ + sizeof(*th);
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
@@ -2289,19 +2457,58 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
*/
tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+
+ if (OPTION_COOKIE_EXTENSION & opts.options) {
+ const struct tcp_cookie_values *cvp = tp->cookie_values;
+
+ if (cvp != NULL &&
+ cvp->s_data_constant &&
+ cvp->s_data_desired > 0) {
+ u8 *buf = skb_put(skb, cvp->s_data_desired);
+
+ /* copy data directly from the listening socket. */
+ memcpy(buf, cvp->s_data_payload, cvp->s_data_desired);
+ TCP_SKB_CB(skb)->end_seq += cvp->s_data_desired;
+ }
+
+ if (opts.hash_size > 0) {
+ __u32 workspace[SHA_WORKSPACE_WORDS];
+ u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
+ u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
+
+ /* Secret recipe depends on the Timestamp, (future)
+ * Sequence and Acknowledgment Numbers, Initiator
+ * Cookie, and others handled by the IP-variant caller.
+ */
+ *tail-- ^= opts.tsval;
+ *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
+ *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
+
+ /* recommended */
+ *tail-- ^= ((th->dest << 16) | th->source);
+ *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
+
+ sha_transform((__u32 *)&xvp->cookie_bakery[0],
+ (char *)mess,
+ &workspace[0]);
+ opts.hash_location =
+ (__u8 *)&xvp->cookie_bakery[0];
+ }
+ }
+
th->seq = htonl(TCP_SKB_CB(skb)->seq);
th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rcv_wnd, 65535U));
- tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
+ tcp_options_write((__be32 *)(th + 1), tp, &opts);
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
if (md5) {
- tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location,
+ tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
md5, NULL, req, skb);
}
#endif
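
In tcp_make_synack() above, per-connection words (the timestamp value, both initial sequence numbers, the port pair) are XOR-folded into the tail of a fixed-size message block, and a SHA-1 transform turns that block into the cookie words. A rough userspace sketch of the folding step, with a trivial stand-in mixer where the kernel calls sha_transform(); the word counts and all values are assumptions for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    #define COOKIE_DIGEST_WORDS   5   /* SHA-1 digest size in 32-bit words (assumed) */
    #define COOKIE_MESSAGE_WORDS 16   /* one 64-byte SHA-1 message block (assumed)   */

    /* Stand-in for the kernel's sha_transform(): any strong mixing of the
     * 16-word message into the 5-word digest would do for illustration. */
    static void mix(uint32_t digest[COOKIE_DIGEST_WORDS],
                    const uint32_t mess[COOKIE_MESSAGE_WORDS])
    {
        for (int i = 0; i < COOKIE_MESSAGE_WORDS; i++)
            digest[i % COOKIE_DIGEST_WORDS] ^= mess[i] * 2654435761u;
    }

    int main(void)
    {
        uint32_t bakery[COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS] = { 0 };
        uint32_t *mess = &bakery[COOKIE_DIGEST_WORDS];
        uint32_t *tail = &mess[COOKIE_MESSAGE_WORDS - 1];

        /* Fold connection-specific words in from the tail, as in the patch. */
        *tail-- ^= 0x12345678u;            /* timestamp value             */
        *tail-- ^= 0x00000002u;            /* peer ISN + 1                */
        *tail-- ^= 0x00000001u;            /* our ISN + 1                 */
        *tail-- ^= (80u << 16) | 54321u;   /* (dest << 16) | source port  */

        mix(&bakery[0], mess);
        printf("cookie word 0: %08x\n", bakery[0]);
        return 0;
    }
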
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 7a3cc2ffad84..bb110c5ce1d2 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -95,8 +95,8 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
/* Only update if port matches */
if ((port == 0 || ntohs(inet->inet_dport) == port ||
- ntohs(inet->inet_sport) == port)
- && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
+ ntohs(inet->inet_sport) == port) &&
+ (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
spin_lock(&tcp_probe.lock);
/* If log fills, just silently drop */
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index e9bbff746488..b612acf76183 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -165,9 +165,8 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* every other rtt.
*/
if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- if (veno->inc
- && tp->snd_cwnd <
- tp->snd_cwnd_clamp) {
+ if (veno->inc &&
+ tp->snd_cwnd < tp->snd_cwnd_clamp) {
tp->snd_cwnd++;
veno->inc = 0;
} else
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 66b6821b984e..a0f240358892 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -157,8 +157,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
if (queue > TCP_YEAH_ALPHA ||
rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
- if (queue > TCP_YEAH_ALPHA
- && tp->snd_cwnd > yeah->reno_count) {
+ if (queue > TCP_YEAH_ALPHA &&
+ tp->snd_cwnd > yeah->reno_count) {
u32 reduction = min(queue / TCP_YEAH_GAMMA ,
tp->snd_cwnd >> TCP_YEAH_EPSILON);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4274c1cc78fd..1f9534846ca9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -136,33 +136,67 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
struct hlist_nulls_node *node;
sk_nulls_for_each(sk2, node, &hslot->head)
- if (net_eq(sock_net(sk2), net) &&
- sk2 != sk &&
- (bitmap || sk2->sk_hash == num) &&
- (!sk2->sk_reuse || !sk->sk_reuse) &&
- (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
- || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
+ if (net_eq(sock_net(sk2), net) &&
+ sk2 != sk &&
+ (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
+ (!sk2->sk_reuse || !sk->sk_reuse) &&
+ (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2)) {
if (bitmap)
- __set_bit(sk2->sk_hash >> log, bitmap);
+ __set_bit(udp_sk(sk2)->udp_port_hash >> log,
+ bitmap);
else
return 1;
}
return 0;
}
+/*
+ * Note: we still hold the spinlock of the primary hash chain, so no other
+ * writer can insert/delete a socket with local_port == num
+ */
+static int udp_lib_lport_inuse2(struct net *net, __u16 num,
+ struct udp_hslot *hslot2,
+ struct sock *sk,
+ int (*saddr_comp)(const struct sock *sk1,
+ const struct sock *sk2))
+{
+ struct sock *sk2;
+ struct hlist_nulls_node *node;
+ int res = 0;
+
+ spin_lock(&hslot2->lock);
+ udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
+ if (net_eq(sock_net(sk2), net) &&
+ sk2 != sk &&
+ (udp_sk(sk2)->udp_port_hash == num) &&
+ (!sk2->sk_reuse || !sk->sk_reuse) &&
+ (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
+ sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
+ (*saddr_comp)(sk, sk2)) {
+ res = 1;
+ break;
+ }
+ spin_unlock(&hslot2->lock);
+ return res;
+}
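
udp_lib_lport_inuse2() above lets the bind path resolve conflicts on the secondary table, which is keyed on (local address, port) rather than on the port alone. A toy sketch of the chain-selection heuristic applied a little further down in udp_lib_get_port(); the threshold of 10 mirrors the patch, everything else is illustrative:

    #include <stdio.h>

    /* The primary table hashes on the port only, the secondary on
     * (address, port).  When the port chain is long, scanning the usually
     * much shorter (address, port) chain gives the same answer at a
     * fraction of the cost.  Counts below are made up. */
    struct slot { int count; };

    static int pick_chain(const struct slot *by_port, const struct slot *by_addrport)
    {
        if (by_port->count > 10 && by_addrport->count < by_port->count)
            return 1;   /* scan the secondary (address, port) chain */
        return 0;       /* scan the primary port chain              */
    }

    int main(void)
    {
        struct slot port = { .count = 500 }, addrport = { .count = 3 };

        printf("scan %s chain\n",
               pick_chain(&port, &addrport) ? "(addr,port)" : "port");
        return 0;
    }
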
+
/**
* udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
*
* @sk: socket struct in question
* @snum: port number to look up
* @saddr_comp: AF-dependent comparison of bound local IP addresses
+ * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
+ * with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2))
+ const struct sock *sk2),
+ unsigned int hash2_nulladdr)
{
- struct udp_hslot *hslot;
+ struct udp_hslot *hslot, *hslot2;
struct udp_table *udptable = sk->sk_prot->h.udp_table;
int error = 1;
struct net *net = sock_net(sk);
@@ -209,16 +243,49 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
} else {
hslot = udp_hashslot(udptable, net, snum);
spin_lock_bh(&hslot->lock);
+ if (hslot->count > 10) {
+ int exist;
+ unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
+
+ slot2 &= udptable->mask;
+ hash2_nulladdr &= udptable->mask;
+
+ hslot2 = udp_hashslot2(udptable, slot2);
+ if (hslot->count < hslot2->count)
+ goto scan_primary_hash;
+
+ exist = udp_lib_lport_inuse2(net, snum, hslot2,
+ sk, saddr_comp);
+ if (!exist && (hash2_nulladdr != slot2)) {
+ hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
+ exist = udp_lib_lport_inuse2(net, snum, hslot2,
+ sk, saddr_comp);
+ }
+ if (exist)
+ goto fail_unlock;
+ else
+ goto found;
+ }
+scan_primary_hash:
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
saddr_comp, 0))
goto fail_unlock;
}
found:
inet_sk(sk)->inet_num = snum;
- sk->sk_hash = snum;
+ udp_sk(sk)->udp_port_hash = snum;
+ udp_sk(sk)->udp_portaddr_hash ^= snum;
if (sk_unhashed(sk)) {
sk_nulls_add_node_rcu(sk, &hslot->head);
+ hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ spin_lock(&hslot2->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &hslot2->head);
+ hslot2->count++;
+ spin_unlock(&hslot2->lock);
}
error = 0;
fail_unlock:
@@ -237,9 +304,22 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}
+static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
+ unsigned int port)
+{
+ return jhash_1word(saddr, net_hash_mix(net)) ^ port;
+}
+
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
- return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal);
+ unsigned int hash2_nulladdr =
+ udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum);
+ unsigned int hash2_partial =
+ udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
+
+ /* precompute partial secondary hash */
+ udp_sk(sk)->udp_portaddr_hash = hash2_partial;
+ return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
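
udp_v4_get_port() above precomputes the address part of the secondary hash with port 0 and stores it in the socket; XOR-ing the chosen port in later completes the hash. A small userspace sketch of that trick, with a stand-in mixer where the kernel uses jhash_1word(); the address and seed values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for jhash_1word(); any 32-bit mixer works for illustration. */
    static uint32_t hash32(uint32_t x, uint32_t seed)
    {
        x ^= seed;
        x *= 2654435761u;
        return x ^ (x >> 16);
    }

    /* The address part is hashed once at socket setup with port 0, and the
     * chosen port is XOR-ed in later.  Because XOR is its own inverse, the
     * partial value can be completed (or undone) cheaply once the port is
     * known, without re-hashing the address. */
    int main(void)
    {
        uint32_t saddr  = 0xc0a80001;     /* 192.168.0.1, illustrative */
        uint32_t netmix = 0xdeadbeef;     /* per-netns seed            */
        uint16_t port   = 4242;

        uint32_t partial = hash32(saddr, netmix) ^ 0;   /* precomputed       */
        uint32_t full    = partial ^ port;              /* completed at bind */

        printf("partial=%08x full=%08x\n", partial, full);
        return 0;
    }
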
static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
@@ -248,7 +328,7 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
{
int score = -1;
- if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
+ if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
!ipv6_only_sock(sk)) {
struct inet_sock *inet = inet_sk(sk);
@@ -277,6 +357,89 @@ static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
return score;
}
+/*
+ * In this second variant, we check that (daddr, dport) matches (inet_rcv_saddr, inet_num)
+ */
+#define SCORE2_MAX (1 + 2 + 2 + 2)
+static inline int compute_score2(struct sock *sk, struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum, int dif)
+{
+ int score = -1;
+
+ if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (inet->inet_rcv_saddr != daddr)
+ return -1;
+ if (inet->inet_num != hnum)
+ return -1;
+
+ score = (sk->sk_family == PF_INET ? 1 : 0);
+ if (inet->inet_daddr) {
+ if (inet->inet_daddr != saddr)
+ return -1;
+ score += 2;
+ }
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
+ return -1;
+ score += 2;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score += 2;
+ }
+ }
+ return score;
+}
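
A worked example of the scoring above: SCORE2_MAX = 1 + 2 + 2 + 2 = 7. The sketch below only models the additive part; in the real function a mismatch on any field the socket has specified (local address, port, peer address, peer port, bound device) rejects the socket with -1 rather than merely scoring lower. Values are illustrative:

    #include <stdio.h>

    /* +1 for an IPv4 socket, +2 each for a matching connected peer address,
     * peer port, and bound device.  A score of 7 ends the scan early. */
    static int score(int is_v4, int daddr_match, int dport_match, int dev_match)
    {
        int s = is_v4 ? 1 : 0;

        s += daddr_match ? 2 : 0;
        s += dport_match ? 2 : 0;
        s += dev_match   ? 2 : 0;
        return s;
    }

    int main(void)
    {
        printf("connected socket scores %d, wildcard listener scores %d\n",
               score(1, 1, 1, 1), score(1, 0, 0, 0));
        return 0;
    }
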
+
+
+/* called with rcu_read_lock() */
+static struct sock *udp4_lib_lookup2(struct net *net,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned int hnum, int dif,
+ struct udp_hslot *hslot2, unsigned int slot2)
+{
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
+ int score, badness;
+
+begin:
+ result = NULL;
+ badness = -1;
+ udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+ score = compute_score2(sk, net, saddr, sport,
+ daddr, hnum, dif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ if (score == SCORE2_MAX)
+ goto exact_match;
+ }
+ }
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot2)
+ goto begin;
+
+ if (result) {
+exact_match:
+ if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ result = NULL;
+ else if (unlikely(compute_score2(result, net, saddr, sport,
+ daddr, hnum, dif) < badness)) {
+ sock_put(result);
+ goto begin;
+ }
+ }
+ return result;
+}
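
The restart in udp4_lib_lookup2() relies on the nulls marker at the end of each chain encoding the slot number: a lockless walker that drifts onto another chain sees the wrong marker and retries. A minimal sketch of just that retry shape; there is no real RCU or list here, it is purely illustrative:

    #include <stdio.h>

    /* The walk is lockless, so the chain may be spliced while we scan it.
     * The end-of-list marker encodes the slot number; seeing the wrong
     * marker means we drifted onto another chain and must restart. */
    struct fake_walk { int end_marker; int attempts; };

    static int lookup(struct fake_walk *w, int expected_slot)
    {
    begin:
        w->attempts++;
        /* ... scan the chain, remembering the best match ... */
        if (w->end_marker != expected_slot) {
            w->end_marker = expected_slot;   /* pretend the chain settled */
            goto begin;
        }
        return w->attempts;
    }

    int main(void)
    {
        struct fake_walk w = { .end_marker = 7, .attempts = 0 };

        printf("lookup finished after %d pass(es)\n", lookup(&w, 3));
        return 0;
    }
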
+
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
@@ -287,11 +450,35 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
- unsigned int hash = udp_hashfn(net, hnum, udptable->mask);
- struct udp_hslot *hslot = &udptable->hash[hash];
+ unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness;
rcu_read_lock();
+ if (hslot->count > 10) {
+ hash2 = udp4_portaddr_hash(net, daddr, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+ if (hslot->count < hslot2->count)
+ goto begin;
+
+ result = udp4_lib_lookup2(net, saddr, sport,
+ daddr, hnum, dif,
+ hslot2, slot2);
+ if (!result) {
+ hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+ if (hslot->count < hslot2->count)
+ goto begin;
+
+ result = udp4_lib_lookup2(net, INADDR_ANY, sport,
+ daddr, hnum, dif,
+ hslot2, slot2);
+ }
+ rcu_read_unlock();
+ return result;
+ }
begin:
result = NULL;
badness = -1;
@@ -308,7 +495,7 @@ begin:
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
- if (get_nulls_value(node) != hash)
+ if (get_nulls_value(node) != slot)
goto begin;
if (result) {
@@ -358,13 +545,13 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
sk_nulls_for_each_from(s, node) {
struct inet_sock *inet = inet_sk(s);
- if (!net_eq(sock_net(s), net) ||
- s->sk_hash != hnum ||
- (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
- (inet->inet_dport != rmt_port && inet->inet_dport) ||
- (inet->inet_rcv_saddr &&
- inet->inet_rcv_saddr != loc_addr) ||
- ipv6_only_sock(s) ||
+ if (!net_eq(sock_net(s), net) ||
+ udp_sk(s)->udp_port_hash != hnum ||
+ (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+ (inet->inet_dport != rmt_port && inet->inet_dport) ||
+ (inet->inet_rcv_saddr &&
+ inet->inet_rcv_saddr != loc_addr) ||
+ ipv6_only_sock(s) ||
(s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
continue;
if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
@@ -1005,9 +1192,7 @@ try_again:
err = ulen;
out_free:
- lock_sock(sk);
- skb_free_datagram(sk, skb);
- release_sock(sk);
+ skb_free_datagram_locked(sk, skb);
out:
return err;
@@ -1050,13 +1235,22 @@ void udp_lib_unhash(struct sock *sk)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
- struct udp_hslot *hslot = udp_hashslot(udptable, sock_net(sk),
- sk->sk_hash);
+ struct udp_hslot *hslot, *hslot2;
+
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock_bh(&hslot->lock);
if (sk_nulls_del_node_init_rcu(sk)) {
+ hslot->count--;
inet_sk(sk)->inet_num = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+
+ spin_lock(&hslot2->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hslot2->count--;
+ spin_unlock(&hslot2->lock);
}
spin_unlock_bh(&hslot->lock);
}
@@ -1192,49 +1386,83 @@ drop:
return -1;
}
+
+static void flush_stack(struct sock **stack, unsigned int count,
+ struct sk_buff *skb, unsigned int final)
+{
+ unsigned int i;
+ struct sk_buff *skb1 = NULL;
+ struct sock *sk;
+
+ for (i = 0; i < count; i++) {
+ sk = stack[i];
+ if (likely(skb1 == NULL))
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+ if (!skb1) {
+ atomic_inc(&sk->sk_drops);
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+ IS_UDPLITE(sk));
+ UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+ IS_UDPLITE(sk));
+ }
+
+ if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
+ skb1 = NULL;
+ }
+ if (unlikely(skb1))
+ kfree_skb(skb1);
+}
+
/*
* Multicasts and broadcasts go to each listener.
*
- * Note: called only from the BH handler context,
- * so we don't need to lock the hashes.
+ * Note: called only from the BH handler context.
*/
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
struct udp_table *udptable)
{
- struct sock *sk;
+ struct sock *sk, *stack[256 / sizeof(struct sock *)];
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
int dif;
+ unsigned int i, count = 0;
spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head);
dif = skb->dev->ifindex;
sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
- if (sk) {
- struct sock *sknext = NULL;
-
- do {
- struct sk_buff *skb1 = skb;
-
- sknext = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
- daddr, uh->source, saddr,
- dif);
- if (sknext)
- skb1 = skb_clone(skb, GFP_ATOMIC);
-
- if (skb1) {
- int ret = udp_queue_rcv_skb(sk, skb1);
- if (ret > 0)
- /* we should probably re-process instead
- * of dropping packets here. */
- kfree_skb(skb1);
- }
- sk = sknext;
- } while (sknext);
- } else
- consume_skb(skb);
+ while (sk) {
+ stack[count++] = sk;
+ sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
+ daddr, uh->source, saddr, dif);
+ if (unlikely(count == ARRAY_SIZE(stack))) {
+ if (!sk)
+ break;
+ flush_stack(stack, count, skb, ~0);
+ count = 0;
+ }
+ }
+ /*
+ * Before releasing the chain lock, we must take a reference on the sockets
+ */
+ for (i = 0; i < count; i++)
+ sock_hold(stack[i]);
+
spin_unlock(&hslot->lock);
+
+ /*
+ * do the slow work with no lock held
+ */
+ if (count) {
+ flush_stack(stack, count, skb, count - 1);
+
+ for (i = 0; i < count; i++)
+ sock_put(stack[i]);
+ } else {
+ kfree_skb(skb);
+ }
return 0;
}
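
The rewritten delivery path above gathers matching sockets into an on-stack array under the chain lock, takes references, unlocks, and only then clones and queues the datagram, handing the original skb to the last receiver so one clone is saved. A userspace sketch of the batching shape, with strings standing in for sk_buffs and sockets:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Deliver 'buf' to each receiver; the one at index 'final' consumes the
     * original buffer, everyone else gets a copy.  Mirrors the shape of
     * flush_stack() above, nothing more. */
    static void flush(const char **receivers, unsigned int count,
                      const char *buf, unsigned int final)
    {
        for (unsigned int i = 0; i < count; i++) {
            char *copy = (i == final) ? NULL : strdup(buf);

            printf("deliver '%s' to %s%s\n", copy ? copy : buf, receivers[i],
                   i == final ? " (original, no clone)" : " (clone)");
            free(copy);
        }
    }

    int main(void)
    {
        const char *receivers[] = { "sock A", "sock B", "sock C" };
        unsigned int count = 3;

        /* Under the chain lock the kernel only collects the sockets and
         * takes references; the cloning and queueing happens afterwards. */
        flush(receivers, count, "datagram", count - 1);
        return 0;
    }
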
@@ -1844,7 +2072,7 @@ void __init udp_table_init(struct udp_table *table, const char *name)
if (!CONFIG_BASE_SMALL)
table->hash = alloc_large_system_hash(name,
- sizeof(struct udp_hslot),
+ 2 * sizeof(struct udp_hslot),
uhash_entries,
21, /* one slot per 2 MB */
0,
@@ -1856,16 +2084,23 @@ void __init udp_table_init(struct udp_table *table, const char *name)
*/
if (CONFIG_BASE_SMALL || table->mask < UDP_HTABLE_SIZE_MIN - 1) {
table->hash = kmalloc(UDP_HTABLE_SIZE_MIN *
- sizeof(struct udp_hslot), GFP_KERNEL);
+ 2 * sizeof(struct udp_hslot), GFP_KERNEL);
if (!table->hash)
panic(name);
table->log = ilog2(UDP_HTABLE_SIZE_MIN);
table->mask = UDP_HTABLE_SIZE_MIN - 1;
}
+ table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
+ table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
+ for (i = 0; i <= table->mask; i++) {
+ INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
+ table->hash2[i].count = 0;
+ spin_lock_init(&table->hash2[i].lock);
+ }
}
void __init udp_init(void)
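
udp_table_init() above sizes one allocation for twice the number of slots and points table->hash2 at the upper half, so the port table and the (address, port) table come from a single allocation. A userspace sketch of the same aliasing trick; sizes are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct slot { int count; };

    /* One array twice the needed size; the secondary table simply aliases
     * its upper half, so both tables share one allocation and one free. */
    int main(void)
    {
        unsigned int mask = 127;                        /* 128 slots per table */
        struct slot *hash = calloc(2 * (mask + 1), sizeof(*hash));
        struct slot *hash2;

        if (!hash)
            return 1;
        hash2 = hash + (mask + 1);                      /* upper half */
        hash[0].count = 1;
        hash2[0].count = 2;
        printf("hash[0]=%d hash2[0]=%d\n", hash[0].count, hash2[0].count);
        free(hash);
        return 0;
    }
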
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 470c504b9554..66f79513f4a5 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -64,7 +64,6 @@ static struct inet_protosw udplite4_protosw = {
.protocol = IPPROTO_UDPLITE,
.prot = &udplite_prot,
.ops = &inet_dgram_ops,
- .capability = -1,
.no_check = 0, /* must checksum (RFC 3828) */
.flags = INET_PROTOSW_PERMANENT,
};
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 918648409612..b1ce8fc62049 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -481,9 +481,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- read_lock(&dev_base_lock);
- for_each_netdev(net, dev) {
- rcu_read_lock();
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -491,9 +490,8 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
if (changed)
dev_forward_change(idev);
}
- rcu_read_unlock();
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
@@ -1137,10 +1135,9 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
hiscore->rule = -1;
hiscore->ifa = NULL;
- read_lock(&dev_base_lock);
rcu_read_lock();
- for_each_netdev(net, dev) {
+ for_each_netdev_rcu(net, dev) {
struct inet6_dev *idev;
/* Candidate Source Address (section 4)
@@ -1235,7 +1232,6 @@ try_nextdev:
read_unlock_bh(&idev->lock);
}
rcu_read_unlock();
- read_unlock(&dev_base_lock);
if (!hiscore->ifa)
return -EADDRNOTAVAIL;
@@ -3485,85 +3481,114 @@ enum addr_type_t
ANYCAST_ADDR,
};
+/* called with rcu_read_lock() */
+static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
+ struct netlink_callback *cb, enum addr_type_t type,
+ int s_ip_idx, int *p_ip_idx)
+{
+ struct inet6_ifaddr *ifa;
+ struct ifmcaddr6 *ifmca;
+ struct ifacaddr6 *ifaca;
+ int err = 1;
+ int ip_idx = *p_ip_idx;
+
+ read_lock_bh(&idev->lock);
+ switch (type) {
+ case UNICAST_ADDR:
+ /* unicast address incl. temp addr */
+ for (ifa = idev->addr_list; ifa;
+ ifa = ifa->if_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+ err = inet6_fill_ifaddr(skb, ifa,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWADDR,
+ NLM_F_MULTI);
+ if (err <= 0)
+ break;
+ }
+ break;
+ case MULTICAST_ADDR:
+ /* multicast address */
+ for (ifmca = idev->mc_list; ifmca;
+ ifmca = ifmca->next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+ err = inet6_fill_ifmcaddr(skb, ifmca,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_GETMULTICAST,
+ NLM_F_MULTI);
+ if (err <= 0)
+ break;
+ }
+ break;
+ case ANYCAST_ADDR:
+ /* anycast address */
+ for (ifaca = idev->ac_list; ifaca;
+ ifaca = ifaca->aca_next, ip_idx++) {
+ if (ip_idx < s_ip_idx)
+ continue;
+ err = inet6_fill_ifacaddr(skb, ifaca,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_GETANYCAST,
+ NLM_F_MULTI);
+ if (err <= 0)
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+ *p_ip_idx = ip_idx;
+ return err;
+}
+
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
enum addr_type_t type)
{
+ struct net *net = sock_net(skb->sk);
+ int h, s_h;
int idx, ip_idx;
int s_idx, s_ip_idx;
- int err = 1;
struct net_device *dev;
- struct inet6_dev *idev = NULL;
- struct inet6_ifaddr *ifa;
- struct ifmcaddr6 *ifmca;
- struct ifacaddr6 *ifaca;
- struct net *net = sock_net(skb->sk);
-
- s_idx = cb->args[0];
- s_ip_idx = ip_idx = cb->args[1];
+ struct inet6_dev *idev;
+ struct hlist_head *head;
+ struct hlist_node *node;
- idx = 0;
- for_each_netdev(net, dev) {
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- s_ip_idx = 0;
- ip_idx = 0;
- if ((idev = in6_dev_get(dev)) == NULL)
- goto cont;
- read_lock_bh(&idev->lock);
- switch (type) {
- case UNICAST_ADDR:
- /* unicast address incl. temp addr */
- for (ifa = idev->addr_list; ifa;
- ifa = ifa->if_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- err = inet6_fill_ifaddr(skb, ifa,
- NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDR,
- NLM_F_MULTI);
- }
- break;
- case MULTICAST_ADDR:
- /* multicast address */
- for (ifmca = idev->mc_list; ifmca;
- ifmca = ifmca->next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- err = inet6_fill_ifmcaddr(skb, ifmca,
- NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_GETMULTICAST,
- NLM_F_MULTI);
- }
- break;
- case ANYCAST_ADDR:
- /* anycast address */
- for (ifaca = idev->ac_list; ifaca;
- ifaca = ifaca->aca_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
- continue;
- err = inet6_fill_ifacaddr(skb, ifaca,
- NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_GETANYCAST,
- NLM_F_MULTI);
- }
- break;
- default:
- break;
- }
- read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
+ s_h = cb->args[0];
+ s_idx = idx = cb->args[1];
+ s_ip_idx = ip_idx = cb->args[2];
- if (err <= 0)
- break;
+ rcu_read_lock();
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ if (idx > s_idx)
+ s_ip_idx = 0;
+ ip_idx = 0;
+ if ((idev = __in6_dev_get(dev)) == NULL)
+ goto cont;
+
+ if (in6_dump_addrs(idev, skb, cb, type,
+ s_ip_idx, &ip_idx) <= 0)
+ goto done;
cont:
- idx++;
+ idx++;
+ }
}
- cb->args[0] = idx;
- cb->args[1] = ip_idx;
+done:
+ rcu_read_unlock();
+ cb->args[0] = h;
+ cb->args[1] = idx;
+ cb->args[2] = ip_idx;
+
return skb->len;
}
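
The dump above keeps three resume coordinates in cb->args: the device hash bucket, the device position within that bucket, and the address index within that device, so a dump that fills one skb can continue exactly where it stopped. A small sketch of resuming a nested walk from saved coordinates; the bucket and item counts and the budget are made up:

    #include <stdio.h>

    #define BUCKETS 4
    #define ITEMS   3

    /* Emit up to 'budget' items per call; when the budget runs out, save the
     * (bucket, item) position and return so the next call resumes there. */
    static int dump(int *s_h, int *s_idx, int budget)
    {
        int emitted = 0;

        for (int h = *s_h; h < BUCKETS; h++, *s_idx = 0) {
            for (int idx = *s_idx; idx < ITEMS; idx++) {
                if (emitted == budget) {        /* "skb" full: save position */
                    *s_h = h;
                    *s_idx = idx;
                    return emitted;
                }
                printf("bucket %d item %d\n", h, idx);
                emitted++;
            }
        }
        *s_h = BUCKETS;                         /* walk complete */
        return emitted;
    }

    int main(void)
    {
        int h = 0, idx = 0;

        while (dump(&h, &idx, 5) == 5 && h < BUCKETS)
            printf("-- resume --\n");
        return 0;
    }
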
@@ -3827,28 +3852,39 @@ nla_put_failure:
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- int idx, err;
- int s_idx = cb->args[0];
+ int h, s_h;
+ int idx = 0, s_idx;
struct net_device *dev;
struct inet6_dev *idev;
+ struct hlist_head *head;
+ struct hlist_node *node;
- read_lock(&dev_base_lock);
- idx = 0;
- for_each_netdev(net, dev) {
- if (idx < s_idx)
- goto cont;
- if ((idev = in6_dev_get(dev)) == NULL)
- goto cont;
- err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWLINK, NLM_F_MULTI);
- in6_dev_put(idev);
- if (err <= 0)
- break;
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+ rcu_read_lock();
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ idev = __in6_dev_get(dev);
+ if (!idev)
+ goto cont;
+ if (inet6_fill_ifinfo(skb, idev,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWLINK, NLM_F_MULTI) <= 0)
+ goto out;
cont:
- idx++;
+ idx++;
+ }
}
- read_unlock(&dev_base_lock);
- cb->args[0] = idx;
+out:
+ rcu_read_unlock();
+ cb->args[1] = idx;
+ cb->args[0] = h;
return skb->len;
}
@@ -4052,9 +4088,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- read_lock(&dev_base_lock);
- for_each_netdev(net, dev) {
- rcu_read_lock();
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -4062,9 +4097,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
if (changed)
dev_disable_change(idev);
}
- rcu_read_unlock();
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
@@ -4464,7 +4498,7 @@ static int addrconf_init_net(struct net *net)
all = &ipv6_devconf;
dflt = &ipv6_devconf_dflt;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
if (all == NULL)
goto err_alloc_all;
@@ -4512,7 +4546,7 @@ static void addrconf_exit_net(struct net *net)
__addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
__addrconf_sysctl_unregister(net->ipv6.devconf_all);
#endif
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
kfree(net->ipv6.devconf_dflt);
kfree(net->ipv6.devconf_all);
}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b6d058818673..12e69d364dd5 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -95,7 +95,8 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
-static int inet6_create(struct net *net, struct socket *sock, int protocol)
+static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct inet_sock *inet;
struct ipv6_pinfo *np;
@@ -158,7 +159,7 @@ lookup_protocol:
}
err = -EPERM;
- if (answer->capability > 0 && !capable(answer->capability))
+ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
goto out_rcu_unlock;
sock->ops = answer->ops;
@@ -314,6 +315,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
+ rcu_read_lock();
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->sin6_scope_id) {
@@ -326,12 +328,12 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if) {
err = -EINVAL;
- goto out;
+ goto out_unlock;
}
- dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
if (!dev) {
err = -ENODEV;
- goto out;
+ goto out_unlock;
}
}
@@ -342,14 +344,11 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
if (!ipv6_chk_addr(net, &addr->sin6_addr,
dev, 0)) {
- if (dev)
- dev_put(dev);
err = -EADDRNOTAVAIL;
- goto out;
+ goto out_unlock;
}
}
- if (dev)
- dev_put(dev);
+ rcu_read_unlock();
}
}
@@ -381,6 +380,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
out:
release_sock(sk);
return err;
+out_unlock:
+ rcu_read_unlock();
+ goto out;
}
EXPORT_SYMBOL(inet6_bind);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0f526f8ea518..c2f300c314be 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -667,7 +667,7 @@ static int ah6_init_state(struct xfrm_state *x)
}
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
- ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
+ ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 1ae58bec1de0..f1c74c8ef9de 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -404,13 +404,13 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
if (dev)
return ipv6_chk_acast_dev(dev, addr);
- read_lock(&dev_base_lock);
- for_each_netdev(net, dev)
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev)
if (ipv6_chk_acast_dev(dev, addr)) {
found = 1;
break;
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return found;
}
@@ -431,9 +431,9 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
struct net *net = seq_file_net(seq);
state->idev = NULL;
- for_each_netdev(net, state->dev) {
+ for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
- idev = in6_dev_get(state->dev);
+ idev = __in6_dev_get(state->dev);
if (!idev)
continue;
read_lock_bh(&idev->lock);
@@ -443,7 +443,6 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq)
break;
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
}
return im;
}
@@ -454,16 +453,15 @@ static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im
im = im->aca_next;
while (!im) {
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev != NULL))
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
break;
}
- state->idev = in6_dev_get(state->dev);
+ state->idev = __in6_dev_get(state->dev);
if (!state->idev)
continue;
read_lock_bh(&state->idev->lock);
@@ -482,29 +480,30 @@ static struct ifacaddr6 *ac6_get_idx(struct seq_file *seq, loff_t pos)
}
static void *ac6_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(RCU)
{
- read_lock(&dev_base_lock);
+ rcu_read_lock();
return ac6_get_idx(seq, *pos);
}
static void *ac6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ifacaddr6 *im;
- im = ac6_get_next(seq, v);
+ struct ifacaddr6 *im = ac6_get_next(seq, v);
+
++*pos;
return im;
}
static void ac6_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(RCU)
{
struct ac6_iter_state *state = ac6_seq_private(seq);
+
if (likely(state->idev != NULL)) {
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
+ state->idev = NULL;
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int ac6_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9f70452a69e7..e6f9cdf780fe 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -537,12 +537,17 @@ int datagram_send_ctl(struct net *net,
addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
+ rcu_read_lock();
if (fl->oif) {
- dev = dev_get_by_index(net, fl->oif);
- if (!dev)
+ dev = dev_get_by_index_rcu(net, fl->oif);
+ if (!dev) {
+ rcu_read_unlock();
return -ENODEV;
- } else if (addr_type & IPV6_ADDR_LINKLOCAL)
+ }
+ } else if (addr_type & IPV6_ADDR_LINKLOCAL) {
+ rcu_read_unlock();
return -EINVAL;
+ }
if (addr_type != IPV6_ADDR_ANY) {
int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL;
@@ -553,8 +558,7 @@ int datagram_send_ctl(struct net *net,
ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr);
}
- if (dev)
- dev_put(dev);
+ rcu_read_unlock();
if (err)
goto exit_f;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index af597c73ebe9..668a46b655e6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -473,7 +473,7 @@ static int esp_init_authenc(struct xfrm_state *x)
}
err = crypto_aead_setauthsize(
- aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+ aead, x->aalg->alg_trunc_len / 8);
if (err)
goto free_key;
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 00a7a5e4ac97..b7aa7c64cc4a 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -264,44 +264,36 @@ static struct fib_rules_ops fib6_rules_ops_template = {
static int fib6_rules_net_init(struct net *net)
{
+ struct fib_rules_ops *ops;
int err = -ENOMEM;
- net->ipv6.fib6_rules_ops = kmemdup(&fib6_rules_ops_template,
- sizeof(*net->ipv6.fib6_rules_ops),
- GFP_KERNEL);
- if (!net->ipv6.fib6_rules_ops)
- goto out;
+ ops = fib_rules_register(&fib6_rules_ops_template, net);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+ net->ipv6.fib6_rules_ops = ops;
- net->ipv6.fib6_rules_ops->fro_net = net;
- INIT_LIST_HEAD(&net->ipv6.fib6_rules_ops->rules_list);
err = fib_default_rule_add(net->ipv6.fib6_rules_ops, 0,
- RT6_TABLE_LOCAL, FIB_RULE_PERMANENT);
+ RT6_TABLE_LOCAL, 0);
if (err)
goto out_fib6_rules_ops;
err = fib_default_rule_add(net->ipv6.fib6_rules_ops,
0x7FFE, RT6_TABLE_MAIN, 0);
if (err)
- goto out_fib6_default_rule_add;
+ goto out_fib6_rules_ops;
- err = fib_rules_register(net->ipv6.fib6_rules_ops);
- if (err)
- goto out_fib6_default_rule_add;
out:
return err;
-out_fib6_default_rule_add:
- fib_rules_cleanup_ops(net->ipv6.fib6_rules_ops);
out_fib6_rules_ops:
- kfree(net->ipv6.fib6_rules_ops);
+ fib_rules_unregister(ops);
goto out;
}
static void fib6_rules_net_exit(struct net *net)
{
fib_rules_unregister(net->ipv6.fib6_rules_ops);
- kfree(net->ipv6.fib6_rules_ops);
}
static struct pernet_operations fib6_rules_net_ops = {
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 7712578bdc66..6e7bffa2205e 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -67,7 +67,7 @@ static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
struct ip6_flowlabel *fl;
for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
- if (fl->label == label && fl->fl_net == net)
+ if (fl->label == label && net_eq(fl->fl_net, net))
return fl;
}
return NULL;
@@ -163,7 +163,8 @@ static void ip6_fl_purge(struct net *net)
struct ip6_flowlabel *fl, **flp;
flp = &fl_ht[i];
while ((fl = *flp) != NULL) {
- if (fl->fl_net == net && atomic_read(&fl->users) == 0) {
+ if (net_eq(fl->fl_net, net) &&
+ atomic_read(&fl->users) == 0) {
*flp = fl->next;
fl_free(fl);
atomic_dec(&fl_size);
@@ -377,8 +378,8 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
goto done;
fl->share = freq->flr_share;
addr_type = ipv6_addr_type(&freq->flr_dst);
- if ((addr_type&IPV6_ADDR_MAPPED)
- || addr_type == IPV6_ADDR_ANY) {
+ if ((addr_type & IPV6_ADDR_MAPPED) ||
+ addr_type == IPV6_ADDR_ANY) {
err = -EINVAL;
goto done;
}
@@ -421,8 +422,8 @@ static int mem_check(struct sock *sk)
if (room <= 0 ||
((count >= FL_MAX_PER_SOCK ||
- (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4)
- && !capable(CAP_NET_ADMIN)))
+ (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
+ !capable(CAP_NET_ADMIN)))
return -ENOBUFS;
return 0;
@@ -630,7 +631,7 @@ static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
fl = fl_ht[state->bucket];
- while (fl && fl->fl_net != net)
+ while (fl && !net_eq(fl->fl_net, net))
fl = fl->next;
if (fl)
break;
@@ -645,7 +646,7 @@ static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flo
fl = fl->next;
try_again:
- while (fl && fl->fl_net != net)
+ while (fl && !net_eq(fl->fl_net, net))
fl = fl->next;
while (!fl) {
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6c1b5c98e818..d453d07b0dfe 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -78,7 +78,7 @@ static void ip6_fb_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
-static int ip6_tnl_net_id;
+static int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
/* the IPv6 tunnel fallback device */
struct net_device *fb_tnl_dev;
@@ -658,6 +658,7 @@ static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
IP6_ECN_set_ce(ipv6_hdr(skb));
}
+/* called with rcu_read_lock() */
static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
{
struct ip6_tnl_parm *p = &t->parms;
@@ -668,15 +669,13 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
struct net_device *ldev = NULL;
if (p->link)
- ldev = dev_get_by_index(net, p->link);
+ ldev = dev_get_by_index_rcu(net, p->link);
if ((ipv6_addr_is_multicast(&p->laddr) ||
likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
ret = 1;
- if (ldev)
- dev_put(ldev);
}
return ret;
}
@@ -804,8 +803,9 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
if (p->flags & IP6_TNL_F_CAP_XMIT) {
struct net_device *ldev = NULL;
+ rcu_read_lock();
if (p->link)
- ldev = dev_get_by_index(net, p->link);
+ ldev = dev_get_by_index_rcu(net, p->link);
if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
printk(KERN_WARNING
@@ -819,8 +819,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
p->name);
else
ret = 1;
- if (ldev)
- dev_put(ldev);
+ rcu_read_unlock();
}
return ret;
}
@@ -1410,17 +1409,8 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
static int ip6_tnl_init_net(struct net *net)
{
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
- struct ip6_tnl_net *ip6n;
-
- err = -ENOMEM;
- ip6n = kzalloc(sizeof(struct ip6_tnl_net), GFP_KERNEL);
- if (ip6n == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, ip6_tnl_net_id, ip6n);
- if (err < 0)
- goto err_assign;
ip6n->tnls[0] = ip6n->tnls_wc;
ip6n->tnls[1] = ip6n->tnls_r_l;
@@ -1443,27 +1433,23 @@ static int ip6_tnl_init_net(struct net *net)
err_register:
free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
- /* nothing */
-err_assign:
- kfree(ip6n);
-err_alloc:
return err;
}
static void ip6_tnl_exit_net(struct net *net)
{
- struct ip6_tnl_net *ip6n;
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- ip6n = net_generic(net, ip6_tnl_net_id);
rtnl_lock();
ip6_tnl_destroy_tunnels(ip6n);
rtnl_unlock();
- kfree(ip6n);
}
static struct pernet_operations ip6_tnl_net_ops = {
.init = ip6_tnl_init_net,
.exit = ip6_tnl_exit_net,
+ .id = &ip6_tnl_net_id,
+ .size = sizeof(struct ip6_tnl_net),
};
/**
@@ -1488,7 +1474,7 @@ static int __init ip6_tunnel_init(void)
goto unreg_ip4ip6;
}
- err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
+ err = register_pernet_device(&ip6_tnl_net_ops);
if (err < 0)
goto err_pernet;
return 0;
@@ -1512,7 +1498,7 @@ static void __exit ip6_tunnel_cleanup(void)
if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
- unregister_pernet_gen_device(ip6_tnl_net_id, &ip6_tnl_net_ops);
+ unregister_pernet_device(&ip6_tnl_net_ops);
}
module_init(ip6_tunnel_init);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f9fcf690bd5d..1f9c44442e65 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2375,9 +2375,9 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
struct net *net = seq_file_net(seq);
state->idev = NULL;
- for_each_netdev(net, state->dev) {
+ for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
- idev = in6_dev_get(state->dev);
+ idev = __in6_dev_get(state->dev);
if (!idev)
continue;
read_lock_bh(&idev->lock);
@@ -2387,7 +2387,6 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
break;
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
}
return im;
}
@@ -2398,16 +2397,15 @@ static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr
im = im->next;
while (!im) {
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev != NULL))
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
break;
}
- state->idev = in6_dev_get(state->dev);
+ state->idev = __in6_dev_get(state->dev);
if (!state->idev)
continue;
read_lock_bh(&state->idev->lock);
@@ -2426,31 +2424,31 @@ static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
}
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(RCU)
{
- read_lock(&dev_base_lock);
+ rcu_read_lock();
return igmp6_mc_get_idx(seq, *pos);
}
static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ifmcaddr6 *im;
- im = igmp6_mc_get_next(seq, v);
+ struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
+
++*pos;
return im;
}
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(RCU)
{
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
+
if (likely(state->idev != NULL)) {
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
state->idev = NULL;
}
state->dev = NULL;
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
@@ -2507,9 +2505,9 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
state->idev = NULL;
state->im = NULL;
- for_each_netdev(net, state->dev) {
+ for_each_netdev_rcu(net, state->dev) {
struct inet6_dev *idev;
- idev = in6_dev_get(state->dev);
+ idev = __in6_dev_get(state->dev);
if (unlikely(idev == NULL))
continue;
read_lock_bh(&idev->lock);
@@ -2525,7 +2523,6 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
spin_unlock_bh(&im->mca_lock);
}
read_unlock_bh(&idev->lock);
- in6_dev_put(idev);
}
return psf;
}
@@ -2539,16 +2536,15 @@ static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_s
spin_unlock_bh(&state->im->mca_lock);
state->im = state->im->next;
while (!state->im) {
- if (likely(state->idev != NULL)) {
+ if (likely(state->idev != NULL))
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
- }
- state->dev = next_net_device(state->dev);
+
+ state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
- state->idev = in6_dev_get(state->dev);
+ state->idev = __in6_dev_get(state->dev);
if (!state->idev)
continue;
read_lock_bh(&state->idev->lock);
@@ -2573,9 +2569,9 @@ static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
}
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(dev_base_lock)
+ __acquires(RCU)
{
- read_lock(&dev_base_lock);
+ rcu_read_lock();
return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
@@ -2591,7 +2587,7 @@ static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
- __releases(dev_base_lock)
+ __releases(RCU)
{
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
if (likely(state->im != NULL)) {
@@ -2600,11 +2596,10 @@ static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
}
if (likely(state->idev != NULL)) {
read_unlock_bh(&state->idev->lock);
- in6_dev_put(state->idev);
state->idev = NULL;
}
state->dev = NULL;
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 47a3623e7119..db4d5725cce8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -501,7 +501,7 @@ ipq_rcv_nl_event(struct notifier_block *this,
if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
write_lock_bh(&queue_lock);
- if ((n->net == &init_net) && (n->pid == peer_pid))
+ if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
write_unlock_bh(&queue_lock);
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index cb834ab7f071..926ce8eeffaf 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -249,7 +249,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Raw sockets are IPv6 only */
if (addr_type == IPV6_ADDR_MAPPED)
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
lock_sock(sk);
@@ -257,6 +257,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (sk->sk_state != TCP_CLOSE)
goto out;
+ rcu_read_lock();
/* Check if the address belongs to the host. */
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
@@ -272,13 +273,13 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* Binding to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
- goto out;
+ goto out_unlock;
- dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
- if (!dev) {
- err = -ENODEV;
- goto out;
- }
+ err = -ENODEV;
+ dev = dev_get_by_index_rcu(sock_net(sk),
+ sk->sk_bound_dev_if);
+ if (!dev)
+ goto out_unlock;
}
/* ipv4 addr of the socket is invalid. Only the
@@ -289,13 +290,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
- if (dev)
- dev_put(dev);
- goto out;
+ goto out_unlock;
}
}
- if (dev)
- dev_put(dev);
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
@@ -303,6 +300,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (!(addr_type & IPV6_ADDR_MULTICAST))
ipv6_addr_copy(&np->saddr, &addr->sin6_addr);
err = 0;
+out_unlock:
+ rcu_read_unlock();
out:
release_sock(sk);
return err;
@@ -1336,7 +1335,6 @@ static struct inet_protosw rawv6_protosw = {
.protocol = IPPROTO_IP, /* wild card */
.prot = &rawv6_prot,
.ops = &inet6_sockraw_ops,
- .capability = CAP_NET_RAW,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
};
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index da5bd0ed83df..45efc39753e2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -208,18 +208,17 @@ static void ip6_frag_expire(unsigned long data)
fq_kill(fq);
net = container_of(fq->q.net, struct net, ipv6.frags);
- dev = dev_get_by_index(net, fq->iif);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
- goto out;
+ goto out_rcu_unlock;
- rcu_read_lock();
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
- rcu_read_unlock();
/* Don't send error if the first segment did not arrive. */
if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
- goto out;
+ goto out_rcu_unlock;
/*
But use as source device on which LAST ARRIVED
@@ -228,9 +227,9 @@ static void ip6_frag_expire(unsigned long data)
*/
fq->q.fragments->dev = dev;
icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+out_rcu_unlock:
+ rcu_read_unlock();
out:
- if (dev)
- dev_put(dev);
spin_unlock(&fq->q.lock);
fq_put(fq);
}
@@ -682,7 +681,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
struct ctl_table_header *hdr;
table = ip6_frags_ns_ctl_table;
- if (net != &init_net) {
+ if (!net_eq(net, &init_net)) {
table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
if (table == NULL)
goto err_alloc;
@@ -700,7 +699,7 @@ static int ip6_frags_ns_sysctl_register(struct net *net)
return 0;
err_reg:
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
kfree(table);
err_alloc:
return -ENOMEM;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 2362a3397e91..976e68244b99 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -66,7 +66,7 @@ static void ipip6_fb_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);
-static int sit_net_id;
+static int sit_net_id __read_mostly;
struct sit_net {
struct ip_tunnel *tunnels_r_l[HASH_SIZE];
struct ip_tunnel *tunnels_r[HASH_SIZE];
@@ -637,6 +637,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
+ __be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
struct net_device *tdev; /* Device to other host */
struct iphdr *iph; /* Our new IP header */
@@ -726,25 +727,28 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
goto tx_error;
}
- if (tiph->frag_off)
+ if (df) {
mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
- else
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
- if (mtu < 68) {
- stats->collisions++;
- ip_rt_put(rt);
- goto tx_error;
- }
- if (mtu < IPV6_MIN_MTU)
- mtu = IPV6_MIN_MTU;
- if (tunnel->parms.iph.daddr && skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ if (mtu < 68) {
+ stats->collisions++;
+ ip_rt_put(rt);
+ goto tx_error;
+ }
- if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
- ip_rt_put(rt);
- goto tx_error;
+ if (mtu < IPV6_MIN_MTU) {
+ mtu = IPV6_MIN_MTU;
+ df = 0;
+ }
+
+ if (tunnel->parms.iph.daddr && skb_dst(skb))
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+
+ if (skb->len > mtu) {
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ ip_rt_put(rt);
+ goto tx_error;
+ }
}
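
With the rewritten DF handling above, path-MTU work only happens when the outer DF bit is set, and if the usable MTU would drop below IPv6's 1280-byte minimum the code clamps it to 1280 and clears DF so the outer IPv4 layer may fragment instead. A tiny sketch of that clamp; the header length and MTU values are illustrative:

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280
    #define IPV4_HDR_LEN   20

    /* Return the MTU to report to IPv6 and clear *df when outer IPv4
     * fragmentation must be allowed, mirroring the tunnel xmit path. */
    static int tunnel_mtu(int route_mtu, int *df)
    {
        int mtu = route_mtu - IPV4_HDR_LEN;

        if (mtu < IPV6_MIN_MTU) {
            mtu = IPV6_MIN_MTU;    /* never tell IPv6 less than 1280...      */
            *df = 0;               /* ...so let the outer IPv4 fragment      */
        }
        return mtu;
    }

    int main(void)
    {
        int df = 1;
        int mtu = tunnel_mtu(1200, &df);

        printf("mtu=%d df=%d\n", mtu, df);   /* mtu=1280 df=0 */
        return 0;
    }
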
if (tunnel->err_count > 0) {
@@ -792,11 +796,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
- if (mtu > IPV6_MIN_MTU)
- iph->frag_off = tiph->frag_off;
- else
- iph->frag_off = 0;
-
+ iph->frag_off = df;
iph->protocol = IPPROTO_IPV6;
iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
iph->daddr = rt->rt_dst;
@@ -1164,17 +1164,8 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
static int sit_init_net(struct net *net)
{
+ struct sit_net *sitn = net_generic(net, sit_net_id);
int err;
- struct sit_net *sitn;
-
- err = -ENOMEM;
- sitn = kzalloc(sizeof(struct sit_net), GFP_KERNEL);
- if (sitn == NULL)
- goto err_alloc;
-
- err = net_assign_generic(net, sit_net_id, sitn);
- if (err < 0)
- goto err_assign;
sitn->tunnels[0] = sitn->tunnels_wc;
sitn->tunnels[1] = sitn->tunnels_l;
@@ -1201,37 +1192,33 @@ err_reg_dev:
dev_put(sitn->fb_tunnel_dev);
free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
- /* nothing */
-err_assign:
- kfree(sitn);
-err_alloc:
return err;
}
static void sit_exit_net(struct net *net)
{
- struct sit_net *sitn;
+ struct sit_net *sitn = net_generic(net, sit_net_id);
LIST_HEAD(list);
- sitn = net_generic(net, sit_net_id);
rtnl_lock();
sit_destroy_tunnels(sitn, &list);
unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(sitn);
}
static struct pernet_operations sit_net_ops = {
.init = sit_init_net,
.exit = sit_exit_net,
+ .id = &sit_net_id,
+ .size = sizeof(struct sit_net),
};
static void __exit sit_cleanup(void)
{
xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
- unregister_pernet_gen_device(sit_net_id, &sit_net_ops);
+ unregister_pernet_device(&sit_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
@@ -1246,7 +1233,7 @@ static int __init sit_init(void)
return -EAGAIN;
}
- err = register_pernet_gen_device(&sit_net_id, &sit_net_ops);
+ err = register_pernet_device(&sit_net_ops);
if (err < 0)
xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 612fc53e0bb9..5b9af508b8f2 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -159,6 +159,8 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
+ struct tcp_options_received tcp_opt;
+ u8 *hash_location;
struct inet_request_sock *ireq;
struct inet6_request_sock *ireq6;
struct tcp_request_sock *treq;
@@ -171,7 +173,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct dst_entry *dst;
__u8 rcv_wscale;
- struct tcp_options_received tcp_opt;
if (!sysctl_tcp_syncookies || !th->ack)
goto out;
@@ -254,7 +255,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, 0, dst);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, dst);
if (tcp_opt.saw_tstamp)
cookie_check_timestamp(&tcp_opt);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 34925f089e07..aadd7cef73b3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -461,7 +461,8 @@ out:
}
-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
+static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
+ struct request_values *rvp)
{
struct inet6_request_sock *treq = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -499,7 +500,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
goto done;
- skb = tcp_make_synack(sk, dst, req);
+ skb = tcp_make_synack(sk, dst, req, rvp);
if (skb) {
struct tcphdr *th = tcp_hdr(skb);
@@ -1161,13 +1162,15 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
*/
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
+ struct tcp_extend_values tmp_ext;
+ struct tcp_options_received tmp_opt;
+ u8 *hash_location;
+ struct request_sock *req;
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
- struct tcp_options_received tmp_opt;
struct tcp_sock *tp = tcp_sk(sk);
- struct request_sock *req = NULL;
- __u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = __sk_dst_get(sk);
+ __u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
int want_cookie = 0;
#else
@@ -1205,8 +1208,52 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, dst);
+
+ if (tmp_opt.cookie_plus > 0 &&
+ tmp_opt.saw_tstamp &&
+ !tp->rx_opt.cookie_out_never &&
+ (sysctl_tcp_cookie_size > 0 ||
+ (tp->cookie_values != NULL &&
+ tp->cookie_values->cookie_desired > 0))) {
+ u8 *c;
+ u32 *d;
+ u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
+ int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
+
+ if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
+ goto drop_and_free;
+
+ /* Secret recipe starts with IP addresses */
+ d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+ d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+ *mess++ ^= *d++;
+
+ /* plus variable length Initiator Cookie */
+ c = (u8 *)mess;
+ while (l-- > 0)
+ *c++ ^= *hash_location++;
- tcp_parse_options(skb, &tmp_opt, 0, dst);
+#ifdef CONFIG_SYN_COOKIES
+ want_cookie = 0; /* not our kind of cookie */
+#endif
+ tmp_ext.cookie_out_never = 0; /* false */
+ tmp_ext.cookie_plus = tmp_opt.cookie_plus;
+ } else if (!tp->rx_opt.cookie_in_always) {
+ /* redundant indications, but ensure initialization. */
+ tmp_ext.cookie_out_never = 1; /* true */
+ tmp_ext.cookie_plus = 0;
+ } else {
+ goto drop_and_free;
+ }
+ tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
if (want_cookie && !tmp_opt.saw_tstamp)
tcp_clear_options(&tmp_opt);
@@ -1239,23 +1286,21 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
isn = tcp_v6_init_sequence(skb);
}
-
tcp_rsk(req)->snt_isn = isn;
security_inet_conn_request(sk, skb, req);
- if (tcp_v6_send_synack(sk, req))
- goto drop;
+ if (tcp_v6_send_synack(sk, req,
+ (struct request_values *)&tmp_ext) ||
+ want_cookie)
+ goto drop_and_free;
- if (!want_cookie) {
- inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
- return 0;
- }
+ inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+ return 0;
+drop_and_free:
+ reqsk_free(req);
drop:
- if (req)
- reqsk_free(req);
-
return 0; /* don't send reset */
}
@@ -1851,7 +1896,7 @@ static int tcp_v6_init_sock(struct sock *sk)
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
- tp->mss_cache = 536;
+ tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
@@ -1867,6 +1912,19 @@ static int tcp_v6_init_sock(struct sock *sk)
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
+ /* TCP Cookie Transactions */
+ if (sysctl_tcp_cookie_size > 0) {
+ /* Default, cookies without s_data_payload. */
+ tp->cookie_values =
+ kzalloc(sizeof(*tp->cookie_values),
+ sk->sk_allocation);
+ if (tp->cookie_values != NULL)
+ kref_init(&tp->cookie_values->kref);
+ }
+ /* Presumed zeroed, in order of appearance:
+ * cookie_in_always, cookie_out_never,
+ * s_data_constant, s_data_in, s_data_out
+ */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -2112,7 +2170,6 @@ static struct inet_protosw tcpv6_protosw = {
.protocol = IPPROTO_TCP,
.prot = &tcpv6_prot,
.ops = &inet6_stream_ops,
- .capability = -1,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT |
INET_PROTOSW_ICSK,
@@ -2127,12 +2184,17 @@ static int tcpv6_net_init(struct net *net)
static void tcpv6_net_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv6.tcp_sk);
- inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
+}
+
+static void tcpv6_net_exit_batch(struct list_head *net_exit_list)
+{
+ inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
static struct pernet_operations tcpv6_net_ops = {
- .init = tcpv6_net_init,
- .exit = tcpv6_net_exit,
+ .init = tcpv6_net_init,
+ .exit = tcpv6_net_exit,
+ .exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
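Worth noting on the hunk above: .exit still runs once for every namespace being
destroyed, while the new .exit_batch runs a single time for the whole batch of
dying namespaces, which is why the relatively expensive timewait purge moves
there. Schematically (placeholder names; only the callback shapes shown in the
hunk are assumed):

static void example_net_exit(struct net *net)
{
	/* per-namespace teardown, e.g. destroying the control socket */
}

static void example_net_exit_batch(struct list_head *net_exit_list)
{
	/* one sweep over shared global state for the whole batch,
	 * as inet_twsk_purge() is used above */
}

static struct pernet_operations example_net_ops = {
	.exit       = example_net_exit,
	.exit_batch = example_net_exit_batch,
};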
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d3b59d73f507..69ebdbe78c47 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -81,9 +81,33 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
return 0;
}
+static unsigned int udp6_portaddr_hash(struct net *net,
+ const struct in6_addr *addr6,
+ unsigned int port)
+{
+ unsigned int hash, mix = net_hash_mix(net);
+
+ if (ipv6_addr_any(addr6))
+ hash = jhash_1word(0, mix);
+ else if (ipv6_addr_v4mapped(addr6))
+ hash = jhash_1word(addr6->s6_addr32[3], mix);
+ else
+ hash = jhash2(addr6->s6_addr32, 4, mix);
+
+ return hash ^ port;
+}
+
+
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
- return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal);
+ unsigned int hash2_nulladdr =
+ udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
+ unsigned int hash2_partial =
+ udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
+
+ /* precompute partial secondary hash */
+ udp_sk(sk)->udp_portaddr_hash = hash2_partial;
+ return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static inline int compute_score(struct sock *sk, struct net *net,
@@ -94,7 +118,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
{
int score = -1;
- if (net_eq(sock_net(sk), net) && sk->sk_hash == hnum &&
+ if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
@@ -124,6 +148,86 @@ static inline int compute_score(struct sock *sk, struct net *net,
return score;
}
+#define SCORE2_MAX (1 + 1 + 1)
+static inline int compute_score2(struct sock *sk, struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, unsigned short hnum,
+ int dif)
+{
+ int score = -1;
+
+ if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
+ sk->sk_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ return -1;
+ score = 0;
+ if (inet->inet_dport) {
+ if (inet->inet_dport != sport)
+ return -1;
+ score++;
+ }
+ if (!ipv6_addr_any(&np->daddr)) {
+ if (!ipv6_addr_equal(&np->daddr, saddr))
+ return -1;
+ score++;
+ }
+ if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if != dif)
+ return -1;
+ score++;
+ }
+ }
+ return score;
+}
+
+
+/* called with read_rcu_lock() */
+static struct sock *udp6_lib_lookup2(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, unsigned int hnum, int dif,
+ struct udp_hslot *hslot2, unsigned int slot2)
+{
+ struct sock *sk, *result;
+ struct hlist_nulls_node *node;
+ int score, badness;
+
+begin:
+ result = NULL;
+ badness = -1;
+ udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+ score = compute_score2(sk, net, saddr, sport,
+ daddr, hnum, dif);
+ if (score > badness) {
+ result = sk;
+ badness = score;
+ if (score == SCORE2_MAX)
+ goto exact_match;
+ }
+ }
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (get_nulls_value(node) != slot2)
+ goto begin;
+
+ if (result) {
+exact_match:
+ if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ result = NULL;
+ else if (unlikely(compute_score2(result, net, saddr, sport,
+ daddr, hnum, dif) < badness)) {
+ sock_put(result);
+ goto begin;
+ }
+ }
+ return result;
+}
+
static struct sock *__udp6_lib_lookup(struct net *net,
struct in6_addr *saddr, __be16 sport,
struct in6_addr *daddr, __be16 dport,
@@ -132,11 +236,35 @@ static struct sock *__udp6_lib_lookup(struct net *net,
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
- unsigned int hash = udp_hashfn(net, hnum, udptable->mask);
- struct udp_hslot *hslot = &udptable->hash[hash];
+ unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
+ struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness;
rcu_read_lock();
+ if (hslot->count > 10) {
+ hash2 = udp6_portaddr_hash(net, daddr, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+ if (hslot->count < hslot2->count)
+ goto begin;
+
+ result = udp6_lib_lookup2(net, saddr, sport,
+ daddr, hnum, dif,
+ hslot2, slot2);
+ if (!result) {
+ hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+ if (hslot->count < hslot2->count)
+ goto begin;
+
+ result = udp6_lib_lookup2(net, &in6addr_any, sport,
+ daddr, hnum, dif,
+ hslot2, slot2);
+ }
+ rcu_read_unlock();
+ return result;
+ }
begin:
result = NULL;
badness = -1;
@@ -152,7 +280,7 @@ begin:
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
- if (get_nulls_value(node) != hash)
+ if (get_nulls_value(node) != slot)
goto begin;
if (result) {
@@ -288,9 +416,7 @@ try_again:
err = ulen;
out_free:
- lock_sock(sk);
- skb_free_datagram(sk, skb);
- release_sock(sk);
+ skb_free_datagram_locked(sk, skb);
out:
return err;
@@ -417,7 +543,8 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
if (!net_eq(sock_net(s), net))
continue;
- if (s->sk_hash == num && s->sk_family == PF_INET6) {
+ if (udp_sk(s)->udp_port_hash == num &&
+ s->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(s);
if (inet->inet_dport) {
if (inet->inet_dport != rmt_port)
@@ -442,6 +569,33 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
return NULL;
}
+static void flush_stack(struct sock **stack, unsigned int count,
+ struct sk_buff *skb, unsigned int final)
+{
+ unsigned int i;
+ struct sock *sk;
+ struct sk_buff *skb1;
+
+ for (i = 0; i < count; i++) {
+ skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
+
+ sk = stack[i];
+ if (skb1) {
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ udpv6_queue_rcv_skb(sk, skb1);
+ else
+ sk_add_backlog(sk, skb1);
+ bh_unlock_sock(sk);
+ } else {
+ atomic_inc(&sk->sk_drops);
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
+ UDP6_INC_STATS_BH(sock_net(sk),
+ UDP_MIB_INERRORS, IS_UDPLITE(sk));
+ }
+ }
+}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
@@ -450,41 +604,43 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct in6_addr *saddr, struct in6_addr *daddr,
struct udp_table *udptable)
{
- struct sock *sk, *sk2;
+ struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
int dif;
+ unsigned int i, count = 0;
spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head);
dif = inet6_iif(skb);
sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
- if (!sk) {
- kfree_skb(skb);
- goto out;
- }
-
- sk2 = sk;
- while ((sk2 = udp_v6_mcast_next(net, sk_nulls_next(sk2), uh->dest, daddr,
- uh->source, saddr, dif))) {
- struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
- if (buff) {
- bh_lock_sock(sk2);
- if (!sock_owned_by_user(sk2))
- udpv6_queue_rcv_skb(sk2, buff);
- else
- sk_add_backlog(sk2, buff);
- bh_unlock_sock(sk2);
+ while (sk) {
+ stack[count++] = sk;
+ sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
+ uh->source, saddr, dif);
+ if (unlikely(count == ARRAY_SIZE(stack))) {
+ if (!sk)
+ break;
+ flush_stack(stack, count, skb, ~0);
+ count = 0;
}
}
- bh_lock_sock(sk);
- if (!sock_owned_by_user(sk))
- udpv6_queue_rcv_skb(sk, skb);
- else
- sk_add_backlog(sk, skb);
- bh_unlock_sock(sk);
-out:
+ /*
+ * before releasing the lock, we must take reference on sockets
+ */
+ for (i = 0; i < count; i++)
+ sock_hold(stack[i]);
+
spin_unlock(&hslot->lock);
+
+ if (count) {
+ flush_stack(stack, count, skb, count - 1);
+
+ for (i = 0; i < count; i++)
+ sock_put(stack[i]);
+ } else {
+ kfree_skb(skb);
+ }
return 0;
}
@@ -1286,7 +1442,6 @@ static struct inet_protosw udpv6_protosw = {
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
- .capability =-1,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
};
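The lookup heuristic introduced above is: hash on the destination port as
before, but when that chain is long (more than 10 sockets) switch to a second
table hashed on (destination address, port), trying the exact address first and
in6addr_any second, and only when the secondary chain is no longer than the
primary one. A simplified user-space model of the slot selection, where
toy_portaddr_hash() merely stands in for the jhash-based udp6_portaddr_hash()
and the namespace mixing is omitted:

#include <stdint.h>
#include <stddef.h>

/* toy stand-in for udp6_portaddr_hash(); not the kernel's hash */
static unsigned int toy_portaddr_hash(const uint32_t addr[4], unsigned int port)
{
	unsigned int h = 0;
	size_t i;

	for (i = 0; i < 4; i++)
		h = h * 31 + addr[i];
	return h ^ port;
}

struct toy_slot {
	unsigned int count;	/* length of the socket chain */
};

struct toy_table {
	struct toy_slot *hash;	/* keyed by port only */
	struct toy_slot *hash2;	/* keyed by (address, port) */
	unsigned int mask;
};

/* mirrors the control flow of __udp6_lib_lookup(): prefer the secondary
 * (address, port) chain only when the port-only chain is long and not
 * shorter than the secondary one */
static struct toy_slot *toy_pick_slot(struct toy_table *t,
				      const uint32_t daddr[4],
				      unsigned int port)
{
	struct toy_slot *slot = &t->hash[port & t->mask];

	if (slot->count > 10) {
		struct toy_slot *slot2 =
			&t->hash2[toy_portaddr_hash(daddr, port) & t->mask];

		if (slot->count >= slot2->count)
			return slot2;
	}
	return slot;
}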
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index d737a27ee010..6ea6938919e6 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -62,7 +62,6 @@ static struct inet_protosw udplite6_protosw = {
.protocol = IPPROTO_UDPLITE,
.prot = &udplitev6_prot,
.ops = &inet6_dgram_ops,
- .capability = -1,
.no_check = 0,
.flags = INET_PROTOSW_PERMANENT,
};
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 6481ee4bdf72..f9759b54a6de 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1298,6 +1298,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname,
int opt;
int rc = -EINVAL;
+ lock_kernel();
if (optlen != sizeof(int))
goto out;
@@ -1312,6 +1313,7 @@ static int ipx_setsockopt(struct socket *sock, int level, int optname,
ipx_sk(sk)->type = opt;
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -1323,6 +1325,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname,
int len;
int rc = -ENOPROTOOPT;
+ lock_kernel();
if (!(level == SOL_IPX && optname == IPX_TYPE))
goto out;
@@ -1343,6 +1346,7 @@ static int ipx_getsockopt(struct socket *sock, int level, int optname,
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -1352,12 +1356,13 @@ static struct proto ipx_proto = {
.obj_size = sizeof(struct ipx_sock),
};
-static int ipx_create(struct net *net, struct socket *sock, int protocol)
+static int ipx_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
int rc = -ESOCKTNOSUPPORT;
struct sock *sk;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/*
@@ -1390,6 +1395,7 @@ static int ipx_release(struct socket *sock)
if (!sk)
goto out;
+ lock_kernel();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
@@ -1397,6 +1403,7 @@ static int ipx_release(struct socket *sock)
sock->sk = NULL;
sk_refcnt_debug_release(sk);
ipx_destroy_socket(sk);
+ unlock_kernel();
out:
return 0;
}
@@ -1424,7 +1431,8 @@ static __be16 ipx_first_free_socketnum(struct ipx_interface *intrfc)
return htons(socketNum);
}
-static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int __ipx_bind(struct socket *sock,
+ struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct ipx_sock *ipxs = ipx_sk(sk);
@@ -1519,6 +1527,17 @@ out:
return rc;
}
+static int ipx_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+ int rc;
+
+ lock_kernel();
+ rc = __ipx_bind(sock, uaddr, addr_len);
+ unlock_kernel();
+
+ return rc;
+}
+
static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
@@ -1531,6 +1550,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
+ lock_kernel();
if (addr_len != sizeof(*addr))
goto out;
addr = (struct sockaddr_ipx *)uaddr;
@@ -1550,7 +1570,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
- rc = ipx_bind(sock, (struct sockaddr *)&uaddr,
+ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
@@ -1577,6 +1597,7 @@ static int ipx_connect(struct socket *sock, struct sockaddr *uaddr,
ipxrtr_put(rt);
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -1592,6 +1613,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(struct sockaddr_ipx);
+ lock_kernel();
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
@@ -1626,6 +1648,19 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
rc = 0;
out:
+ unlock_kernel();
+ return rc;
+}
+
+static unsigned int ipx_datagram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ int rc;
+
+ lock_kernel();
+ rc = datagram_poll(file, sock, wait);
+ unlock_kernel();
+
return rc;
}
@@ -1700,6 +1735,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
int rc = -EINVAL;
int flags = msg->msg_flags;
+ lock_kernel();
/* Socket gets bound below anyway */
/* if (sk->sk_zapped)
return -EIO; */ /* Socket not bound */
@@ -1723,7 +1759,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node,
IPX_NODE_LEN);
#endif
- rc = ipx_bind(sock, (struct sockaddr *)&uaddr,
+ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
@@ -1751,6 +1787,7 @@ static int ipx_sendmsg(struct kiocb *iocb, struct socket *sock,
if (rc >= 0)
rc = len;
out:
+ unlock_kernel();
return rc;
}
@@ -1765,6 +1802,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int copied, rc;
+ lock_kernel();
/* put the autobinding in */
if (!ipxs->port) {
struct sockaddr_ipx uaddr;
@@ -1779,7 +1817,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
memcpy(uaddr.sipx_node, ipxs->intrfc->if_node, IPX_NODE_LEN);
#endif /* CONFIG_IPX_INTERN */
- rc = ipx_bind(sock, (struct sockaddr *)&uaddr,
+ rc = __ipx_bind(sock, (struct sockaddr *)&uaddr,
sizeof(struct sockaddr_ipx));
if (rc)
goto out;
@@ -1823,6 +1861,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
out_free:
skb_free_datagram(sk, skb);
out:
+ unlock_kernel();
return rc;
}
@@ -1834,6 +1873,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
+ lock_kernel();
switch (cmd) {
case TIOCOUTQ:
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
@@ -1896,6 +1936,7 @@ static int ipx_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = -ENOIOCTLCMD;
break;
}
+ unlock_kernel();
return rc;
}
@@ -1933,7 +1974,7 @@ static const struct net_proto_family ipx_family_ops = {
.owner = THIS_MODULE,
};
-static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
+static const struct proto_ops ipx_dgram_ops = {
.family = PF_IPX,
.owner = THIS_MODULE,
.release = ipx_release,
@@ -1942,7 +1983,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = ipx_getname,
- .poll = datagram_poll,
+ .poll = ipx_datagram_poll,
.ioctl = ipx_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ipx_compat_ioctl,
@@ -1957,8 +1998,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(ipx_dgram_ops) = {
.sendpage = sock_no_sendpage,
};
-SOCKOPS_WRAP(ipx_dgram, PF_IPX);
-
static struct packet_type ipx_8023_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_802_3),
.func = ipx_rcv,
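The pattern repeated through this file (and through the irda changes that
follow) is a BKL push-down: each proto_ops entry point now takes
lock_kernel()/unlock_kernel() itself, and an operation that other already
locked entry points need internally is split into a bare __helper plus a
locking wrapper, which is why ipx_connect()/ipx_sendmsg()/ipx_recvmsg() call
__ipx_bind() directly. Schematically, with example_* names as placeholders:

#include <linux/smp_lock.h>
#include <linux/net.h>
#include <linux/socket.h>

/* does the real work; callers are expected to hold the BKL already */
static int __example_bind(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len)
{
	/* protocol-specific bind logic would live here */
	return 0;
}

/* proto_ops entry point: brackets the unlocked helper with the BKL */
static int example_bind(struct socket *sock, struct sockaddr *uaddr,
			int addr_len)
{
	int rc;

	lock_kernel();
	rc = __example_bind(sock, uaddr, addr_len);
	unlock_kernel();

	return rc;
}

The poll wrappers (ipx_datagram_poll above, irda_datagram_poll below) follow
the same shape around datagram_poll().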
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 9429e4002bca..10093aab6173 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -61,7 +61,7 @@
#include <net/irda/af_irda.h>
-static int irda_create(struct net *net, struct socket *sock, int protocol);
+static int irda_create(struct net *net, struct socket *sock, int protocol, int kern);
static const struct proto_ops irda_stream_ops;
static const struct proto_ops irda_seqpacket_ops;
@@ -714,11 +714,14 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_irda saddr;
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
+ int err;
+ lock_kernel();
memset(&saddr, 0, sizeof(saddr));
if (peer) {
+ err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ goto out;
saddr.sir_family = AF_IRDA;
saddr.sir_lsap_sel = self->dtsap_sel;
@@ -735,8 +738,10 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
/* uaddr_len come to us uninitialised */
*uaddr_len = sizeof (struct sockaddr_irda);
memcpy(uaddr, &saddr, *uaddr_len);
-
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -748,21 +753,25 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
static int irda_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
+ int err = -EOPNOTSUPP;
IRDA_DEBUG(2, "%s()\n", __func__);
+ lock_kernel();
if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
(sk->sk_type != SOCK_DGRAM))
- return -EOPNOTSUPP;
+ goto out;
if (sk->sk_state != TCP_LISTEN) {
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
- return 0;
+ err = 0;
}
+out:
+ unlock_kernel();
- return -EOPNOTSUPP;
+ return err;
}
/*
@@ -783,36 +792,40 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len != sizeof(struct sockaddr_irda))
return -EINVAL;
+ lock_kernel();
#ifdef CONFIG_IRDA_ULTRA
/* Special care for Ultra sockets */
if ((sk->sk_type == SOCK_DGRAM) &&
(sk->sk_protocol == IRDAPROTO_ULTRA)) {
self->pid = addr->sir_lsap_sel;
+ err = -EOPNOTSUPP;
if (self->pid & 0x80) {
IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
- return -EOPNOTSUPP;
+ goto out;
}
err = irda_open_lsap(self, self->pid);
if (err < 0)
- return err;
+ goto out;
/* Pretend we are connected */
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
+ err = 0;
- return 0;
+ goto out;
}
#endif /* CONFIG_IRDA_ULTRA */
self->ias_obj = irias_new_object(addr->sir_name, jiffies);
+ err = -ENOMEM;
if (self->ias_obj == NULL)
- return -ENOMEM;
+ goto out;
err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
if (err < 0) {
kfree(self->ias_obj->name);
kfree(self->ias_obj);
- return err;
+ goto out;
}
/* Register with LM-IAS */
@@ -820,7 +833,10 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
self->stsap_sel, IAS_KERNEL_ATTR);
irias_insert_object(self->ias_obj);
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -839,22 +855,26 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
IRDA_DEBUG(2, "%s()\n", __func__);
- err = irda_create(sock_net(sk), newsock, sk->sk_protocol);
+ lock_kernel();
+ err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
if (err)
- return err;
+ goto out;
+ err = -EINVAL;
if (sock->state != SS_UNCONNECTED)
- return -EINVAL;
+ goto out;
if ((sk = sock->sk) == NULL)
- return -EINVAL;
+ goto out;
+ err = -EOPNOTSUPP;
if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) &&
(sk->sk_type != SOCK_DGRAM))
- return -EOPNOTSUPP;
+ goto out;
+ err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
- return -EINVAL;
+ goto out;
/*
* The read queue this time is holding sockets ready to use
@@ -875,18 +895,20 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
break;
/* Non blocking operation */
+ err = -EWOULDBLOCK;
if (flags & O_NONBLOCK)
- return -EWOULDBLOCK;
+ goto out;
err = wait_event_interruptible(*(sk->sk_sleep),
skb_peek(&sk->sk_receive_queue));
if (err)
- return err;
+ goto out;
}
newsk = newsock->sk;
+ err = -EIO;
if (newsk == NULL)
- return -EIO;
+ goto out;
newsk->sk_state = TCP_ESTABLISHED;
@@ -894,10 +916,11 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
/* Now attach up the new socket */
new->tsap = irttp_dup(self->tsap, new);
+ err = -EPERM; /* value does not seem to make sense. -arnd */
if (!new->tsap) {
IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
kfree_skb(skb);
- return -1;
+ goto out;
}
new->stsap_sel = new->tsap->stsap_sel;
@@ -921,8 +944,10 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
newsock->state = SS_CONNECTED;
irda_connect_response(new);
-
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -955,28 +980,34 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
IRDA_DEBUG(2, "%s(%p)\n", __func__, self);
+ lock_kernel();
/* Don't allow connect for Ultra sockets */
+ err = -ESOCKTNOSUPPORT;
if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA))
- return -ESOCKTNOSUPPORT;
+ goto out;
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
- return 0; /* Connect completed during a ERESTARTSYS event */
+ err = 0;
+ goto out; /* Connect completed during a ERESTARTSYS event */
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
- return -ECONNREFUSED;
+ err = -ECONNREFUSED;
+ goto out;
}
+ err = -EISCONN; /* No reconnect on a seqpacket socket */
if (sk->sk_state == TCP_ESTABLISHED)
- return -EISCONN; /* No reconnect on a seqpacket socket */
+ goto out;
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
+ err = -EINVAL;
if (addr_len != sizeof(struct sockaddr_irda))
- return -EINVAL;
+ goto out;
/* Check if user supplied any destination device address */
if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) {
@@ -984,7 +1015,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name);
if (err) {
IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__);
- return err;
+ goto out;
}
} else {
/* Use the one provided by the user */
@@ -1000,7 +1031,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
err = irda_find_lsap_sel(self, addr->sir_name);
if (err) {
IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
- return err;
+ goto out;
}
} else {
/* Directly connect to the remote LSAP
@@ -1025,29 +1056,35 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
self->max_sdu_size_rx, NULL);
if (err) {
IRDA_DEBUG(0, "%s(), connect failed!\n", __func__);
- return err;
+ goto out;
}
/* Now the loop */
+ err = -EINPROGRESS;
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
- return -EINPROGRESS;
+ goto out;
+ err = -ERESTARTSYS;
if (wait_event_interruptible(*(sk->sk_sleep),
(sk->sk_state != TCP_SYN_SENT)))
- return -ERESTARTSYS;
+ goto out;
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk);
- return err? err : -ECONNRESET;
+ if (!err)
+ err = -ECONNRESET;
+ goto out;
}
sock->state = SS_CONNECTED;
/* At this point, IrLMP has assigned our source address */
self->saddr = irttp_get_saddr(self->tsap);
-
- return 0;
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
}
static struct proto irda_proto = {
@@ -1062,7 +1099,8 @@ static struct proto irda_proto = {
* Create IrDA socket
*
*/
-static int irda_create(struct net *net, struct socket *sock, int protocol)
+static int irda_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct irda_sock *self;
@@ -1192,6 +1230,7 @@ static int irda_release(struct socket *sock)
if (sk == NULL)
return 0;
+ lock_kernel();
lock_sock(sk);
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
@@ -1210,6 +1249,7 @@ static int irda_release(struct socket *sock)
/* Destroy networking socket if we are the last reference on it,
* i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */
sock_put(sk);
+ unlock_kernel();
/* Notes on socket locking and deallocation... - Jean II
* In theory we should put pairs of sock_hold() / sock_put() to
@@ -1257,28 +1297,37 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ lock_kernel();
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
- MSG_NOSIGNAL))
- return -EINVAL;
+ MSG_NOSIGNAL)) {
+ err = -EINVAL;
+ goto out;
+ }
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto out_err;
- if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ err = -ENOTCONN;
+ goto out;
+ }
self = irda_sk(sk);
/* Check if IrTTP is wants us to slow down */
if (wait_event_interruptible(*(sk->sk_sleep),
- (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED)))
- return -ERESTARTSYS;
+ (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
/* Check if we are still connected */
- if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ err = -ENOTCONN;
+ goto out;
+ }
/* Check that we don't send out too big frames */
if (len > self->max_data_size) {
@@ -1310,11 +1359,16 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
goto out_err;
}
+
+ unlock_kernel();
/* Tell client how much data we actually sent */
return len;
- out_err:
- return sk_stream_error(sk, msg->msg_flags, err);
+out_err:
+ err = sk_stream_error(sk, msg->msg_flags, err);
+out:
+ unlock_kernel();
+ return err;
}
@@ -1335,13 +1389,14 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
+ lock_kernel();
if ((err = sock_error(sk)) < 0)
- return err;
+ goto out;
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &err);
if (!skb)
- return err;
+ goto out;
skb_reset_transport_header(skb);
copied = skb->len;
@@ -1369,8 +1424,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
irttp_flow_request(self->tsap, FLOW_START);
}
}
-
+ unlock_kernel();
return copied;
+
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -1388,15 +1447,19 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(3, "%s()\n", __func__);
+ lock_kernel();
if ((err = sock_error(sk)) < 0)
- return err;
+ goto out;
+ err = -EINVAL;
if (sock->flags & __SO_ACCEPTCON)
- return(-EINVAL);
+ goto out;
+ err = -EOPNOTSUPP;
if (flags & MSG_OOB)
- return -EOPNOTSUPP;
+ goto out;
+ err = 0;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
@@ -1408,7 +1471,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
if (skb == NULL) {
DEFINE_WAIT(wait);
- int ret = 0;
+ err = 0;
if (copied >= target)
break;
@@ -1418,25 +1481,25 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
/*
* POSIX 1003.1g mandates this order.
*/
- ret = sock_error(sk);
- if (ret)
+ err = sock_error(sk);
+ if (err)
;
else if (sk->sk_shutdown & RCV_SHUTDOWN)
;
else if (noblock)
- ret = -EAGAIN;
+ err = -EAGAIN;
else if (signal_pending(current))
- ret = sock_intr_errno(timeo);
+ err = sock_intr_errno(timeo);
else if (sk->sk_state != TCP_ESTABLISHED)
- ret = -ENOTCONN;
+ err = -ENOTCONN;
else if (skb_peek(&sk->sk_receive_queue) == NULL)
/* Wait process until data arrives */
schedule();
finish_wait(sk->sk_sleep, &wait);
- if (ret)
- return ret;
+ if (err)
+ goto out;
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
@@ -1489,7 +1552,9 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
}
}
- return copied;
+out:
+ unlock_kernel();
+ return err ? : copied;
}
/*
@@ -1507,18 +1572,23 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int err;
+ lock_kernel();
+
IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ err = -EINVAL;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
- return -EINVAL;
+ goto out;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
- return -EPIPE;
+ err = -EPIPE;
+ goto out;
}
+ err = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ goto out;
self = irda_sk(sk);
@@ -1535,8 +1605,9 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
skb = sock_alloc_send_skb(sk, len + self->max_header_size,
msg->msg_flags & MSG_DONTWAIT, &err);
+ err = -ENOBUFS;
if (!skb)
- return -ENOBUFS;
+ goto out;
skb_reserve(skb, self->max_header_size);
skb_reset_transport_header(skb);
@@ -1546,7 +1617,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
- return err;
+ goto out;
}
/*
@@ -1556,9 +1627,13 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock,
err = irttp_udata_request(self->tsap, skb);
if (err) {
IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
- return err;
+ goto out;
}
+ unlock_kernel();
return len;
+out:
+ unlock_kernel();
+ return err;
}
/*
@@ -1580,12 +1655,15 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len);
+ lock_kernel();
+ err = -EINVAL;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
- return -EINVAL;
+ goto out;
+ err = -EPIPE;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
- return -EPIPE;
+ goto out;
}
self = irda_sk(sk);
@@ -1593,16 +1671,18 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
/* Check if an address was specified with sendto. Jean II */
if (msg->msg_name) {
struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name;
+ err = -EINVAL;
/* Check address, extract pid. Jean II */
if (msg->msg_namelen < sizeof(*addr))
- return -EINVAL;
+ goto out;
if (addr->sir_family != AF_IRDA)
- return -EINVAL;
+ goto out;
pid = addr->sir_lsap_sel;
if (pid & 0x80) {
IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
} else {
/* Check that the socket is properly bound to an Ultra
@@ -1611,7 +1691,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
(sk->sk_state != TCP_ESTABLISHED)) {
IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n",
__func__);
- return -ENOTCONN;
+ err = -ENOTCONN;
+ goto out;
}
/* Use PID from socket */
bound = 1;
@@ -1630,8 +1711,9 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
skb = sock_alloc_send_skb(sk, len + self->max_header_size,
msg->msg_flags & MSG_DONTWAIT, &err);
+ err = -ENOBUFS;
if (!skb)
- return -ENOBUFS;
+ goto out;
skb_reserve(skb, self->max_header_size);
skb_reset_transport_header(skb);
@@ -1641,16 +1723,16 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
if (err) {
kfree_skb(skb);
- return err;
+ goto out;
}
err = irlmp_connless_data_request((bound ? self->lsap : NULL),
skb, pid);
- if (err) {
+ if (err)
IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err);
- return err;
- }
- return len;
+out:
+ unlock_kernel();
+ return err ? : len;
}
#endif /* CONFIG_IRDA_ULTRA */
@@ -1664,6 +1746,8 @@ static int irda_shutdown(struct socket *sock, int how)
IRDA_DEBUG(1, "%s(%p)\n", __func__, self);
+ lock_kernel();
+
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
@@ -1684,6 +1768,8 @@ static int irda_shutdown(struct socket *sock, int how)
self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */
self->saddr = 0x0; /* so IrLMP assign us any link */
+ unlock_kernel();
+
return 0;
}
@@ -1699,6 +1785,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
IRDA_DEBUG(4, "%s()\n", __func__);
+ lock_kernel();
poll_wait(file, sk->sk_sleep, wait);
mask = 0;
@@ -1746,18 +1833,34 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
default:
break;
}
+ unlock_kernel();
return mask;
}
+static unsigned int irda_datagram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ int err;
+
+ lock_kernel();
+ err = datagram_poll(file, sock, wait);
+ unlock_kernel();
+
+ return err;
+}
+
/*
* Function irda_ioctl (sock, cmd, arg)
*/
static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
+ int err;
IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd);
+ lock_kernel();
+ err = -EINVAL;
switch (cmd) {
case TIOCOUTQ: {
long amount;
@@ -1765,9 +1868,8 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
- if (put_user(amount, (unsigned int __user *)arg))
- return -EFAULT;
- return 0;
+ err = put_user(amount, (unsigned int __user *)arg);
+ break;
}
case TIOCINQ: {
@@ -1776,15 +1878,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
- if (put_user(amount, (unsigned int __user *)arg))
- return -EFAULT;
- return 0;
+ err = put_user(amount, (unsigned int __user *)arg);
+ break;
}
case SIOCGSTAMP:
if (sk != NULL)
- return sock_get_timestamp(sk, (struct timeval __user *)arg);
- return -EINVAL;
+ err = sock_get_timestamp(sk, (struct timeval __user *)arg);
+ break;
case SIOCGIFADDR:
case SIOCSIFADDR:
@@ -1796,14 +1897,14 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
- return -EINVAL;
+ break;
default:
IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__);
- return -ENOIOCTLCMD;
+ err = -ENOIOCTLCMD;
}
+ unlock_kernel();
- /*NOTREACHED*/
- return 0;
+ return err;
}
#ifdef CONFIG_COMPAT
@@ -1825,7 +1926,7 @@ static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
* Set some options for the socket
*
*/
-static int irda_setsockopt(struct socket *sock, int level, int optname,
+static int __irda_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
@@ -2083,6 +2184,18 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
return 0;
}
+static int irda_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int optlen)
+{
+ int err;
+
+ lock_kernel();
+ err = __irda_setsockopt(sock, level, optname, optval, optlen);
+ unlock_kernel();
+
+ return err;
+}
+
/*
* Function irda_extract_ias_value(ias_opt, ias_value)
*
@@ -2135,7 +2248,7 @@ static int irda_extract_ias_value(struct irda_ias_set *ias_opt,
/*
* Function irda_getsockopt (sock, level, optname, optval, optlen)
*/
-static int irda_getsockopt(struct socket *sock, int level, int optname,
+static int __irda_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
@@ -2463,13 +2576,25 @@ bed:
return 0;
}
+static int irda_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ int err;
+
+ lock_kernel();
+ err = __irda_getsockopt(sock, level, optname, optval, optlen);
+ unlock_kernel();
+
+ return err;
+}
+
static const struct net_proto_family irda_family_ops = {
.family = PF_IRDA,
.create = irda_create,
.owner = THIS_MODULE,
};
-static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = {
+static const struct proto_ops irda_stream_ops = {
.family = PF_IRDA,
.owner = THIS_MODULE,
.release = irda_release,
@@ -2493,7 +2618,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_stream_ops) = {
.sendpage = sock_no_sendpage,
};
-static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
+static const struct proto_ops irda_seqpacket_ops = {
.family = PF_IRDA,
.owner = THIS_MODULE,
.release = irda_release,
@@ -2502,7 +2627,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
.socketpair = sock_no_socketpair,
.accept = irda_accept,
.getname = irda_getname,
- .poll = datagram_poll,
+ .poll = irda_datagram_poll,
.ioctl = irda_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = irda_compat_ioctl,
@@ -2517,7 +2642,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_seqpacket_ops) = {
.sendpage = sock_no_sendpage,
};
-static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
+static const struct proto_ops irda_dgram_ops = {
.family = PF_IRDA,
.owner = THIS_MODULE,
.release = irda_release,
@@ -2526,7 +2651,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
.socketpair = sock_no_socketpair,
.accept = irda_accept,
.getname = irda_getname,
- .poll = datagram_poll,
+ .poll = irda_datagram_poll,
.ioctl = irda_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = irda_compat_ioctl,
@@ -2542,7 +2667,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_dgram_ops) = {
};
#ifdef CONFIG_IRDA_ULTRA
-static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
+static const struct proto_ops irda_ultra_ops = {
.family = PF_IRDA,
.owner = THIS_MODULE,
.release = irda_release,
@@ -2551,7 +2676,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = irda_getname,
- .poll = datagram_poll,
+ .poll = irda_datagram_poll,
.ioctl = irda_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = irda_compat_ioctl,
@@ -2567,13 +2692,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(irda_ultra_ops) = {
};
#endif /* CONFIG_IRDA_ULTRA */
-SOCKOPS_WRAP(irda_stream, PF_IRDA);
-SOCKOPS_WRAP(irda_seqpacket, PF_IRDA);
-SOCKOPS_WRAP(irda_dgram, PF_IRDA);
-#ifdef CONFIG_IRDA_ULTRA
-SOCKOPS_WRAP(irda_ultra, PF_IRDA);
-#endif /* CONFIG_IRDA_ULTRA */
-
/*
* Function irsock_init (pro)
*
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index eafc010907c2..3c1754023022 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -30,6 +30,7 @@
********************************************************************/
#include <linux/init.h>
+#include <linux/sched.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 62116829b817..315ead3cb926 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/netdevice.h>
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 7b6b631f647f..d340110f5c0c 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -30,6 +30,7 @@
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <net/arp.h>
#include <net/irda/irda.h>
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index cf9a4b531a98..b26dee784aba 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -9,6 +9,7 @@
*/
#include "irnet_irda.h" /* Private header */
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/unaligned.h>
@@ -1402,8 +1403,8 @@ irnet_connect_indication(void * instance,
/* Socket already connecting ? On primary ? */
if(0
#ifdef ALLOW_SIMULT_CONNECT
- || ((irttp_is_primary(server->tsap) == 1) /* primary */
- && (test_and_clear_bit(0, &new->ttp_connect)))
+ || ((irttp_is_primary(server->tsap) == 1) && /* primary */
+ (test_and_clear_bit(0, &new->ttp_connect)))
#endif /* ALLOW_SIMULT_CONNECT */
)
{
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 68cbcb19cbd8..7dea882dbb75 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -13,6 +13,7 @@
* 2) as a control channel (write commands, read events)
*/
+#include <linux/sched.h>
#include <linux/smp_lock.h>
#include "irnet_ppp.h" /* Private header */
/* Please put other headers in irnet.h - Thanks */
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 3aebabb158a8..1e428863574f 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -481,7 +481,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
}
/* Create an IUCV socket */
-static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 3973d0e61e56..3b1f5f5f8de7 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1768,7 +1768,6 @@ static void iucv_tasklet_fn(unsigned long ignored)
*/
static void iucv_work_fn(struct work_struct *work)
{
- typedef void iucv_irq_fn(struct iucv_irq_data *);
LIST_HEAD(work_queue);
struct iucv_irq_list *p, *n;
@@ -1878,14 +1877,25 @@ int iucv_path_table_empty(void)
static int iucv_pm_freeze(struct device *dev)
{
int cpu;
+ struct iucv_irq_list *p, *n;
int rc = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
+ if (iucv_pm_state != IUCV_PM_FREEZING) {
+ for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+ smp_call_function_single(cpu, iucv_block_cpu_almost,
+ NULL, 1);
+ cancel_work_sync(&iucv_work);
+ list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
+ list_del_init(&p->list);
+ iucv_sever_pathid(p->data.ippathid,
+ iucv_error_no_listener);
+ kfree(p);
+ }
+ }
iucv_pm_state = IUCV_PM_FREEZING;
- for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
- smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
rc = dev->driver->pm->freeze(dev);
if (iucv_path_table_empty())
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 472f6594184a..84209fbbeb17 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -35,7 +35,7 @@
#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x))
#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x))
-static int pfkey_net_id;
+static int pfkey_net_id __read_mostly;
struct netns_pfkey {
/* List of all pfkey sockets. */
struct hlist_head table;
@@ -177,7 +177,8 @@ static struct proto key_proto = {
.obj_size = sizeof(struct pfkey_sock),
};
-static int pfkey_create(struct net *net, struct socket *sock, int protocol)
+static int pfkey_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
@@ -3764,28 +3765,14 @@ static struct xfrm_mgr pfkeyv2_mgr =
static int __net_init pfkey_net_init(struct net *net)
{
- struct netns_pfkey *net_pfkey;
+ struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
int rv;
- net_pfkey = kmalloc(sizeof(struct netns_pfkey), GFP_KERNEL);
- if (!net_pfkey) {
- rv = -ENOMEM;
- goto out_kmalloc;
- }
INIT_HLIST_HEAD(&net_pfkey->table);
atomic_set(&net_pfkey->socks_nr, 0);
- rv = net_assign_generic(net, pfkey_net_id, net_pfkey);
- if (rv < 0)
- goto out_assign;
+
rv = pfkey_init_proc(net);
- if (rv < 0)
- goto out_proc;
- return 0;
-out_proc:
-out_assign:
- kfree(net_pfkey);
-out_kmalloc:
return rv;
}
@@ -3795,17 +3782,18 @@ static void __net_exit pfkey_net_exit(struct net *net)
pfkey_exit_proc(net);
BUG_ON(!hlist_empty(&net_pfkey->table));
- kfree(net_pfkey);
}
static struct pernet_operations pfkey_net_ops = {
.init = pfkey_net_init,
.exit = pfkey_net_exit,
+ .id = &pfkey_net_id,
+ .size = sizeof(struct netns_pfkey),
};
static void __exit ipsec_pfkey_exit(void)
{
- unregister_pernet_gen_subsys(pfkey_net_id, &pfkey_net_ops);
+ unregister_pernet_subsys(&pfkey_net_ops);
xfrm_unregister_km(&pfkeyv2_mgr);
sock_unregister(PF_KEY);
proto_unregister(&key_proto);
@@ -3824,7 +3812,7 @@ static int __init ipsec_pfkey_init(void)
err = xfrm_register_km(&pfkeyv2_mgr);
if (err != 0)
goto out_sock_unregister;
- err = register_pernet_gen_subsys(&pfkey_net_id, &pfkey_net_ops);
+ err = register_pernet_subsys(&pfkey_net_ops);
if (err != 0)
goto out_xfrm_unregister_km;
out:
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 4866b4fb0c27..3a66546cad06 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -140,14 +140,17 @@ static struct proto llc_proto = {
/**
* llc_ui_create - alloc and init a new llc_ui socket
+ * @net: network namespace (must be default network)
* @sock: Socket to initialize and attach allocated sk to.
* @protocol: Unused.
+ * @kern: on behalf of kernel or userspace
*
* Allocate and initialize a new llc_ui socket, validate the user wants a
* socket type we have available.
* Returns 0 upon success, negative upon failure.
*/
-static int llc_ui_create(struct net *net, struct socket *sock, int protocol)
+static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
int rc = -ESOCKTNOSUPPORT;
@@ -155,7 +158,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol)
if (!capable(CAP_NET_RAW))
return -EPERM;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4d5543af3123..a10d508b07e1 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -194,6 +194,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_MHWMP_DEBUG
+ bool "Verbose mesh HWMP routing debugging"
+ depends on MAC80211_DEBUG_MENU
+ depends on MAC80211_MESH
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose mesh routing (HWMP) debugging messages (when mac80211
+ is taking part in a mesh network).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_DEBUG_COUNTERS
bool "Extra statistics for TX/RX debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 9f3cf7129324..298cfcc1bf8d 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o
# mac80211 objects
mac80211-y := \
- main.o \
+ main.o status.o \
sta_info.o \
wep.o \
wpa.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bc064d7933ff..51c7dc3c4c3b 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,7 +41,8 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- if (drv_ampdu_action(local, IEEE80211_AMPDU_RX_STOP,
+ if (drv_ampdu_action(local, &sta->sdata->vif,
+ IEEE80211_AMPDU_RX_STOP,
&sta->sta, tid, NULL))
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
@@ -85,10 +86,6 @@ void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *r
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- /* stop HW Rx aggregation. ampdu_action existence
- * already verified in session init so we add the BUG_ON */
- BUG_ON(!local->ops->ampdu_action);
-
rcu_read_lock();
sta = sta_info_get(local, ra);
@@ -170,7 +167,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_addba_request(struct ieee80211_local *local,
@@ -210,9 +207,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
* check if configuration can support the BA policy
* and if buffer size does not exceeds max value */
/* XXX: check own ht delayed BA capability?? */
- if (((ba_policy != 1)
- && (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA)))
- || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
+ if (((ba_policy != 1) &&
+ (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+ (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
@@ -284,7 +281,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto end;
}
- ret = drv_ampdu_action(local, IEEE80211_AMPDU_RX_START,
+ ret = drv_ampdu_action(local, &sta->sdata->vif,
+ IEEE80211_AMPDU_RX_START,
&sta->sta, tid, &start_seq_num);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index bd765f30dba2..5e3a7eccef5a 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -91,7 +91,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
@@ -120,16 +120,22 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
-static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
- enum ieee80211_back_parties initiator)
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+ enum ieee80211_back_parties initiator)
{
struct ieee80211_local *local = sta->local;
int ret;
u8 *state;
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
+ sta->sta.addr, tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
state = &sta->ampdu_mlme.tid_state_tx[tid];
if (*state == HT_AGG_STATE_OPERATIONAL)
@@ -138,12 +144,12 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_STOP,
+ ret = drv_ampdu_action(local, &sta->sdata->vif,
+ IEEE80211_AMPDU_TX_STOP,
&sta->sta, tid, NULL);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
- *state = HT_AGG_STATE_OPERATIONAL;
/*
* We may have pending packets get stuck in this case...
* Not bothering with a workaround for now.
@@ -173,12 +179,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
/* check if the TID waits for addBA response */
spin_lock_bh(&sta->lock);
- if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+ if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+ HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "expecting addBA response there", tid);
+ "(or no longer) expecting addBA response there",
+ tid);
#endif
return;
}
@@ -196,11 +204,11 @@ static inline int ieee80211_ac_from_tid(int tid)
return ieee802_1d_to_ac[tid & 7];
}
-int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
+int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
- struct ieee80211_local *local = hw_to_local(hw);
- struct sta_info *sta;
- struct ieee80211_sub_if_data *sdata;
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
u8 *state;
int ret = 0;
u16 start_seq_num;
@@ -208,52 +216,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
if (WARN_ON(!local->ops->ampdu_action))
return -EINVAL;
- if ((tid >= STA_TID_NUM) || !(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ if ((tid >= STA_TID_NUM) ||
+ !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
return -EINVAL;
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
- ra, tid);
+ pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- rcu_read_lock();
-
- sta = sta_info_get(local, ra);
- if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Could not find the station\n");
-#endif
- ret = -ENOENT;
- goto unlock;
- }
-
/*
* The aggregation code is not prepared to handle
* anything but STA/AP due to the BSSID handling.
* IBSS could work in the code but isn't supported
* by drivers or the standard.
*/
- if (sta->sdata->vif.type != NL80211_IFTYPE_STATION &&
- sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->vif.type != NL80211_IFTYPE_AP) {
- ret = -EINVAL;
- goto unlock;
- }
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+ sdata->vif.type != NL80211_IFTYPE_AP)
+ return -EINVAL;
if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying BA session request\n");
#endif
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
spin_lock_bh(&sta->lock);
spin_lock(&local->ampdu_lock);
- sdata = sta->sdata;
-
/* we have tried too many times, receiver does not want A-MPDU */
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
ret = -EBUSY;
@@ -310,8 +303,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
start_seq_num = sta->tid_seq[tid];
- ret = drv_ampdu_action(local, IEEE80211_AMPDU_TX_START,
- &sta->sta, tid, &start_seq_num);
+ ret = drv_ampdu_action(local, &sdata->vif,
+ IEEE80211_AMPDU_TX_START,
+ pubsta, tid, &start_seq_num);
if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -336,7 +330,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
sta->ampdu_mlme.dialog_token_allocator;
sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
- ieee80211_send_addba_request(sta->sdata, ra, tid,
+ ieee80211_send_addba_request(sdata, pubsta->addr, tid,
sta->ampdu_mlme.tid_tx[tid]->dialog_token,
sta->ampdu_mlme.tid_tx[tid]->ssn,
0x40, 5000);
@@ -348,7 +342,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif
- goto unlock;
+ return 0;
err_free:
kfree(sta->ampdu_mlme.tid_tx[tid]);
@@ -360,8 +354,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
err_unlock_sta:
spin_unlock(&local->ampdu_lock);
spin_unlock_bh(&sta->lock);
- unlock:
- rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
@@ -428,13 +420,15 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
ieee80211_agg_splice_finish(local, sta, tid);
spin_unlock(&local->ampdu_lock);
- drv_ampdu_action(local, IEEE80211_AMPDU_TX_OPERATIONAL,
+ drv_ampdu_action(local, &sta->sdata->vif,
+ IEEE80211_AMPDU_TX_OPERATIONAL,
&sta->sta, tid, NULL);
}
-void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u8 *state;
@@ -483,10 +477,11 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
-void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
+void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
@@ -501,6 +496,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
+ ra_tid->vif = vif;
skb->pkt_type = IEEE80211_ADDBA_MSG;
skb_queue_tail(&local->skb_queue, skb);
@@ -523,11 +519,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
goto unlock;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
- sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
unlock:
@@ -535,36 +526,27 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
return ret;
}
-int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
- u8 *ra, u16 tid,
+int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
enum ieee80211_back_parties initiator)
{
- struct ieee80211_local *local = hw_to_local(hw);
- struct sta_info *sta;
- int ret = 0;
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
- if (WARN_ON(!local->ops->ampdu_action))
+ if (!local->ops->ampdu_action)
return -EINVAL;
if (tid >= STA_TID_NUM)
return -EINVAL;
- rcu_read_lock();
- sta = sta_info_get(local, ra);
- if (!sta) {
- rcu_read_unlock();
- return -ENOENT;
- }
-
- ret = __ieee80211_stop_tx_ba_session(sta, tid, initiator);
- rcu_read_unlock();
- return ret;
+ return __ieee80211_stop_tx_ba_session(sta, tid, initiator);
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u8 *state;
@@ -627,10 +609,11 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
-void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
+void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
const u8 *ra, u16 tid)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
@@ -645,6 +628,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
+ ra_tid->vif = vif;
skb->pkt_type = IEEE80211_DELBA_MSG;
skb_queue_tail(&local->skb_queue, skb);
@@ -668,24 +652,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
spin_lock_bh(&sta->lock);
- if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
- spin_unlock_bh(&sta->lock);
- return;
- }
+ if (!(*state & HT_ADDBA_REQUESTED_MSK))
+ goto out;
if (mgmt->u.action.u.addba_resp.dialog_token !=
sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
- spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- return;
+ goto out;
}
- del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+ del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
+
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) {
u8 curstate = *state;
@@ -699,5 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
} else {
___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
}
+
+ out:
spin_unlock_bh(&sta->lock);
}
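
The agg-tx.c hunks above rework the TX block-ack session API: callers now hand mac80211 the station (struct ieee80211_sta) and, for the callbacks, the virtual interface (struct ieee80211_vif) directly, instead of an ieee80211_hw pointer plus a MAC address, which removes the sta_info_get() lookup and its RCU section from these paths. A minimal driver-side sketch of the new entry points follows; "mydrv" and the packet-count threshold are illustrative assumptions, only the prototypes come from the hunks above.

/* Illustrative only: not part of this patch. */
#include <net/mac80211.h>

static void mydrv_maybe_start_agg(struct ieee80211_sta *pubsta, u16 tid,
				  unsigned long tx_pkts)
{
	/* New API: no ieee80211_hw pointer and no MAC-address lookup. */
	if (tx_pkts > 16)
		ieee80211_start_tx_ba_session(pubsta, tid);
}

static void mydrv_stop_agg(struct ieee80211_sta *pubsta, u16 tid)
{
	/* The initiator argument is unchanged from the old API. */
	ieee80211_stop_tx_ba_session(pubsta, tid, WLAN_BACK_INITIATOR);
}
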
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5608f6c68413..93ee1fd5c08d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -36,6 +36,15 @@ static bool nl80211_type_check(enum nl80211_iftype type)
}
}
+static bool nl80211_params_check(enum nl80211_iftype type,
+ struct vif_params *params)
+{
+ if (!nl80211_type_check(type))
+ return false;
+
+ return true;
+}
+
static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
@@ -45,7 +54,7 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
struct ieee80211_sub_if_data *sdata;
int err;
- if (!nl80211_type_check(type))
+ if (!nl80211_params_check(type, params))
return -EINVAL;
err = ieee80211_if_add(local, name, &dev, type, params);
@@ -72,7 +81,10 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!nl80211_type_check(type))
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!nl80211_params_check(type, params))
return -EINVAL;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -81,9 +93,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (ret)
return ret;
- if (netif_running(sdata->dev))
- return -EBUSY;
-
if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
ieee80211_sdata_set_mesh_id(sdata,
params->mesh_id_len,
@@ -92,6 +101,13 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
return 0;
+ if (type == NL80211_IFTYPE_AP_VLAN &&
+ params && params->use_4addr == 0)
+ rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ else if (type == NL80211_IFTYPE_STATION &&
+ params && params->use_4addr >= 0)
+ sdata->u.mgd.use_4addr = params->use_4addr;
+
sdata->u.mntr_flags = *flags;
return 0;
}
@@ -377,13 +393,13 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sta_info *sta;
int ret = -ENOENT;
rcu_read_lock();
- sta = sta_info_get_by_idx(local, idx, dev);
+ sta = sta_info_get_by_idx(sdata, idx);
if (sta) {
ret = 0;
memcpy(mac, sta->sta.addr, ETH_ALEN);
@@ -738,13 +754,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
err = sta_info_insert(sta);
if (err) {
- /* STA has been freed */
- if (err == -EEXIST && layer2_update) {
- /* Need to update layer 2 devices on reassociation */
- sta = sta_info_get(local, mac);
- if (sta)
- ieee80211_send_layer2_update(sta);
- }
rcu_read_unlock();
return err;
}
@@ -813,6 +822,15 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -EINVAL;
}
+ if (params->vlan->ieee80211_ptr->use_4addr) {
+ if (vlansdata->u.vlan.sta) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ }
+
sta->sdata = vlansdata;
ieee80211_send_layer2_update(sta);
}
@@ -914,7 +932,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
pinfo->generation = mesh_paths_generation;
pinfo->filled = MPATH_INFO_FRAME_QLEN |
- MPATH_INFO_DSN |
+ MPATH_INFO_SN |
MPATH_INFO_METRIC |
MPATH_INFO_EXPTIME |
MPATH_INFO_DISCOVERY_TIMEOUT |
@@ -922,7 +940,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
MPATH_INFO_FLAGS;
pinfo->frame_qlen = mpath->frame_queue.qlen;
- pinfo->dsn = mpath->dsn;
+ pinfo->sn = mpath->sn;
pinfo->metric = mpath->metric;
if (time_before(jiffies, mpath->exp_time))
pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies);
@@ -934,8 +952,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE;
if (mpath->flags & MESH_PATH_RESOLVING)
pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING;
- if (mpath->flags & MESH_PATH_DSN_VALID)
- pinfo->flags |= NL80211_MPATH_FLAG_DSN_VALID;
+ if (mpath->flags & MESH_PATH_SN_VALID)
+ pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID;
if (mpath->flags & MESH_PATH_FIXED)
pinfo->flags |= NL80211_MPATH_FLAG_FIXED;
if (mpath->flags & MESH_PATH_RESOLVING)
@@ -1008,7 +1026,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
{
struct mesh_config *conf;
struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_mesh *ifmsh;
+
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ ifmsh = &sdata->u.mesh;
/* Set the config options which we are interested in setting */
conf = &(sdata->u.mesh.mshcfg);
@@ -1043,6 +1064,10 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
mask))
conf->dot11MeshHWMPnetDiameterTraversalTime =
nconf->dot11MeshHWMPnetDiameterTraversalTime;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) {
+ conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
+ ieee80211_mesh_root_setup(ifmsh);
+ }
return 0;
}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 96991b68f048..e4b54093d41b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -1,3 +1,4 @@
+
/*
* mac80211 debugfs for wireless PHYs
*
@@ -38,16 +39,10 @@ static const struct file_operations name## _ops = { \
};
#define DEBUGFS_ADD(name) \
- local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \
- local, &name## _ops);
+ debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
#define DEBUGFS_ADD_MODE(name, mode) \
- local->debugfs.name = debugfs_create_file(#name, mode, phyd, \
- local, &name## _ops);
-
-#define DEBUGFS_DEL(name) \
- debugfs_remove(local->debugfs.name); \
- local->debugfs.name = NULL;
+ debugfs_create_file(#name, mode, phyd, local, &name## _ops);
DEBUGFS_READONLY_FILE(frequency, 20, "%d",
@@ -57,7 +52,7 @@ DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
local->wep_iv & 0xffffff);
DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
- local->rate_ctrl ? local->rate_ctrl->ops->name : "<unset>");
+ local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
static ssize_t tsf_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -233,12 +228,7 @@ static const struct file_operations stats_ ##name## _ops = { \
};
#define DEBUGFS_STATS_ADD(name) \
- local->debugfs.stats.name = debugfs_create_file(#name, 0400, statsd,\
- local, &stats_ ##name## _ops);
-
-#define DEBUGFS_STATS_DEL(name) \
- debugfs_remove(local->debugfs.stats.name); \
- local->debugfs.stats.name = NULL;
+ debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u",
local->dot11TransmittedFragmentCount);
@@ -326,7 +316,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(noack);
statsd = debugfs_create_dir("statistics", phyd);
- local->debugfs.statistics = statsd;
/* if the dir failed, don't put all the other things into the root! */
if (!statsd)
@@ -367,57 +356,3 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_STATS_ADD(dot11FCSErrorCount);
DEBUGFS_STATS_ADD(dot11RTSSuccessCount);
}
-
-void debugfs_hw_del(struct ieee80211_local *local)
-{
- DEBUGFS_DEL(frequency);
- DEBUGFS_DEL(total_ps_buffered);
- DEBUGFS_DEL(wep_iv);
- DEBUGFS_DEL(tsf);
- DEBUGFS_DEL(queues);
- DEBUGFS_DEL(reset);
- DEBUGFS_DEL(noack);
-
- DEBUGFS_STATS_DEL(transmitted_fragment_count);
- DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
- DEBUGFS_STATS_DEL(failed_count);
- DEBUGFS_STATS_DEL(retry_count);
- DEBUGFS_STATS_DEL(multiple_retry_count);
- DEBUGFS_STATS_DEL(frame_duplicate_count);
- DEBUGFS_STATS_DEL(received_fragment_count);
- DEBUGFS_STATS_DEL(multicast_received_frame_count);
- DEBUGFS_STATS_DEL(transmitted_frame_count);
- DEBUGFS_STATS_DEL(num_scans);
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
- DEBUGFS_STATS_DEL(tx_handlers_drop);
- DEBUGFS_STATS_DEL(tx_handlers_queued);
- DEBUGFS_STATS_DEL(tx_handlers_drop_unencrypted);
- DEBUGFS_STATS_DEL(tx_handlers_drop_fragment);
- DEBUGFS_STATS_DEL(tx_handlers_drop_wep);
- DEBUGFS_STATS_DEL(tx_handlers_drop_not_assoc);
- DEBUGFS_STATS_DEL(tx_handlers_drop_unauth_port);
- DEBUGFS_STATS_DEL(rx_handlers_drop);
- DEBUGFS_STATS_DEL(rx_handlers_queued);
- DEBUGFS_STATS_DEL(rx_handlers_drop_nullfunc);
- DEBUGFS_STATS_DEL(rx_handlers_drop_defrag);
- DEBUGFS_STATS_DEL(rx_handlers_drop_short);
- DEBUGFS_STATS_DEL(rx_handlers_drop_passive_scan);
- DEBUGFS_STATS_DEL(tx_expand_skb_head);
- DEBUGFS_STATS_DEL(tx_expand_skb_head_cloned);
- DEBUGFS_STATS_DEL(rx_expand_skb_head);
- DEBUGFS_STATS_DEL(rx_expand_skb_head2);
- DEBUGFS_STATS_DEL(rx_handlers_fragments);
- DEBUGFS_STATS_DEL(tx_status_drop);
-#endif
- DEBUGFS_STATS_DEL(dot11ACKFailureCount);
- DEBUGFS_STATS_DEL(dot11RTSFailureCount);
- DEBUGFS_STATS_DEL(dot11FCSErrorCount);
- DEBUGFS_STATS_DEL(dot11RTSSuccessCount);
-
- debugfs_remove(local->debugfs.statistics);
- local->debugfs.statistics = NULL;
- debugfs_remove(local->debugfs.stations);
- local->debugfs.stations = NULL;
- debugfs_remove(local->debugfs.keys);
- local->debugfs.keys = NULL;
-}
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index dd2541935c27..68e6a2050f9a 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -3,14 +3,12 @@
#ifdef CONFIG_MAC80211_DEBUGFS
extern void debugfs_hw_add(struct ieee80211_local *local);
-extern void debugfs_hw_del(struct ieee80211_local *local);
extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
#else
static inline void debugfs_hw_add(struct ieee80211_local *local)
{
return;
}
-static inline void debugfs_hw_del(struct ieee80211_local *local) {}
#endif
#endif /* __MAC80211_DEBUGFS_H */
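
The debugfs changes above (and the ones that follow for keys, netdevs and stations) all apply the same pattern: stop storing a dentry per file, create the files fire-and-forget, and tear everything down with a single debugfs_remove_recursive() on the parent directory. A self-contained sketch of that pattern outside mac80211; the "foo" names are placeholders, only the debugfs calls themselves are real kernel API.

#include <linux/debugfs.h>

static struct dentry *foo_dir;	/* only the directory dentry is kept */

static void foo_debugfs_add(void *priv, const struct file_operations *ops)
{
	foo_dir = debugfs_create_dir("foo", NULL);
	/* Individual file dentries are intentionally not saved. */
	debugfs_create_file("stats", 0400, foo_dir, priv, ops);
	debugfs_create_file("config", 0600, foo_dir, priv, ops);
}

static void foo_debugfs_remove(void)
{
	/* One recursive remove replaces the long DEBUGFS_DEL() lists. */
	debugfs_remove_recursive(foo_dir);
	foo_dir = NULL;
}
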
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 99c752588b30..e0f5224630da 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -225,8 +225,8 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
KEY_OPS(key);
#define DEBUGFS_ADD(name) \
- key->debugfs.name = debugfs_create_file(#name, 0400,\
- key->debugfs.dir, key, &key_##name##_ops);
+ debugfs_create_file(#name, 0400, key->debugfs.dir, \
+ key, &key_##name##_ops);
void ieee80211_debugfs_key_add(struct ieee80211_key *key)
{
@@ -271,30 +271,12 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
DEBUGFS_ADD(ifindex);
};
-#define DEBUGFS_DEL(name) \
- debugfs_remove(key->debugfs.name); key->debugfs.name = NULL;
-
void ieee80211_debugfs_key_remove(struct ieee80211_key *key)
{
if (!key)
return;
- DEBUGFS_DEL(keylen);
- DEBUGFS_DEL(flags);
- DEBUGFS_DEL(keyidx);
- DEBUGFS_DEL(hw_key_idx);
- DEBUGFS_DEL(tx_rx_count);
- DEBUGFS_DEL(algorithm);
- DEBUGFS_DEL(tx_spec);
- DEBUGFS_DEL(rx_spec);
- DEBUGFS_DEL(replays);
- DEBUGFS_DEL(icverrors);
- DEBUGFS_DEL(key);
- DEBUGFS_DEL(ifindex);
-
- debugfs_remove(key->debugfs.stalink);
- key->debugfs.stalink = NULL;
- debugfs_remove(key->debugfs.dir);
+ debugfs_remove_recursive(key->debugfs.dir);
key->debugfs.dir = NULL;
}
void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
@@ -302,7 +284,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
char buf[50];
struct ieee80211_key *key;
- if (!sdata->debugfsdir)
+ if (!sdata->debugfs.dir)
return;
/* this is running under the key lock */
@@ -310,9 +292,9 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata)
key = sdata->default_key;
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
- sdata->common_debugfs.default_key =
+ sdata->debugfs.default_key =
debugfs_create_symlink("default_key",
- sdata->debugfsdir, buf);
+ sdata->debugfs.dir, buf);
} else
ieee80211_debugfs_key_remove_default(sdata);
}
@@ -322,8 +304,8 @@ void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata)
if (!sdata)
return;
- debugfs_remove(sdata->common_debugfs.default_key);
- sdata->common_debugfs.default_key = NULL;
+ debugfs_remove(sdata->debugfs.default_key);
+ sdata->debugfs.default_key = NULL;
}
void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
@@ -331,7 +313,7 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
char buf[50];
struct ieee80211_key *key;
- if (!sdata->debugfsdir)
+ if (!sdata->debugfs.dir)
return;
/* this is running under the key lock */
@@ -339,9 +321,9 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
key = sdata->default_mgmt_key;
if (key) {
sprintf(buf, "../keys/%d", key->debugfs.cnt);
- sdata->common_debugfs.default_mgmt_key =
+ sdata->debugfs.default_mgmt_key =
debugfs_create_symlink("default_mgmt_key",
- sdata->debugfsdir, buf);
+ sdata->debugfs.dir, buf);
} else
ieee80211_debugfs_key_remove_mgmt_default(sdata);
}
@@ -351,8 +333,8 @@ void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sda
if (!sdata)
return;
- debugfs_remove(sdata->common_debugfs.default_mgmt_key);
- sdata->common_debugfs.default_mgmt_key = NULL;
+ debugfs_remove(sdata->debugfs.default_mgmt_key);
+ sdata->debugfs.default_mgmt_key = NULL;
}
void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 61234e79022b..472b2039906c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -149,12 +149,14 @@ IEEE80211_IF_FILE(path_refresh_time,
u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
+ u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
#endif
-#define DEBUGFS_ADD(name, type)\
- sdata->debugfs.type.name = debugfs_create_file(#name, 0400,\
- sdata->debugfsdir, sdata, &name##_ops);
+#define DEBUGFS_ADD(name, type) \
+ debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
+ sdata, &name##_ops);
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
@@ -199,30 +201,32 @@ static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
}
#ifdef CONFIG_MAC80211_MESH
-#define MESHSTATS_ADD(name)\
- sdata->mesh_stats.name = debugfs_create_file(#name, 0400,\
- sdata->mesh_stats_dir, sdata, &name##_ops);
static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
- sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats",
- sdata->debugfsdir);
+ struct dentry *dir = debugfs_create_dir("mesh_stats",
+ sdata->debugfs.dir);
+
+#define MESHSTATS_ADD(name)\
+ debugfs_create_file(#name, 0400, dir, sdata, &name##_ops);
+
MESHSTATS_ADD(fwded_mcast);
MESHSTATS_ADD(fwded_unicast);
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
MESHSTATS_ADD(estab_plinks);
+#undef MESHSTATS_ADD
}
-#define MESHPARAMS_ADD(name)\
- sdata->mesh_config.name = debugfs_create_file(#name, 0600,\
- sdata->mesh_config_dir, sdata, &name##_ops);
-
static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
{
- sdata->mesh_config_dir = debugfs_create_dir("mesh_config",
- sdata->debugfsdir);
+ struct dentry *dir = debugfs_create_dir("mesh_config",
+ sdata->debugfs.dir);
+
+#define MESHPARAMS_ADD(name) \
+ debugfs_create_file(#name, 0600, dir, sdata, &name##_ops);
+
MESHPARAMS_ADD(dot11MeshMaxRetries);
MESHPARAMS_ADD(dot11MeshRetryTimeout);
MESHPARAMS_ADD(dot11MeshConfirmTimeout);
@@ -236,12 +240,14 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
+
+#undef MESHPARAMS_ADD
}
#endif
static void add_files(struct ieee80211_sub_if_data *sdata)
{
- if (!sdata->debugfsdir)
+ if (!sdata->debugfs.dir)
return;
switch (sdata->vif.type) {
@@ -274,134 +280,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
}
}
-#define DEBUGFS_DEL(name, type) \
- do { \
- debugfs_remove(sdata->debugfs.type.name); \
- sdata->debugfs.type.name = NULL; \
- } while (0)
-
-static void del_sta_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_DEL(drop_unencrypted, sta);
- DEBUGFS_DEL(force_unicast_rateidx, sta);
- DEBUGFS_DEL(max_ratectrl_rateidx, sta);
-
- DEBUGFS_DEL(bssid, sta);
- DEBUGFS_DEL(aid, sta);
- DEBUGFS_DEL(capab, sta);
-}
-
-static void del_ap_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_DEL(drop_unencrypted, ap);
- DEBUGFS_DEL(force_unicast_rateidx, ap);
- DEBUGFS_DEL(max_ratectrl_rateidx, ap);
-
- DEBUGFS_DEL(num_sta_ps, ap);
- DEBUGFS_DEL(dtim_count, ap);
- DEBUGFS_DEL(num_buffered_multicast, ap);
-}
-
-static void del_wds_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_DEL(drop_unencrypted, wds);
- DEBUGFS_DEL(force_unicast_rateidx, wds);
- DEBUGFS_DEL(max_ratectrl_rateidx, wds);
-
- DEBUGFS_DEL(peer, wds);
-}
-
-static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
-{
- DEBUGFS_DEL(drop_unencrypted, vlan);
- DEBUGFS_DEL(force_unicast_rateidx, vlan);
- DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
-}
-
-static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
-{
-}
-
-#ifdef CONFIG_MAC80211_MESH
-#define MESHSTATS_DEL(name) \
- do { \
- debugfs_remove(sdata->mesh_stats.name); \
- sdata->mesh_stats.name = NULL; \
- } while (0)
-
-static void del_mesh_stats(struct ieee80211_sub_if_data *sdata)
-{
- MESHSTATS_DEL(fwded_mcast);
- MESHSTATS_DEL(fwded_unicast);
- MESHSTATS_DEL(fwded_frames);
- MESHSTATS_DEL(dropped_frames_ttl);
- MESHSTATS_DEL(dropped_frames_no_route);
- MESHSTATS_DEL(estab_plinks);
- debugfs_remove(sdata->mesh_stats_dir);
- sdata->mesh_stats_dir = NULL;
-}
-
-#define MESHPARAMS_DEL(name) \
- do { \
- debugfs_remove(sdata->mesh_config.name); \
- sdata->mesh_config.name = NULL; \
- } while (0)
-
-static void del_mesh_config(struct ieee80211_sub_if_data *sdata)
-{
- MESHPARAMS_DEL(dot11MeshMaxRetries);
- MESHPARAMS_DEL(dot11MeshRetryTimeout);
- MESHPARAMS_DEL(dot11MeshConfirmTimeout);
- MESHPARAMS_DEL(dot11MeshHoldingTimeout);
- MESHPARAMS_DEL(dot11MeshTTL);
- MESHPARAMS_DEL(auto_open_plinks);
- MESHPARAMS_DEL(dot11MeshMaxPeerLinks);
- MESHPARAMS_DEL(dot11MeshHWMPactivePathTimeout);
- MESHPARAMS_DEL(dot11MeshHWMPpreqMinInterval);
- MESHPARAMS_DEL(dot11MeshHWMPnetDiameterTraversalTime);
- MESHPARAMS_DEL(dot11MeshHWMPmaxPREQretries);
- MESHPARAMS_DEL(path_refresh_time);
- MESHPARAMS_DEL(min_discovery_timeout);
- debugfs_remove(sdata->mesh_config_dir);
- sdata->mesh_config_dir = NULL;
-}
-#endif
-
-static void del_files(struct ieee80211_sub_if_data *sdata)
-{
- if (!sdata->debugfsdir)
- return;
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_MESH_POINT:
-#ifdef CONFIG_MAC80211_MESH
- del_mesh_stats(sdata);
- del_mesh_config(sdata);
-#endif
- break;
- case NL80211_IFTYPE_STATION:
- del_sta_files(sdata);
- break;
- case NL80211_IFTYPE_ADHOC:
- /* XXX */
- break;
- case NL80211_IFTYPE_AP:
- del_ap_files(sdata);
- break;
- case NL80211_IFTYPE_WDS:
- del_wds_files(sdata);
- break;
- case NL80211_IFTYPE_MONITOR:
- del_monitor_files(sdata);
- break;
- case NL80211_IFTYPE_AP_VLAN:
- del_vlan_files(sdata);
- break;
- default:
- break;
- }
-}
-
static int notif_registered;
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
@@ -412,16 +290,18 @@ void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
return;
sprintf(buf, "netdev:%s", sdata->dev->name);
- sdata->debugfsdir = debugfs_create_dir(buf,
+ sdata->debugfs.dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
add_files(sdata);
}
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
{
- del_files(sdata);
- debugfs_remove(sdata->debugfsdir);
- sdata->debugfsdir = NULL;
+ if (!sdata->debugfs.dir)
+ return;
+
+ debugfs_remove_recursive(sdata->debugfs.dir);
+ sdata->debugfs.dir = NULL;
}
static int netdev_notify(struct notifier_block *nb,
@@ -444,7 +324,7 @@ static int netdev_notify(struct notifier_block *nb,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- dir = sdata->debugfsdir;
+ dir = sdata->debugfs.dir;
if (!dir)
return 0;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 33a2e892115b..3f41608c8081 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
STA_FILE(tx_retry_failed, tx_retry_failed, LU);
STA_FILE(tx_retry_count, tx_retry_count, LU);
STA_FILE(last_signal, last_signal, D);
-STA_FILE(last_qual, last_qual, D);
STA_FILE(last_noise, last_noise, D);
STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
@@ -67,10 +66,11 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
char buf[100];
struct sta_info *sta = file->private_data;
u32 staflags = get_sta_flags(sta);
- int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s",
+ int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
- staflags & WLAN_STA_PS ? "PS\n" : "",
+ staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
+ staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
staflags & WLAN_STA_WME ? "WME\n" : "",
@@ -157,14 +157,38 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
}
STA_OPS(agg_status);
+static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[200], *p = buf;
+ int i;
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
+
+ p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
+ htc->ht_supported ? "" : "not ");
+ if (htc->ht_supported) {
+ p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap);
+ p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
+ htc->ampdu_factor, htc->ampdu_density);
+ p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
+ htc->mcs.rx_mask[i]);
+ p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n",
+ le16_to_cpu(htc->mcs.rx_highest));
+ p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
+ htc->mcs.tx_params);
+ }
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+}
+STA_OPS(ht_capa);
+
#define DEBUGFS_ADD(name) \
- sta->debugfs.name = debugfs_create_file(#name, 0400, \
+ debugfs_create_file(#name, 0400, \
sta->debugfs.dir, sta, &sta_ ##name## _ops);
-#define DEBUGFS_DEL(name) \
- debugfs_remove(sta->debugfs.name);\
- sta->debugfs.name = NULL;
-
void ieee80211_sta_debugfs_add(struct sta_info *sta)
{
@@ -209,36 +233,13 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(tx_retry_failed);
DEBUGFS_ADD(tx_retry_count);
DEBUGFS_ADD(last_signal);
- DEBUGFS_ADD(last_qual);
DEBUGFS_ADD(last_noise);
DEBUGFS_ADD(wep_weak_iv_count);
+ DEBUGFS_ADD(ht_capa);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
{
- DEBUGFS_DEL(flags);
- DEBUGFS_DEL(num_ps_buf_frames);
- DEBUGFS_DEL(inactive_ms);
- DEBUGFS_DEL(last_seq_ctrl);
- DEBUGFS_DEL(agg_status);
- DEBUGFS_DEL(aid);
- DEBUGFS_DEL(dev);
- DEBUGFS_DEL(rx_packets);
- DEBUGFS_DEL(tx_packets);
- DEBUGFS_DEL(rx_bytes);
- DEBUGFS_DEL(tx_bytes);
- DEBUGFS_DEL(rx_duplicates);
- DEBUGFS_DEL(rx_fragments);
- DEBUGFS_DEL(rx_dropped);
- DEBUGFS_DEL(tx_fragments);
- DEBUGFS_DEL(tx_filtered);
- DEBUGFS_DEL(tx_retry_failed);
- DEBUGFS_DEL(tx_retry_count);
- DEBUGFS_DEL(last_signal);
- DEBUGFS_DEL(last_qual);
- DEBUGFS_DEL(last_noise);
- DEBUGFS_DEL(wep_weak_iv_count);
-
- debugfs_remove(sta->debugfs.dir);
+ debugfs_remove_recursive(sta->debugfs.dir);
sta->debugfs.dir = NULL;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 020a94a31106..921dd9c9ff62 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -239,15 +239,16 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
}
static inline int drv_ampdu_action(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn)
{
int ret = -EOPNOTSUPP;
if (local->ops->ampdu_action)
- ret = local->ops->ampdu_action(&local->hw, action,
+ ret = local->ops->ampdu_action(&local->hw, vif, action,
sta, tid, ssn);
- trace_drv_ampdu_action(local, action, sta, tid, ssn, ret);
+ trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret);
return ret;
}
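
Since drv_ampdu_action() now forwards the vif, the driver's ampdu_action callback grows a struct ieee80211_vif * argument between hw and action, and the completion callbacks take that vif instead of the hw pointer. A sketch of what a driver-side handler could look like after this change; "mydrv" is a placeholder and the case bodies are intentionally minimal, only the prototype mirrors the call above.

#include <net/mac80211.h>

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum ieee80211_ampdu_mlme_action action,
			      struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* Program the hardware, then report readiness via the vif. */
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
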
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 37b9051afcf3..b8fef1d11369 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -634,11 +634,12 @@ TRACE_EVENT(drv_tx_last_beacon,
TRACE_EVENT(drv_ampdu_action,
TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn, int ret),
- TP_ARGS(local, action, sta, tid, ssn, ret),
+ TP_ARGS(local, vif, action, sta, tid, ssn, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -647,10 +648,12 @@ TRACE_EVENT(drv_ampdu_action,
__field(u16, tid)
__field(u16, ssn)
__field(int, ret)
+ VIF_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
STA_ASSIGN;
__entry->ret = ret;
__entry->action = action;
@@ -659,8 +662,8 @@ TRACE_EVENT(drv_ampdu_action,
),
TP_printk(
- LOCAL_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
- LOCAL_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
)
);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 0891bfb06996..3787455fb696 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -134,14 +134,13 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.delba.params = cpu_to_le16(params);
mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_mgmt *mgmt, size_t len)
{
- struct ieee80211_local *local = sdata->local;
u16 tid, params;
u16 initiator;
@@ -153,7 +152,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
if (net_ratelimit())
printk(KERN_DEBUG "delba from %pM (%s) tid %d reason code %d\n",
mgmt->sa, initiator ? "initiator" : "recipient", tid,
- mgmt->u.action.u.delba.reason_code);
+ le16_to_cpu(mgmt->u.action.u.delba.reason_code));
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (initiator == WLAN_BACK_INITIATOR)
@@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */
spin_lock_bh(&sta->lock);
- sta->ampdu_mlme.tid_state_tx[tid] =
- HT_AGG_STATE_OPERATIONAL;
+ if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
+ ___ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_RECIPIENT);
spin_unlock_bh(&sta->lock);
- ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
- WLAN_BACK_RECIPIENT);
}
}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6eaf69823439..10d13856f86c 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -73,6 +73,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
u8 *pos;
struct ieee80211_supported_band *sband;
+ struct cfg80211_bss *bss;
u32 bss_change;
u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
@@ -177,8 +178,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
- mgmt, skb->len, 0, GFP_KERNEL);
+ bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
+ mgmt, skb->len, 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
}
@@ -453,6 +455,10 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+ if (time_before(jiffies, ifibss->last_scan_completed +
+ IEEE80211_IBSS_MERGE_INTERVAL))
+ return;
+
if (ieee80211_sta_active_ibss(sdata))
return;
@@ -538,13 +544,12 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
WLAN_CAPABILITY_PRIVACY,
capability);
+ if (bss) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
- if (bss)
printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
"%pM\n", bss->cbss.bssid, ifibss->bssid);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
- if (bss && !memcmp(ifibss->bssid, bss->cbss.bssid, ETH_ALEN)) {
printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
" based on configured SSID\n",
sdata->dev->name, bss->cbss.bssid);
@@ -552,8 +557,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_rx_bss_put(local, bss);
return;
- } else if (bss)
- ieee80211_rx_bss_put(local, bss);
+ }
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG " did not try to join ibss\n");
@@ -655,7 +659,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
sdata->dev->name, resp->da);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
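
The two ibss.c TX hunks above also show the new ieee80211_tx_skb() convention used throughout this series: the third "encrypt" argument is gone, and callers that used to pass 0 now request plaintext via the IEEE80211_TX_INTFL_DONT_ENCRYPT flag in the skb's TX control block. A small sketch of the converted calling pattern; the helper name is an assumption (and relies on mac80211's internal ieee80211_i.h declarations), while the flag and both calls are taken from the hunk above.

/* Hypothetical internal helper, illustrative only. */
static void example_send_unencrypted(struct ieee80211_sub_if_data *sdata,
				     struct sk_buff *skb)
{
	/* Formerly ieee80211_tx_skb(sdata, skb, 0). */
	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
	ieee80211_tx_skb(sdata, skb);
}
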
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 588005c84a6d..039affa7c871 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
+#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "key.h"
@@ -167,16 +168,12 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
struct ieee80211_rx_data {
struct sk_buff *skb;
- struct net_device *dev;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
struct ieee80211_key *key;
- struct ieee80211_rx_status *status;
- struct ieee80211_rate *rate;
unsigned int flags;
- int sent_ps_buffered;
int queue;
u32 tkip_iv32;
u16 tkip_iv16;
@@ -209,6 +206,9 @@ struct ieee80211_if_wds {
struct ieee80211_if_vlan {
struct list_head list;
+
+ /* used for all tx if the VLAN is configured to 4-addr mode */
+ struct sta_info *sta;
};
struct mesh_stats {
@@ -312,6 +312,8 @@ struct ieee80211_if_managed {
} mfp; /* management frame protection */
int wmm_last_param_set;
+
+ u8 use_4addr;
};
enum ieee80211_ibss_request {
@@ -353,6 +355,7 @@ struct ieee80211_if_mesh {
struct work_struct work;
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
+ struct timer_list mesh_path_root_timer;
struct sk_buff_head skb_queue;
unsigned long timers_running;
@@ -362,23 +365,23 @@ struct ieee80211_if_mesh {
u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
size_t mesh_id_len;
/* Active Path Selection Protocol Identifier */
- u8 mesh_pp_id[4];
+ u8 mesh_pp_id;
/* Active Path Selection Metric Identifier */
- u8 mesh_pm_id[4];
+ u8 mesh_pm_id;
/* Congestion Control Mode Identifier */
- u8 mesh_cc_id[4];
+ u8 mesh_cc_id;
/* Synchronization Protocol Identifier */
- u8 mesh_sp_id[4];
+ u8 mesh_sp_id;
/* Authentication Protocol Identifier */
- u8 mesh_auth_id[4];
- /* Local mesh Destination Sequence Number */
- u32 dsn;
+ u8 mesh_auth_id;
+ /* Local mesh Sequence Number */
+ u32 sn;
/* Last used PREQ ID */
u32 preq_id;
atomic_t mpaths;
- /* Timestamp of last DSN update */
- unsigned long last_dsn_update;
- /* Timestamp of last DSN sent */
+ /* Timestamp of last SN update */
+ unsigned long last_sn_update;
+ /* Timestamp of last SN sent */
unsigned long last_preq;
struct mesh_rmc *rmc;
spinlock_t mesh_preq_queue_lock;
@@ -471,74 +474,11 @@ struct ieee80211_sub_if_data {
} u;
#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *debugfsdir;
- union {
- struct {
- struct dentry *drop_unencrypted;
- struct dentry *bssid;
- struct dentry *aid;
- struct dentry *capab;
- struct dentry *force_unicast_rateidx;
- struct dentry *max_ratectrl_rateidx;
- } sta;
- struct {
- struct dentry *drop_unencrypted;
- struct dentry *num_sta_ps;
- struct dentry *dtim_count;
- struct dentry *force_unicast_rateidx;
- struct dentry *max_ratectrl_rateidx;
- struct dentry *num_buffered_multicast;
- } ap;
- struct {
- struct dentry *drop_unencrypted;
- struct dentry *peer;
- struct dentry *force_unicast_rateidx;
- struct dentry *max_ratectrl_rateidx;
- } wds;
- struct {
- struct dentry *drop_unencrypted;
- struct dentry *force_unicast_rateidx;
- struct dentry *max_ratectrl_rateidx;
- } vlan;
- struct {
- struct dentry *mode;
- } monitor;
- } debugfs;
struct {
+ struct dentry *dir;
struct dentry *default_key;
struct dentry *default_mgmt_key;
- } common_debugfs;
-
-#ifdef CONFIG_MAC80211_MESH
- struct dentry *mesh_stats_dir;
- struct {
- struct dentry *fwded_mcast;
- struct dentry *fwded_unicast;
- struct dentry *fwded_frames;
- struct dentry *dropped_frames_ttl;
- struct dentry *dropped_frames_no_route;
- struct dentry *estab_plinks;
- struct timer_list mesh_path_timer;
- } mesh_stats;
-
- struct dentry *mesh_config_dir;
- struct {
- struct dentry *dot11MeshRetryTimeout;
- struct dentry *dot11MeshConfirmTimeout;
- struct dentry *dot11MeshHoldingTimeout;
- struct dentry *dot11MeshMaxRetries;
- struct dentry *dot11MeshTTL;
- struct dentry *auto_open_plinks;
- struct dentry *dot11MeshMaxPeerLinks;
- struct dentry *dot11MeshHWMPactivePathTimeout;
- struct dentry *dot11MeshHWMPpreqMinInterval;
- struct dentry *dot11MeshHWMPnetDiameterTraversalTime;
- struct dentry *dot11MeshHWMPmaxPREQretries;
- struct dentry *path_refresh_time;
- struct dentry *min_discovery_timeout;
- } mesh_config;
-#endif
-
+ } debugfs;
#endif
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
@@ -639,7 +579,6 @@ struct ieee80211_local {
/* number of interfaces with corresponding FIF_ flags */
int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
unsigned int filter_flags; /* FIF_* */
- struct iw_statistics wstats;
/* protects the aggregated multicast list and filter calls */
spinlock_t filter_lock;
@@ -662,6 +601,14 @@ struct ieee80211_local {
bool suspended;
/*
+ * Resuming is true while suspended, but when we're reprogramming the
+ * hardware -- at that time it's allowed to use ieee80211_queue_work()
+ * again even though some other parts of the stack are still suspended
+ * and we still drop received frames to avoid waking the stack.
+ */
+ bool resuming;
+
+ /*
* quiescing is true during the suspend process _only_ to
* ease timer cancelling etc.
*/
@@ -730,10 +677,9 @@ struct ieee80211_local {
unsigned long scanning;
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
- struct cfg80211_scan_request *scan_req;
+ struct cfg80211_scan_request *scan_req, *hw_scan_req;
struct ieee80211_channel *scan_channel;
- const u8 *orig_ies;
- int orig_ies_len;
+ enum ieee80211_band hw_scan_band;
int scan_channel_idx;
int scan_ies_len;
@@ -818,53 +764,6 @@ struct ieee80211_local {
#ifdef CONFIG_MAC80211_DEBUGFS
struct local_debugfsdentries {
struct dentry *rcdir;
- struct dentry *rcname;
- struct dentry *frequency;
- struct dentry *total_ps_buffered;
- struct dentry *wep_iv;
- struct dentry *tsf;
- struct dentry *queues;
- struct dentry *reset;
- struct dentry *noack;
- struct dentry *statistics;
- struct local_debugfsdentries_statsdentries {
- struct dentry *transmitted_fragment_count;
- struct dentry *multicast_transmitted_frame_count;
- struct dentry *failed_count;
- struct dentry *retry_count;
- struct dentry *multiple_retry_count;
- struct dentry *frame_duplicate_count;
- struct dentry *received_fragment_count;
- struct dentry *multicast_received_frame_count;
- struct dentry *transmitted_frame_count;
- struct dentry *wep_undecryptable_count;
- struct dentry *num_scans;
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
- struct dentry *tx_handlers_drop;
- struct dentry *tx_handlers_queued;
- struct dentry *tx_handlers_drop_unencrypted;
- struct dentry *tx_handlers_drop_fragment;
- struct dentry *tx_handlers_drop_wep;
- struct dentry *tx_handlers_drop_not_assoc;
- struct dentry *tx_handlers_drop_unauth_port;
- struct dentry *rx_handlers_drop;
- struct dentry *rx_handlers_queued;
- struct dentry *rx_handlers_drop_nullfunc;
- struct dentry *rx_handlers_drop_defrag;
- struct dentry *rx_handlers_drop_short;
- struct dentry *rx_handlers_drop_passive_scan;
- struct dentry *tx_expand_skb_head;
- struct dentry *tx_expand_skb_head_cloned;
- struct dentry *rx_expand_skb_head;
- struct dentry *rx_expand_skb_head2;
- struct dentry *rx_handlers_fragments;
- struct dentry *tx_status_drop;
-#endif
- struct dentry *dot11ACKFailureCount;
- struct dentry *dot11RTSFailureCount;
- struct dentry *dot11FCSErrorCount;
- struct dentry *dot11RTSSuccessCount;
- } stats;
struct dentry *stations;
struct dentry *keys;
} debugfs;
@@ -877,8 +776,9 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
return netdev_priv(dev);
}
-/* this struct represents 802.11n's RA/TID combination */
+/* this struct represents 802.11n's RA/TID combination along with our vif */
struct ieee80211_ra_tid {
+ struct ieee80211_vif *vif;
u8 ra[ETH_ALEN];
u16 tid;
};
@@ -905,12 +805,13 @@ struct ieee802_11_elems {
u8 *wmm_param;
struct ieee80211_ht_cap *ht_cap_elem;
struct ieee80211_ht_info *ht_info_elem;
- u8 *mesh_config;
+ struct ieee80211_meshconf_ie *mesh_config;
u8 *mesh_id;
u8 *peer_link;
u8 *preq;
u8 *prep;
u8 *perr;
+ struct ieee80211_rann_ie *rann;
u8 *ch_switch_elem;
u8 *country_elem;
u8 *pwr_constr_elem;
@@ -932,7 +833,6 @@ struct ieee802_11_elems {
u8 ext_supp_rates_len;
u8 wmm_info_len;
u8 wmm_param_len;
- u8 mesh_config_len;
u8 mesh_id_len;
u8 peer_link_len;
u8 preq_len;
@@ -1055,6 +955,18 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
+/*
+ * radiotap header for status frames
+ */
+struct ieee80211_tx_status_rtap_hdr {
+ struct ieee80211_radiotap_header hdr;
+ u8 rate;
+ u8 padding_for_rate;
+ __le16 tx_flags;
+ u8 data_retries;
+} __attribute__ ((packed));
+
+
/* HT */
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
@@ -1083,6 +995,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
enum ieee80211_back_parties initiator);
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+ enum ieee80211_back_parties initiator);
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1122,8 +1036,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- int encrypt);
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems);
u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
@@ -1160,7 +1073,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u8 *extra, size_t extra_len, const u8 *bssid,
const u8 *key, u8 key_len, u8 key_idx);
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
- const u8 *ie, size_t ie_len);
+ const u8 *ie, size_t ie_len,
+ enum ieee80211_band band);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 14f10eb91c5c..1bf12a26b45e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -214,8 +214,8 @@ static int ieee80211_open(struct net_device *dev)
/* must be before the call to ieee80211_configure_filter */
local->monitors++;
if (local->monitors == 1) {
- local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
- hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP;
+ local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
+ hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
}
if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
@@ -435,8 +435,8 @@ static int ieee80211_stop(struct net_device *dev)
local->monitors--;
if (local->monitors == 0) {
- local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
- hw_reconf_flags |= IEEE80211_CONF_CHANGE_RADIOTAP;
+ local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
+ hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
}
if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
@@ -752,6 +752,8 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
ieee80211_mandatory_rates(sdata->local,
sdata->local->hw.conf.channel->band);
sdata->drop_unencrypted = 0;
+ if (type == NL80211_IFTYPE_STATION)
+ sdata->u.mgd.use_4addr = false;
return 0;
}
@@ -809,6 +811,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
+ if (params) {
+ ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+ if (type == NL80211_IFTYPE_STATION)
+ sdata->u.mgd.use_4addr = params->use_4addr;
+ }
+
ret = register_netdevice(ndev);
if (ret)
goto fail;
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 9572e00f532c..a49f93b79e92 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -118,18 +118,6 @@ struct ieee80211_key {
struct {
struct dentry *stalink;
struct dentry *dir;
- struct dentry *keylen;
- struct dentry *flags;
- struct dentry *keyidx;
- struct dentry *hw_key_idx;
- struct dentry *tx_rx_count;
- struct dentry *algorithm;
- struct dentry *tx_spec;
- struct dentry *rx_spec;
- struct dentry *replays;
- struct dentry *icverrors;
- struct dentry *key;
- struct dentry *ifindex;
int cnt;
} debugfs;
#endif
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 797f53942e5f..8116d1a96a4a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -9,7 +9,6 @@
*/
#include <net/mac80211.h>
-#include <net/ieee80211_radiotap.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -30,26 +29,11 @@
#include "rate.h"
#include "mesh.h"
#include "wep.h"
-#include "wme.h"
-#include "aes_ccm.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
-/*
- * For seeing transmitted packets on monitor interfaces
- * we have a radiotap header too.
- */
-struct ieee80211_tx_status_rtap_hdr {
- struct ieee80211_radiotap_header hdr;
- u8 rate;
- u8 padding_for_rate;
- __le16 tx_flags;
- u8 data_retries;
-} __attribute__ ((packed));
-
-
void ieee80211_configure_filter(struct ieee80211_local *local)
{
u64 mc;
@@ -253,28 +237,6 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
BSS_CHANGED_ERP_SLOT;
}
-void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int tmp;
-
- skb->pkt_type = IEEE80211_TX_STATUS_MSG;
- skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
- &local->skb_queue : &local->skb_queue_unreliable, skb);
- tmp = skb_queue_len(&local->skb_queue) +
- skb_queue_len(&local->skb_queue_unreliable);
- while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
- (skb = skb_dequeue(&local->skb_queue_unreliable))) {
- dev_kfree_skb_irq(skb);
- tmp--;
- I802_DEBUG_INC(local->tx_status_drop);
- }
- tasklet_schedule(&local->tasklet);
-}
-EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
-
static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
@@ -296,14 +258,14 @@ static void ieee80211_tasklet_handler(unsigned long data)
break;
case IEEE80211_DELBA_MSG:
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_stop_tx_ba_cb(local_to_hw(local),
- ra_tid->ra, ra_tid->tid);
+ ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
+ ra_tid->tid);
dev_kfree_skb(skb);
break;
case IEEE80211_ADDBA_MSG:
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
- ieee80211_start_tx_ba_cb(local_to_hw(local),
- ra_tid->ra, ra_tid->tid);
+ ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
+ ra_tid->tid);
dev_kfree_skb(skb);
break ;
default:
@@ -315,299 +277,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
}
}
-static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
- struct sta_info *sta,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- /*
- * XXX: This is temporary!
- *
- * The problem here is that when we get here, the driver will
- * quite likely have pretty much overwritten info->control by
- * using info->driver_data or info->rate_driver_data. Thus,
- * when passing out the frame to the driver again, we would be
- * passing completely bogus data since the driver would then
- * expect a properly filled info->control. In mac80211 itself
- * the same problem occurs, since we need info->control.vif
- * internally.
- *
- * To fix this, we should send the frame through TX processing
- * again. However, it's not that simple, since the frame will
- * have been software-encrypted (if applicable) already, and
- * encrypting it again doesn't do much good. So to properly do
- * that, we not only have to skip the actual 'raw' encryption
- * (key selection etc. still has to be done!) but also the
- * sequence number assignment since that impacts the crypto
- * encapsulation, of course.
- *
- * Hence, for now, fix the bug by just dropping the frame.
- */
- goto drop;
-
- sta->tx_filtered_count++;
-
- /*
- * Clear the TX filter mask for this STA when sending the next
- * packet. If the STA went to power save mode, this will happen
- * when it wakes up for the next time.
- */
- set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
-
- /*
- * This code races in the following way:
- *
- * (1) STA sends frame indicating it will go to sleep and does so
- * (2) hardware/firmware adds STA to filter list, passes frame up
- * (3) hardware/firmware processes TX fifo and suppresses a frame
- * (4) we get TX status before having processed the frame and
- * knowing that the STA has gone to sleep.
- *
- * This is actually quite unlikely even when both those events are
- * processed from interrupts coming in quickly after one another or
- * even at the same time because we queue both TX status events and
- * RX frames to be processed by a tasklet and process them in the
- * same order that they were received or TX status last. Hence, there
- * is no race as long as the frame RX is processed before the next TX
- * status, which drivers can ensure, see below.
- *
- * Note that this can only happen if the hardware or firmware can
- * actually add STAs to the filter list, if this is done by the
- * driver in response to set_tim() (which will only reduce the race
- * this whole filtering tries to solve, not completely solve it)
- * this situation cannot happen.
- *
- * To completely solve this race drivers need to make sure that they
- * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
- * functions and
- * (b) always process RX events before TX status events if ordering
- * can be unknown, for example with different interrupt status
- * bits.
- */
- if (test_sta_flags(sta, WLAN_STA_PS) &&
- skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
- skb_queue_tail(&sta->tx_filtered, skb);
- return;
- }
-
- if (!test_sta_flags(sta, WLAN_STA_PS) &&
- !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
- /* Software retry the packet once */
- info->flags |= IEEE80211_TX_INTFL_RETRIED;
- ieee80211_add_pending_skb(local, skb);
- return;
- }
-
- drop:
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: dropped TX filtered frame, "
- "queue_len=%d PS=%d @%lu\n",
- wiphy_name(local->hw.wiphy),
- skb_queue_len(&sta->tx_filtered),
- !!test_sta_flags(sta, WLAN_STA_PS), jiffies);
-#endif
- dev_kfree_skb(skb);
-}
-
-void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct sk_buff *skb2;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u16 frag, type;
- __le16 fc;
- struct ieee80211_supported_band *sband;
- struct ieee80211_tx_status_rtap_hdr *rthdr;
- struct ieee80211_sub_if_data *sdata;
- struct net_device *prev_dev = NULL;
- struct sta_info *sta;
- int retry_count = -1, i;
-
- for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
- /* the HW cannot have attempted that rate */
- if (i >= hw->max_rates) {
- info->status.rates[i].idx = -1;
- info->status.rates[i].count = 0;
- }
-
- retry_count += info->status.rates[i].count;
- }
- if (retry_count < 0)
- retry_count = 0;
-
- rcu_read_lock();
-
- sband = local->hw.wiphy->bands[info->band];
-
- sta = sta_info_get(local, hdr->addr1);
-
- if (sta) {
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- test_sta_flags(sta, WLAN_STA_PS)) {
- /*
- * The STA is in power save mode, so assume
- * that this TX packet failed because of that.
- */
- ieee80211_handle_filtered_frame(local, sta, skb);
- rcu_read_unlock();
- return;
- }
-
- fc = hdr->frame_control;
-
- if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
- (ieee80211_is_data_qos(fc))) {
- u16 tid, ssn;
- u8 *qc;
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
- & IEEE80211_SCTL_SEQ);
- ieee80211_send_bar(sta->sdata, hdr->addr1,
- tid, ssn);
- }
-
- if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
- ieee80211_handle_filtered_frame(local, sta, skb);
- rcu_read_unlock();
- return;
- } else {
- if (!(info->flags & IEEE80211_TX_STAT_ACK))
- sta->tx_retry_failed++;
- sta->tx_retry_count += retry_count;
- }
-
- rate_control_tx_status(local, sband, sta, skb);
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- ieee80211s_update_metric(local, sta, skb);
- }
-
- rcu_read_unlock();
-
- ieee80211_led_tx(local, 0);
-
- /* SNMP counters
- * Fragments are passed to low-level drivers as separate skbs, so these
- * are actually fragments, not frames. Update frame counters only for
- * the first fragment of the frame. */
-
- frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
- type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-
- if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (frag == 0) {
- local->dot11TransmittedFrameCount++;
- if (is_multicast_ether_addr(hdr->addr1))
- local->dot11MulticastTransmittedFrameCount++;
- if (retry_count > 0)
- local->dot11RetryCount++;
- if (retry_count > 1)
- local->dot11MultipleRetryCount++;
- }
-
- /* This counter shall be incremented for an acknowledged MPDU
- * with an individual address in the address 1 field or an MPDU
- * with a multicast address in the address 1 field of type Data
- * or Management. */
- if (!is_multicast_ether_addr(hdr->addr1) ||
- type == IEEE80211_FTYPE_DATA ||
- type == IEEE80211_FTYPE_MGMT)
- local->dot11TransmittedFragmentCount++;
- } else {
- if (frag == 0)
- local->dot11FailedCount++;
- }
-
- /* this was a transmitted frame, but now we want to reuse it */
- skb_orphan(skb);
-
- /*
- * This is a bit racy but we can avoid a lot of work
- * with this test...
- */
- if (!local->monitors && !local->cooked_mntrs) {
- dev_kfree_skb(skb);
- return;
- }
-
- /* send frame to monitor interfaces now */
-
- if (skb_headroom(skb) < sizeof(*rthdr)) {
- printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
- dev_kfree_skb(skb);
- return;
- }
-
- rthdr = (struct ieee80211_tx_status_rtap_hdr *)
- skb_push(skb, sizeof(*rthdr));
-
- memset(rthdr, 0, sizeof(*rthdr));
- rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
- rthdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
- (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
- (1 << IEEE80211_RADIOTAP_RATE));
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !is_multicast_ether_addr(hdr->addr1))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
-
- /*
- * XXX: Once radiotap gets the bitmap reset thing the vendor
- * extensions proposal contains, we can actually report
- * the whole set of tries we did.
- */
- if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
- else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
- if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
- rthdr->rate = sband->bitrates[
- info->status.rates[0].idx].bitrate / 5;
-
- /* for now report the total retry_count */
- rthdr->data_retries = retry_count;
-
- /* XXX: is this sufficient for BPF? */
- skb_set_mac_header(skb, 0);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->pkt_type = PACKET_OTHERHOST;
- skb->protocol = htons(ETH_P_802_2);
- memset(skb->cb, 0, sizeof(skb->cb));
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (!netif_running(sdata->dev))
- continue;
-
- if (prev_dev) {
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2) {
- skb2->dev = prev_dev;
- netif_rx(skb2);
- }
- }
-
- prev_dev = sdata->dev;
- }
- }
- if (prev_dev) {
- skb->dev = prev_dev;
- netif_rx(skb);
- skb = NULL;
- }
- rcu_read_unlock();
- dev_kfree_skb(skb);
-}
-EXPORT_SYMBOL(ieee80211_tx_status);
-
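The retry accounting at the top of the removed ieee80211_tx_status() sums the per-rate attempt counts and starts from -1 so that the final (delivering) attempt is not counted as a retry. A standalone sketch of that arithmetic, for illustration only and not part of the patch:

    #include <stddef.h>

    /* Sum of per-rate attempt counts minus the last attempt, clamped at 0. */
    static int total_retries(const int counts[], size_t n_rates)
    {
            int retry_count = -1;
            size_t i;

            for (i = 0; i < n_rates; i++)
                    retry_count += counts[i];
            return retry_count < 0 ? 0 : retry_count;
    }

For example, counts of {3, 1} (three attempts at the first rate, one at the fallback) give 3 retries out of 4 transmissions.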
static void ieee80211_restart_work(struct work_struct *work)
{
struct ieee80211_local *local =
@@ -659,7 +328,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
if (!wiphy)
return NULL;
- wiphy->netnsok = true;
+ wiphy->flags |= WIPHY_FLAG_NETNS_OK |
+ WIPHY_FLAG_4ADDR_AP |
+ WIPHY_FLAG_4ADDR_STATION;
wiphy->privid = mac80211_wiphy_privid;
/* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
@@ -901,6 +572,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
i++;
}
}
+ local->int_scan_req->n_channels = i;
local->network_latency_notifier.notifier_call =
ieee80211_max_network_latency;
@@ -923,7 +595,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
fail_wep:
sta_info_stop(local);
fail_sta_info:
- debugfs_hw_del(local);
destroy_workqueue(local->workqueue);
fail_workqueue:
wiphy_unregister(local->hw.wiphy);
@@ -959,10 +630,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
ieee80211_clear_tx_pending(local);
sta_info_stop(local);
rate_control_deinitialize(local);
- debugfs_hw_del(local);
- if (skb_queue_len(&local->skb_queue)
- || skb_queue_len(&local->skb_queue_unreliable))
+ if (skb_queue_len(&local->skb_queue) ||
+ skb_queue_len(&local->skb_queue_unreliable))
printk(KERN_WARNING "%s: skb_queue not empty\n",
wiphy_name(local->hw.wiphy));
skb_queue_purge(&local->skb_queue);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 9a733890eb47..c0fe46493f71 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 open80211s Ltd.
+ * Copyright (c) 2008, 2009 open80211s Ltd.
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*
@@ -14,18 +14,14 @@
#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
+#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
-#define PP_OFFSET 1 /* Path Selection Protocol */
-#define PM_OFFSET 5 /* Path Selection Metric */
-#define CC_OFFSET 9 /* Congestion Control Mode */
-#define SP_OFFSET 13 /* Synchronization Protocol */
-#define AUTH_OFFSET 17 /* Authentication Protocol */
-#define CAPAB_OFFSET 22
-#define CAPAB_ACCEPT_PLINKS 0x80
-#define CAPAB_FORWARDING 0x10
+#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
+#define MESHCONF_CAPAB_FORWARDING 0x08
#define TMR_RUNNING_HK 0
#define TMR_RUNNING_MP 1
+#define TMR_RUNNING_MPR 2
int mesh_allocated;
static struct kmem_cache *rm_cache;
@@ -85,11 +81,11 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
*/
if (ifmsh->mesh_id_len == ie->mesh_id_len &&
memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
- memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
- memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
- memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 &&
- memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 &&
- memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0)
+ (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) &&
+ (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) &&
+ (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) &&
+ (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) &&
+ (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))
return true;
return false;
@@ -102,7 +98,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
*/
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
{
- return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0;
+ return (ie->mesh_config->meshconf_cap &
+ MESHCONF_CAPAB_ACCEPT_PLINKS) != 0;
}
/**
@@ -128,18 +125,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
{
- u8 oui[3] = {0x00, 0x0F, 0xAC};
-
- memcpy(sta->mesh_pp_id, oui, sizeof(oui));
- memcpy(sta->mesh_pm_id, oui, sizeof(oui));
- memcpy(sta->mesh_cc_id, oui, sizeof(oui));
- memcpy(sta->mesh_sp_id, oui, sizeof(oui));
- memcpy(sta->mesh_auth_id, oui, sizeof(oui));
- sta->mesh_pp_id[sizeof(oui)] = 0;
- sta->mesh_pm_id[sizeof(oui)] = 0;
- sta->mesh_cc_id[sizeof(oui)] = 0xff;
- sta->mesh_sp_id[sizeof(oui)] = 0xff;
- sta->mesh_auth_id[sizeof(oui)] = 0x0;
+ sta->mesh_pp_id = 0; /* HWMP */
+ sta->mesh_pm_id = 0; /* Airtime */
+ sta->mesh_cc_id = 0; /* Disabled */
+ sta->mesh_sp_id = 0; /* Neighbor Offset */
+ sta->mesh_auth_id = 0; /* Disabled */
}
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -205,8 +195,8 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
list_del(&p->list);
kmem_cache_free(rm_cache, p);
--entries;
- } else if ((seqnum == p->seqnum)
- && (memcmp(sa, p->sa, ETH_ALEN) == 0))
+ } else if ((seqnum == p->seqnum) &&
+ (memcmp(sa, p->sa, ETH_ALEN) == 0))
return -1;
}
@@ -228,6 +218,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
struct ieee80211_supported_band *sband;
u8 *pos;
int len, i, rate;
+ u8 neighbors;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
len = sband->n_bitrates;
@@ -251,46 +242,49 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
}
}
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ pos = skb_put(skb, 2 + 1);
+ *pos++ = WLAN_EID_DS_PARAMS;
+ *pos++ = 1;
+ *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+ }
+
pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
*pos++ = WLAN_EID_MESH_ID;
*pos++ = sdata->u.mesh.mesh_id_len;
if (sdata->u.mesh.mesh_id_len)
memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
- pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN);
+ pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
*pos++ = WLAN_EID_MESH_CONFIG;
- *pos++ = IEEE80211_MESH_CONFIG_LEN;
- /* Version */
- *pos++ = 1;
+ *pos++ = sizeof(struct ieee80211_meshconf_ie);
/* Active path selection protocol ID */
- memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
- pos += 4;
+ *pos++ = sdata->u.mesh.mesh_pp_id;
/* Active path selection metric ID */
- memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
- pos += 4;
+ *pos++ = sdata->u.mesh.mesh_pm_id;
/* Congestion control mode identifier */
- memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
- pos += 4;
+ *pos++ = sdata->u.mesh.mesh_cc_id;
/* Synchronization protocol identifier */
- memcpy(pos, sdata->u.mesh.mesh_sp_id, 4);
- pos += 4;
+ *pos++ = sdata->u.mesh.mesh_sp_id;
/* Authentication Protocol identifier */
- memcpy(pos, sdata->u.mesh.mesh_auth_id, 4);
- pos += 4;
+ *pos++ = sdata->u.mesh.mesh_auth_id;
- /* Mesh Formation Info */
- memset(pos, 0x00, 1);
- pos += 1;
+ /* Mesh Formation Info - number of neighbors */
+ neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
+ /* Number of neighbor mesh STAs, or 15, whichever is smaller */
+ neighbors = (neighbors > 15) ? 15 : neighbors;
+ *pos++ = neighbors << 1;
/* Mesh capability */
sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
- *pos = CAPAB_FORWARDING;
- *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00;
+ *pos = MESHCONF_CAPAB_FORWARDING;
+ *pos++ |= sdata->u.mesh.accepting_plinks ?
+ MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
return;
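The DS Parameter Set element added above derives the current channel from the operating frequency via ieee80211_frequency_to_channel(). For the 2.4 GHz band that mapping is simple arithmetic; a rough standalone equivalent (illustrative only, the kernel helper also handles the 5 GHz band):

    /* Channels 1..13 are centred at 2407 + 5 * chan MHz, channel 14 at 2484. */
    static int freq_to_channel_2ghz(int freq_mhz)
    {
            if (freq_mhz == 2484)
                    return 14;
            if (freq_mhz < 2412 || freq_mhz > 2472)
                    return -1;      /* not a 2.4 GHz channel centre */
            return (freq_mhz - 2407) / 5;
    }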
@@ -355,6 +349,34 @@ static void ieee80211_mesh_path_timer(unsigned long data)
ieee80211_queue_work(&local->hw, &ifmsh->work);
}
+static void ieee80211_mesh_path_root_timer(unsigned long data)
+{
+ struct ieee80211_sub_if_data *sdata =
+ (struct ieee80211_sub_if_data *) data;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
+
+ set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
+
+ if (local->quiescing) {
+ set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
+ return;
+ }
+
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
+}
+
+void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
+{
+ if (ifmsh->mshcfg.dot11MeshHWMPRootMode)
+ set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
+ else {
+ clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
+ /* stop running timer */
+ del_timer_sync(&ifmsh->mesh_path_root_timer);
+ }
+}
+
/**
* ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
* @hdr: 802.11 frame header
@@ -365,8 +387,9 @@ static void ieee80211_mesh_path_timer(unsigned long data)
*
* Return the length of the 802.11 header (does not include a mesh control header)
*/
-int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, char
- *meshda, char *meshsa) {
+int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
+ const u8 *meshda, const u8 *meshsa)
+{
if (is_multicast_ether_addr(meshda)) {
*fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA TA SA */
@@ -448,6 +471,15 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}
+static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+ mesh_path_tx_root_frame(sdata);
+ mod_timer(&ifmsh->mesh_path_root_timer,
+ round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
+}
+
#ifdef CONFIG_PM
void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
@@ -462,6 +494,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
if (del_timer_sync(&ifmsh->mesh_path_timer))
set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
+ if (del_timer_sync(&ifmsh->mesh_path_root_timer))
+ set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
}
void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
@@ -472,6 +506,9 @@ void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
add_timer(&ifmsh->housekeeping_timer);
if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
add_timer(&ifmsh->mesh_path_timer);
+ if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running))
+ add_timer(&ifmsh->mesh_path_root_timer);
+ ieee80211_mesh_root_setup(ifmsh);
}
#endif
@@ -481,6 +518,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
+ ieee80211_mesh_root_setup(ifmsh);
ieee80211_queue_work(&local->hw, &ifmsh->work);
sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
@@ -491,6 +529,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
/*
* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
@@ -561,7 +600,7 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case PLINK_CATEGORY:
+ case MESH_PLINK_CATEGORY:
mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
break;
case MESH_PATH_SEL_CATEGORY:
@@ -628,6 +667,9 @@ static void ieee80211_mesh_work(struct work_struct *work)
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
ieee80211_mesh_housekeeping(sdata, ifmsh);
+
+ if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
+ ieee80211_mesh_rootpath(sdata);
}
void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -673,7 +715,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
MESH_MIN_DISCOVERY_TIMEOUT;
ifmsh->accepting_plinks = true;
ifmsh->preq_id = 0;
- ifmsh->dsn = 0;
+ ifmsh->sn = 0;
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
@@ -684,6 +726,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
setup_timer(&ifmsh->mesh_path_timer,
ieee80211_mesh_path_timer,
(unsigned long) sdata);
+ setup_timer(&ifmsh->mesh_path_root_timer,
+ ieee80211_mesh_path_root_timer,
+ (unsigned long) sdata);
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}
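mesh_mgmt_ies_add() above now caps the advertised neighbour count at 15 and shifts it into the Mesh Formation Info octet, and builds the capability octet from the MESHCONF_CAPAB_* bits defined earlier in the file. A minimal standalone sketch of that packing (not from the patch):

    #include <stdint.h>

    #define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
    #define MESHCONF_CAPAB_FORWARDING    0x08

    /* Mesh Formation Info: established peer links, capped at 15, in bits 1..4. */
    static uint8_t mesh_formation_info(unsigned int estab_plinks)
    {
            unsigned int n = estab_plinks > 15 ? 15 : estab_plinks;

            return (uint8_t)(n << 1);
    }

    /* Capability octet: forwarding always on, accept-plinks when available. */
    static uint8_t mesh_capability(int accepting_plinks)
    {
            uint8_t cap = MESHCONF_CAPAB_FORWARDING;

            if (accepting_plinks)
                    cap |= MESHCONF_CAPAB_ACCEPT_PLINKS;
            return cap;
    }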
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index dd1c19319f0a..31e102541869 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 open80211s Ltd.
+ * Copyright (c) 2008, 2009 open80211s Ltd.
* Authors: Luis Carlos Cobo <luisca@cozybit.com>
* Javier Cardona <javier@cozybit.com>
*
@@ -26,7 +26,7 @@
*
* @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
* @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
- * @MESH_PATH_DSN_VALID: the mesh path contains a valid destination sequence
+ * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
* number
* @MESH_PATH_FIXED: the mesh path has been manually set and should not be
* modified
@@ -38,7 +38,7 @@
enum mesh_path_flags {
MESH_PATH_ACTIVE = BIT(0),
MESH_PATH_RESOLVING = BIT(1),
- MESH_PATH_DSN_VALID = BIT(2),
+ MESH_PATH_SN_VALID = BIT(2),
MESH_PATH_FIXED = BIT(3),
MESH_PATH_RESOLVED = BIT(4),
};
@@ -53,11 +53,13 @@ enum mesh_path_flags {
* to grow.
* @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
* grow
+ * @MESH_WORK_ROOT: the mesh root station needs to send a frame
*/
enum mesh_deferred_task_flags {
MESH_WORK_HOUSEKEEPING,
MESH_WORK_GROW_MPATH_TABLE,
MESH_WORK_GROW_MPP_TABLE,
+ MESH_WORK_ROOT,
};
/**
@@ -70,7 +72,7 @@ enum mesh_deferred_task_flags {
* @timer: mesh path discovery timer
* @frame_queue: pending queue for frames sent to this destination while the
* path is unresolved
- * @dsn: destination sequence number of the destination
+ * @sn: target sequence number
* @metric: current metric to this destination
* @hop_count: hops to destination
* @exp_time: in jiffies, when the path will expire or when it expired
@@ -94,7 +96,7 @@ struct mesh_path {
struct timer_list timer;
struct sk_buff_head frame_queue;
struct rcu_head rcu;
- u32 dsn;
+ u32 sn;
u32 metric;
u8 hop_count;
unsigned long exp_time;
@@ -174,7 +176,7 @@ struct mesh_rmc {
#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2)
/* Default values, timeouts in ms */
-#define MESH_TTL 5
+#define MESH_TTL 31
#define MESH_MAX_RETR 3
#define MESH_RET_T 100
#define MESH_CONF_T 100
@@ -206,13 +208,19 @@ struct mesh_rmc {
#define MESH_MAX_MPATHS 1024
/* Pending ANA approval */
-#define PLINK_CATEGORY 30
+#define MESH_PLINK_CATEGORY 30
#define MESH_PATH_SEL_CATEGORY 32
+#define MESH_PATH_SEL_ACTION 0
+
+/* PERR reason codes */
+#define PEER_RCODE_UNSPECIFIED 11
+#define PERR_RCODE_NO_ROUTE 12
+#define PERR_RCODE_DEST_UNREACH 13
/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
- char *da, char *sa);
+ const u8 *da, const u8 *sa);
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
struct ieee80211_sub_if_data *sdata, char *addr4,
char *addr5, char *addr6);
@@ -234,6 +242,7 @@ ieee80211_rx_result
ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
/* Mesh paths */
int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -274,8 +283,8 @@ void mesh_mpp_table_grow(void);
u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl);
/* Mesh paths */
-int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
- struct ieee80211_sub_if_data *sdata);
+int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
+ const u8 *ra, struct ieee80211_sub_if_data *sdata);
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
@@ -288,6 +297,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
+void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
extern int mesh_paths_generation;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index e12a786e26b8..833b2f3670c5 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 open80211s Ltd.
+ * Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -9,6 +9,12 @@
#include "mesh.h"
+#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
+#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args)
+#else
+#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
+#endif
+
#define TEST_FRAME_LEN 8192
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
@@ -21,6 +27,12 @@
#define MP_F_DO 0x1
/* Reply and forward */
#define MP_F_RF 0x2
+/* Unknown Sequence Number */
+#define MP_F_USN 0x01
+/* Reason code Present */
+#define MP_F_RCODE 0x02
+
+static void mesh_queue_preq(struct mesh_path *, u8);
static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
{
@@ -29,6 +41,13 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
return get_unaligned_le32(preq_elem + offset);
}
+static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
+{
+ if (ae)
+ offset += 6;
+ return get_unaligned_le16(preq_elem + offset);
+}
+
/* HWMP IE processing macros */
#define AE_F (1<<6)
#define AE_F_SET(x) (*x & AE_F)
@@ -37,30 +56,33 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
#define PREQ_IE_TTL(x) (*(x + 2))
#define PREQ_IE_PREQ_ID(x) u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x) (x + 7)
-#define PREQ_IE_ORIG_DSN(x) u32_field_get(x, 13, 0);
+#define PREQ_IE_ORIG_SN(x) u32_field_get(x, 13, 0);
#define PREQ_IE_LIFETIME(x) u32_field_get(x, 17, AE_F_SET(x));
#define PREQ_IE_METRIC(x) u32_field_get(x, 21, AE_F_SET(x));
-#define PREQ_IE_DST_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
-#define PREQ_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
-#define PREQ_IE_DST_DSN(x) u32_field_get(x, 33, AE_F_SET(x));
+#define PREQ_IE_TARGET_F(x) (*(AE_F_SET(x) ? x + 32 : x + 26))
+#define PREQ_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 33 : x + 27)
+#define PREQ_IE_TARGET_SN(x) u32_field_get(x, 33, AE_F_SET(x));
#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x) (x + 3)
-#define PREP_IE_ORIG_DSN(x) u32_field_get(x, 9, 0);
+#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0);
#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x));
#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x));
-#define PREP_IE_DST_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
-#define PREP_IE_DST_DSN(x) u32_field_get(x, 27, AE_F_SET(x));
+#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
+#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x));
-#define PERR_IE_DST_ADDR(x) (x + 2)
-#define PERR_IE_DST_DSN(x) u32_field_get(x, 8, 0);
+#define PERR_IE_TTL(x) (*(x))
+#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
+#define PERR_IE_TARGET_ADDR(x) (x + 3)
+#define PERR_IE_TARGET_SN(x) u32_field_get(x, 9, 0);
+#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0);
#define MSEC_TO_TU(x) (x*1000/1024)
-#define DSN_GT(x, y) ((long) (y) - (long) (x) < 0)
-#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
+#define SN_GT(x, y) ((long) (y) - (long) (x) < 0)
+#define SN_LT(x, y) ((long) (x) - (long) (y) < 0)
#define net_traversal_jiffies(s) \
msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
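SN_GT() and SN_LT() above compare HWMP sequence numbers as serial numbers, so a counter that has just wrapped past zero still compares as newer than an old value; MSEC_TO_TU() converts milliseconds to 802.11 time units of 1024 microseconds each (for example, MSEC_TO_TU(100) is 97). A hedged standalone sketch of the comparison idea, not taken from the patch:

    #include <stdint.h>

    /* Nonzero when a is newer than b under modulo-2^32 serial arithmetic. */
    static int sn_newer(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

With this, sn_newer(2, 0xfffffffeU) is true even though 2 is numerically smaller, which is the behaviour the path-selection code relies on after a sequence-number wrap.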
@@ -75,13 +97,17 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
enum mpath_frame_type {
MPATH_PREQ = 0,
MPATH_PREP,
- MPATH_PERR
+ MPATH_PERR,
+ MPATH_RANN
};
+static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
- u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
- __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
- __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
+ u8 *orig_addr, __le32 orig_sn, u8 target_flags, u8 *target,
+ __le32 target_sn, const u8 *da, u8 hop_count, u8 ttl,
+ __le32 lifetime, __le32 metric, __le32 preq_id,
+ struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
@@ -103,21 +129,30 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- /* BSSID is left zeroed, wildcard value */
+ /* BSSID == SA */
+ memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
- mgmt->u.action.u.mesh_action.action_code = action;
+ mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
switch (action) {
case MPATH_PREQ:
+ mhwmp_dbg("sending PREQ to %pM\n", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
+ mhwmp_dbg("sending PREP to %pM\n", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
+ case MPATH_RANN:
+ mhwmp_dbg("sending RANN from %pM\n", orig_addr);
+ ie_len = sizeof(struct ieee80211_rann_ie);
+ pos = skb_put(skb, 2 + ie_len);
+ *pos++ = WLAN_EID_RANN;
+ break;
default:
kfree_skb(skb);
return -ENOTSUPP;
@@ -133,34 +168,40 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
}
memcpy(pos, orig_addr, ETH_ALEN);
pos += ETH_ALEN;
- memcpy(pos, &orig_dsn, 4);
- pos += 4;
- memcpy(pos, &lifetime, 4);
+ memcpy(pos, &orig_sn, 4);
pos += 4;
+ if (action != MPATH_RANN) {
+ memcpy(pos, &lifetime, 4);
+ pos += 4;
+ }
memcpy(pos, &metric, 4);
pos += 4;
if (action == MPATH_PREQ) {
/* destination count */
*pos++ = 1;
- *pos++ = dst_flags;
+ *pos++ = target_flags;
+ }
+ if (action != MPATH_RANN) {
+ memcpy(pos, target, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &target_sn, 4);
}
- memcpy(pos, dst, ETH_ALEN);
- pos += ETH_ALEN;
- memcpy(pos, &dst_dsn, 4);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
return 0;
}
/**
* mesh_path_error_tx - Sends a PERR mesh management frame
*
- * @dst: broken destination
- * @dst_dsn: dsn of the broken destination
+ * @target: broken destination
+ * @target_sn: SN of the broken destination
+ * @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
*/
-int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
- struct ieee80211_sub_if_data *sdata)
+int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
+ __le16 target_rcode, const u8 *ra,
+ struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
@@ -184,20 +225,32 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
- mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
- ie_len = 12;
+ mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
*pos++ = ie_len;
- /* mode flags, reserved */
- *pos++ = 0;
+ /* ttl */
+ *pos++ = MESH_TTL;
/* number of destinations */
*pos++ = 1;
- memcpy(pos, dst, ETH_ALEN);
+ /*
+ * flags field: bit 1 is unset if we know the sequence number and
+ * bit 2 is set if we have a reason code
+ */
+ *pos = 0;
+ if (!target_sn)
+ *pos |= MP_F_USN;
+ if (target_rcode)
+ *pos |= MP_F_RCODE;
+ pos++;
+ memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
- memcpy(pos, &dst_dsn, 4);
+ memcpy(pos, &target_sn, 4);
+ pos += 4;
+ memcpy(pos, &target_rcode, 2);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
return 0;
}
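The flag octet written by the reworked mesh_path_error_tx() above records whether the target sequence number is unknown (MP_F_USN) and whether a reason code is carried (MP_F_RCODE). A minimal sketch of that encoding, mirroring the logic in the hunk but not copied from the kernel:

    #include <stdint.h>

    #define MP_F_USN   0x01  /* target sequence number unknown */
    #define MP_F_RCODE 0x02  /* reason code present */

    static uint8_t perr_flags(uint32_t target_sn, uint16_t target_rcode)
    {
            uint8_t flags = 0;

            if (!target_sn)          /* an SN of zero is treated as unknown */
                    flags |= MP_F_USN;
            if (target_rcode)
                    flags |= MP_F_RCODE;
            return flags;
    }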
@@ -259,7 +312,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
* @hwmp_ie: hwmp information element (PREP or PREQ)
*
* This function updates the path routing information to the originator and the
- * transmitter of a HWMP PREQ or PREP fram.
+ * transmitter of a HWMP PREQ or PREP frame.
*
* Returns: metric to frame originator or 0 if the frame should not be further
* processed
@@ -269,18 +322,17 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
*/
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
- u8 *hwmp_ie)
+ u8 *hwmp_ie, enum mpath_frame_type action)
{
struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath;
struct sta_info *sta;
bool fresh_info;
u8 *orig_addr, *ta;
- u32 orig_dsn, orig_metric;
+ u32 orig_sn, orig_metric;
unsigned long orig_lifetime, exp_time;
u32 last_hop_metric, new_metric;
bool process = true;
- u8 action = mgmt->u.action.u.mesh_action.action_code;
rcu_read_lock();
sta = sta_info_get(local, mgmt->sa);
@@ -296,7 +348,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
switch (action) {
case MPATH_PREQ:
orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
- orig_dsn = PREQ_IE_ORIG_DSN(hwmp_ie);
+ orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
orig_metric = PREQ_IE_METRIC(hwmp_ie);
break;
@@ -309,7 +361,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
* information from both PREQ and PREP frames.
*/
orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
- orig_dsn = PREP_IE_ORIG_DSN(hwmp_ie);
+ orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
orig_metric = PREP_IE_METRIC(hwmp_ie);
break;
@@ -335,9 +387,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (mpath->flags & MESH_PATH_FIXED)
fresh_info = false;
else if ((mpath->flags & MESH_PATH_ACTIVE) &&
- (mpath->flags & MESH_PATH_DSN_VALID)) {
- if (DSN_GT(mpath->dsn, orig_dsn) ||
- (mpath->dsn == orig_dsn &&
+ (mpath->flags & MESH_PATH_SN_VALID)) {
+ if (SN_GT(mpath->sn, orig_sn) ||
+ (mpath->sn == orig_sn &&
action == MPATH_PREQ &&
new_metric > mpath->metric)) {
process = false;
@@ -356,9 +408,9 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (fresh_info) {
mesh_path_assign_nexthop(mpath, sta);
- mpath->flags |= MESH_PATH_DSN_VALID;
+ mpath->flags |= MESH_PATH_SN_VALID;
mpath->metric = new_metric;
- mpath->dsn = orig_dsn;
+ mpath->sn = orig_sn;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
mesh_path_activate(mpath);
@@ -397,7 +449,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (fresh_info) {
mesh_path_assign_nexthop(mpath, sta);
- mpath->flags &= ~MESH_PATH_DSN_VALID;
+ mpath->flags &= ~MESH_PATH_SN_VALID;
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
@@ -419,44 +471,47 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
- u8 *dst_addr, *orig_addr;
- u8 dst_flags, ttl;
- u32 orig_dsn, dst_dsn, lifetime;
+ u8 *target_addr, *orig_addr;
+ u8 target_flags, ttl;
+ u32 orig_sn, target_sn, lifetime;
bool reply = false;
bool forward = true;
- /* Update destination DSN, if present */
- dst_addr = PREQ_IE_DST_ADDR(preq_elem);
+ /* Update target SN, if present */
+ target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
- dst_dsn = PREQ_IE_DST_DSN(preq_elem);
- orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
- dst_flags = PREQ_IE_DST_F(preq_elem);
+ target_sn = PREQ_IE_TARGET_SN(preq_elem);
+ orig_sn = PREQ_IE_ORIG_SN(preq_elem);
+ target_flags = PREQ_IE_TARGET_F(preq_elem);
- if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ mhwmp_dbg("received PREQ from %pM\n", orig_addr);
+
+ if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ mhwmp_dbg("PREQ is for us\n");
forward = false;
reply = true;
metric = 0;
- if (time_after(jiffies, ifmsh->last_dsn_update +
+ if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
- time_before(jiffies, ifmsh->last_dsn_update)) {
- dst_dsn = ++ifmsh->dsn;
- ifmsh->last_dsn_update = jiffies;
+ time_before(jiffies, ifmsh->last_sn_update)) {
+ target_sn = ++ifmsh->sn;
+ ifmsh->last_sn_update = jiffies;
}
} else {
rcu_read_lock();
- mpath = mesh_path_lookup(dst_addr, sdata);
+ mpath = mesh_path_lookup(target_addr, sdata);
if (mpath) {
- if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
- DSN_LT(mpath->dsn, dst_dsn)) {
- mpath->dsn = dst_dsn;
- mpath->flags |= MESH_PATH_DSN_VALID;
- } else if ((!(dst_flags & MP_F_DO)) &&
+ if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
+ SN_LT(mpath->sn, target_sn)) {
+ mpath->sn = target_sn;
+ mpath->flags |= MESH_PATH_SN_VALID;
+ } else if ((!(target_flags & MP_F_DO)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
metric = mpath->metric;
- dst_dsn = mpath->dsn;
- if (dst_flags & MP_F_RF)
- dst_flags |= MP_F_DO;
+ target_sn = mpath->sn;
+ if (target_flags & MP_F_RF)
+ target_flags |= MP_F_DO;
else
forward = false;
}
@@ -467,13 +522,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
if (reply) {
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.dot11MeshTTL;
- if (ttl != 0)
- mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
- cpu_to_le32(dst_dsn), 0, orig_addr,
- cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
+ if (ttl != 0) {
+ mhwmp_dbg("replying to the PREQ\n");
+ mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
+ cpu_to_le32(target_sn), 0, orig_addr,
+ cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
- else
+ } else
ifmsh->mshstats.dropped_frames_ttl++;
}
@@ -487,13 +543,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
+ mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr);
--ttl;
flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
- cpu_to_le32(orig_dsn), dst_flags, dst_addr,
- cpu_to_le32(dst_dsn), sdata->dev->broadcast,
+ cpu_to_le32(orig_sn), target_flags, target_addr,
+ cpu_to_le32(target_sn), broadcast_addr,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
@@ -508,10 +565,12 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 *prep_elem, u32 metric)
{
struct mesh_path *mpath;
- u8 *dst_addr, *orig_addr;
+ u8 *target_addr, *orig_addr;
u8 ttl, hopcount, flags;
u8 next_hop[ETH_ALEN];
- u32 dst_dsn, orig_dsn, lifetime;
+ u32 target_sn, orig_sn, lifetime;
+
+ mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem));
/* Note that we divert from the draft nomenclature and denominate
* destination to what the draft refers to as originator. So in this
@@ -519,8 +578,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
* which corresponds with the originator of the PREQ to which this PREP
* replies
*/
- dst_addr = PREP_IE_DST_ADDR(prep_elem);
- if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ target_addr = PREP_IE_TARGET_ADDR(prep_elem);
+ if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
/* destination, no forwarding required */
return;
@@ -531,7 +590,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
}
rcu_read_lock();
- mpath = mesh_path_lookup(dst_addr, sdata);
+ mpath = mesh_path_lookup(target_addr, sdata);
if (mpath)
spin_lock_bh(&mpath->state_lock);
else
@@ -547,13 +606,13 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREP_IE_LIFETIME(prep_elem);
hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
- dst_dsn = PREP_IE_DST_DSN(prep_elem);
- orig_dsn = PREP_IE_ORIG_DSN(prep_elem);
+ target_sn = PREP_IE_TARGET_SN(prep_elem);
+ orig_sn = PREP_IE_ORIG_SN(prep_elem);
mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
- cpu_to_le32(orig_dsn), 0, dst_addr,
- cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
- cpu_to_le32(lifetime), cpu_to_le32(metric),
+ cpu_to_le32(orig_sn), 0, target_addr,
+ cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount,
+ ttl, cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
rcu_read_unlock();
@@ -570,32 +629,96 @@ fail:
static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, u8 *perr_elem)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
- u8 *ta, *dst_addr;
- u32 dst_dsn;
+ u8 ttl;
+ u8 *ta, *target_addr;
+ u8 target_flags;
+ u32 target_sn;
+ u16 target_rcode;
ta = mgmt->sa;
- dst_addr = PERR_IE_DST_ADDR(perr_elem);
- dst_dsn = PERR_IE_DST_DSN(perr_elem);
+ ttl = PERR_IE_TTL(perr_elem);
+ if (ttl <= 1) {
+ ifmsh->mshstats.dropped_frames_ttl++;
+ return;
+ }
+ ttl--;
+ target_flags = PERR_IE_TARGET_FLAGS(perr_elem);
+ target_addr = PERR_IE_TARGET_ADDR(perr_elem);
+ target_sn = PERR_IE_TARGET_SN(perr_elem);
+ target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
+
rcu_read_lock();
- mpath = mesh_path_lookup(dst_addr, sdata);
+ mpath = mesh_path_lookup(target_addr, sdata);
if (mpath) {
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_ACTIVE &&
memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
- (!(mpath->flags & MESH_PATH_DSN_VALID) ||
- DSN_GT(dst_dsn, mpath->dsn))) {
+ (!(mpath->flags & MESH_PATH_SN_VALID) ||
+ SN_GT(target_sn, mpath->sn))) {
mpath->flags &= ~MESH_PATH_ACTIVE;
- mpath->dsn = dst_dsn;
+ mpath->sn = target_sn;
spin_unlock_bh(&mpath->state_lock);
- mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
- sdata->dev->broadcast, sdata);
+ mesh_path_error_tx(ttl, target_addr, cpu_to_le32(target_sn),
+ cpu_to_le16(target_rcode),
+ broadcast_addr, sdata);
} else
spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
}
+static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt,
+ struct ieee80211_rann_ie *rann)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct mesh_path *mpath;
+ u8 *ta;
+ u8 ttl, flags, hopcount;
+ u8 *orig_addr;
+ u32 orig_sn, metric;
+
+ ta = mgmt->sa;
+ ttl = rann->rann_ttl;
+ if (ttl <= 1) {
+ ifmsh->mshstats.dropped_frames_ttl++;
+ return;
+ }
+ ttl--;
+ flags = rann->rann_flags;
+ orig_addr = rann->rann_addr;
+ orig_sn = rann->rann_seq;
+ hopcount = rann->rann_hopcount;
+ hopcount++;
+ metric = rann->rann_metric;
+ mhwmp_dbg("received RANN from %pM\n", orig_addr);
+
+ rcu_read_lock();
+ mpath = mesh_path_lookup(orig_addr, sdata);
+ if (!mpath) {
+ mesh_path_add(orig_addr, sdata);
+ mpath = mesh_path_lookup(orig_addr, sdata);
+ if (!mpath) {
+ rcu_read_unlock();
+ sdata->u.mesh.mshstats.dropped_frames_no_route++;
+ return;
+ }
+ mesh_queue_preq(mpath,
+ PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ }
+ if (mpath->sn < orig_sn) {
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
+ cpu_to_le32(orig_sn),
+ 0, NULL, 0, broadcast_addr,
+ hopcount, ttl, 0,
+ cpu_to_le32(metric + mpath->metric),
+ 0, sdata);
+ mpath->sn = orig_sn;
+ }
+ rcu_read_unlock();
+}
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
@@ -614,34 +737,34 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
len - baselen, &elems);
- switch (mgmt->u.action.u.mesh_action.action_code) {
- case MPATH_PREQ:
- if (!elems.preq || elems.preq_len != 37)
+ if (elems.preq) {
+ if (elems.preq_len != 37)
/* Right now we support just 1 destination and no AE */
return;
- last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
- if (!last_hop_metric)
- return;
- hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
- break;
- case MPATH_PREP:
- if (!elems.prep || elems.prep_len != 31)
+ last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq,
+ MPATH_PREQ);
+ if (last_hop_metric)
+ hwmp_preq_frame_process(sdata, mgmt, elems.preq,
+ last_hop_metric);
+ }
+ if (elems.prep) {
+ if (elems.prep_len != 31)
/* Right now we support no AE */
return;
- last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
- if (!last_hop_metric)
- return;
- hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
- break;
- case MPATH_PERR:
- if (!elems.perr || elems.perr_len != 12)
+ last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep,
+ MPATH_PREP);
+ if (last_hop_metric)
+ hwmp_prep_frame_process(sdata, mgmt, elems.prep,
+ last_hop_metric);
+ }
+ if (elems.perr) {
+ if (elems.perr_len != 15)
/* Right now we support only one destination per PERR */
return;
hwmp_perr_frame_process(sdata, mgmt, elems.perr);
- default:
- return;
}
-
+ if (elems.rann)
+ hwmp_rann_frame_process(sdata, mgmt, elems.rann);
}
/**
@@ -661,7 +784,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
+ mhwmp_dbg("could not allocate PREQ node\n");
return;
}
@@ -670,7 +793,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
+ mhwmp_dbg("PREQ node queue full\n");
return;
}
@@ -705,7 +828,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
struct mesh_path *mpath;
- u8 ttl, dst_flags;
+ u8 ttl, target_flags;
u32 lifetime;
spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -747,11 +870,11 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
ifmsh->last_preq = jiffies;
- if (time_after(jiffies, ifmsh->last_dsn_update +
+ if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
- time_before(jiffies, ifmsh->last_dsn_update)) {
- ++ifmsh->dsn;
- sdata->u.mesh.last_dsn_update = jiffies;
+ time_before(jiffies, ifmsh->last_sn_update)) {
+ ++ifmsh->sn;
+ sdata->u.mesh.last_sn_update = jiffies;
}
lifetime = default_lifetime(sdata);
ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
@@ -762,14 +885,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
}
if (preq_node->flags & PREQ_Q_F_REFRESH)
- dst_flags = MP_F_DO;
+ target_flags = MP_F_DO;
else
- dst_flags = MP_F_RF;
+ target_flags = MP_F_RF;
spin_unlock_bh(&mpath->state_lock);
mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
- cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
- cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
+ cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
+ cpu_to_le32(mpath->sn), broadcast_addr, 0,
ttl, cpu_to_le32(lifetime), 0,
cpu_to_le32(ifmsh->preq_id++), sdata);
mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
@@ -796,15 +919,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
struct sk_buff *skb_to_free = NULL;
struct mesh_path *mpath;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u8 *dst_addr = hdr->addr3;
+ u8 *target_addr = hdr->addr3;
int err = 0;
rcu_read_lock();
- mpath = mesh_path_lookup(dst_addr, sdata);
+ mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
- mesh_path_add(dst_addr, sdata);
- mpath = mesh_path_lookup(dst_addr, sdata);
+ mesh_path_add(target_addr, sdata);
+ mpath = mesh_path_lookup(target_addr, sdata);
if (!mpath) {
sdata->u.mesh.mshstats.dropped_frames_no_route++;
err = -ENOSPC;
@@ -813,17 +936,16 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
}
if (mpath->flags & MESH_PATH_ACTIVE) {
- if (time_after(jiffies, mpath->exp_time +
- msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
- && !memcmp(sdata->dev->dev_addr, hdr->addr4,
- ETH_ALEN)
- && !(mpath->flags & MESH_PATH_RESOLVING)
- && !(mpath->flags & MESH_PATH_FIXED)) {
+ if (time_after(jiffies,
+ mpath->exp_time +
+ msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
+ !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
+ !(mpath->flags & MESH_PATH_RESOLVING) &&
+ !(mpath->flags & MESH_PATH_FIXED)) {
mesh_queue_preq(mpath,
PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
- memcpy(hdr->addr1, mpath->next_hop->sta.addr,
- ETH_ALEN);
+ memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN);
} else {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -882,3 +1004,14 @@ void mesh_path_timer(unsigned long data)
endmpathtimer:
rcu_read_unlock();
}
+
+void
+mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+ mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr,
+ cpu_to_le32(++ifmsh->sn),
+ 0, NULL, 0, broadcast_addr,
+ 0, MESH_TTL, 0, 0, 0, sdata);
+}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 751c4d0e2b36..a8da23905c70 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 open80211s Ltd.
+ * Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -449,6 +449,7 @@ err_path_alloc:
*/
void mesh_plink_broken(struct sta_info *sta)
{
+ static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
@@ -463,11 +464,12 @@ void mesh_plink_broken(struct sta_info *sta)
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
- ++mpath->dsn;
+ ++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
- mesh_path_error_tx(mpath->dst,
- cpu_to_le32(mpath->dsn),
- sdata->dev->broadcast, sdata);
+ mesh_path_error_tx(MESH_TTL, mpath->dst,
+ cpu_to_le32(mpath->sn),
+ cpu_to_le16(PERR_RCODE_DEST_UNREACH),
+ bcast, sdata);
} else
spin_unlock_bh(&mpath->state_lock);
}
@@ -601,7 +603,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mesh_path *mpath;
- u32 dsn = 0;
+ u32 sn = 0;
if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
u8 *ra, *da;
@@ -610,8 +612,9 @@ void mesh_path_discard_frame(struct sk_buff *skb,
ra = hdr->addr1;
mpath = mesh_path_lookup(da, sdata);
if (mpath)
- dsn = ++mpath->dsn;
- mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
+ sn = ++mpath->sn;
+ mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn),
+ cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
}
kfree_skb(skb);
@@ -646,7 +649,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
spin_lock_bh(&mpath->state_lock);
mesh_path_assign_nexthop(mpath, next_hop);
- mpath->dsn = 0xffff;
+ mpath->sn = 0xffff;
mpath->metric = 0;
mpath->hop_count = 0;
mpath->exp_time = 0;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index ffcbad75e09b..0f7c6e6a4248 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 open80211s Ltd.
+ * Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,9 +18,8 @@
#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
-#define PLINK_GET_FRAME_SUBTYPE(p) (p)
-#define PLINK_GET_LLID(p) (p + 1)
-#define PLINK_GET_PLID(p) (p + 3)
+#define PLINK_GET_LLID(p) (p + 4)
+#define PLINK_GET_PLID(p) (p + 6)
#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
jiffies + HZ * t / 1000))
@@ -65,6 +64,7 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
mesh_accept_plinks_update(sdata);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
static inline
@@ -72,12 +72,13 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
{
atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
mesh_accept_plinks_update(sdata);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
}
/**
* mesh_plink_fsm_restart - restart a mesh peer link finite state machine
*
- * @sta: mes peer link to restart
+ * @sta: mesh peer link to restart
*
* Locking: this function must be called holding sta->lock
*/
@@ -152,6 +153,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
+ static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
u8 *pos;
int ie_len;
@@ -169,7 +171,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = PLINK_CATEGORY;
+ mgmt->u.action.category = MESH_PLINK_CATEGORY;
mgmt->u.action.u.plink_action.action_code = action;
if (action == PLINK_CLOSE)
@@ -179,7 +181,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
if (action == PLINK_CONFIRM) {
pos = skb_put(skb, 4);
/* two-byte status code followed by two-byte AID */
- memset(pos, 0, 4);
+ memset(pos, 0, 2);
+ memcpy(pos + 2, &plid, 2);
}
mesh_mgmt_ies_add(skb, sdata);
}
@@ -187,18 +190,18 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
/* Add Peer Link Management element */
switch (action) {
case PLINK_OPEN:
- ie_len = 3;
+ ie_len = 6;
break;
case PLINK_CONFIRM:
- ie_len = 5;
+ ie_len = 8;
include_plid = true;
break;
case PLINK_CLOSE:
default:
if (!plid)
- ie_len = 5;
+ ie_len = 8;
else {
- ie_len = 7;
+ ie_len = 10;
include_plid = true;
}
break;
@@ -207,7 +210,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PEER_LINK;
*pos++ = ie_len;
- *pos++ = action;
+ memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto));
+ pos += 4;
memcpy(pos, &llid, 2);
if (include_plid) {
pos += 2;
@@ -218,7 +222,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
memcpy(pos, &reason, 2);
}
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
return 0;
}
@@ -395,6 +399,17 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
u8 ie_len;
u8 *baseaddr;
__le16 plid, llid, reason;
+#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
+ static const char *mplstates[] = {
+ [PLINK_LISTEN] = "LISTEN",
+ [PLINK_OPN_SNT] = "OPN-SNT",
+ [PLINK_OPN_RCVD] = "OPN-RCVD",
+ [PLINK_CNF_RCVD] = "CNF_RCVD",
+ [PLINK_ESTAB] = "ESTAB",
+ [PLINK_HOLDING] = "HOLDING",
+ [PLINK_BLOCKED] = "BLOCKED"
+ };
+#endif
/* need action_code, aux */
if (len < IEEE80211_MIN_ACTION_SIZE + 3)
@@ -417,12 +432,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- ftype = *((u8 *)PLINK_GET_FRAME_SUBTYPE(elems.peer_link));
+ ftype = mgmt->u.action.u.plink_action.action_code;
ie_len = elems.peer_link_len;
- if ((ftype == PLINK_OPEN && ie_len != 3) ||
- (ftype == PLINK_CONFIRM && ie_len != 5) ||
- (ftype == PLINK_CLOSE && ie_len != 5 && ie_len != 7)) {
- mpl_dbg("Mesh plink: incorrect plink ie length\n");
+ if ((ftype == PLINK_OPEN && ie_len != 6) ||
+ (ftype == PLINK_CONFIRM && ie_len != 8) ||
+ (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
+ mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
+ ftype, ie_len);
return;
}
@@ -434,7 +450,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
* from the point of view of this host.
*/
memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
- if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 7))
+ if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
rcu_read_lock();
@@ -532,8 +548,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
}
- mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %d %d %d %d\n",
- mgmt->sa, sta->plink_state,
+ mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
+ mgmt->sa, mplstates[sta->plink_state],
le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
event);
reason = 0;
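The mesh_plink.c changes above enlarge the Peer Link Management IE because the 4-byte Mesh Peering Protocol identifier (00:0F:AC:2A) now precedes the link IDs, which is also why PLINK_GET_LLID() and PLINK_GET_PLID() moved to offsets 4 and 6. A standalone sketch of the resulting IE body (host-endian for brevity, whereas the kernel writes little-endian fields; not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* Protocol ID, LLID, optional PLID, optional reason code.
     * Returns 6, 8 or 10 -- the ie_len values used in the hunk above. */
    static size_t build_plink_ie(uint8_t *buf, uint16_t llid,
                                 const uint16_t *plid, const uint16_t *reason)
    {
            static const uint8_t proto[4] = { 0x00, 0x0F, 0xAC, 0x2A };
            size_t len = 0;

            memcpy(buf + len, proto, sizeof(proto));
            len += sizeof(proto);                   /* bytes 0..3 */
            memcpy(buf + len, &llid, 2);
            len += 2;                               /* bytes 4..5: LLID */
            if (plid) {
                    memcpy(buf + len, plid, 2);
                    len += 2;                       /* bytes 6..7: PLID */
            }
            if (reason) {
                    memcpy(buf + len, reason, 2);
                    len += 2;                       /* reason code (CLOSE) */
            }
            return len;
    }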
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 71220a5d1406..6dc7b5ad9a41 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -426,7 +426,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
}
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
@@ -467,7 +468,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
__cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
else
cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
- ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
+ if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -498,7 +501,8 @@ void ieee80211_send_pspoll(struct ieee80211_local *local,
memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_nullfunc(struct ieee80211_local *local,
@@ -531,7 +535,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
/* spectrum management related things */
@@ -1463,8 +1468,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (status_code != WLAN_STATUS_SUCCESS) {
printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
sdata->dev->name, status_code);
- list_del(&wk->list);
- kfree(wk);
+ wk->state = IEEE80211_MGD_STATE_IDLE;
return RX_MGMT_CFG80211_ASSOC;
}
@@ -1899,7 +1903,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
fc = le16_to_cpu(mgmt->frame_control);
switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_REQ:
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
case IEEE80211_STYPE_AUTH:
@@ -2505,6 +2508,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_mgd_work *wk;
const u8 *bssid = NULL;
+ bool not_auth_yet = false;
mutex_lock(&ifmgd->mtx);
@@ -2514,6 +2518,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
} else list_for_each_entry(wk, &ifmgd->work_list, list) {
if (&wk->bss->cbss == req->bss) {
bssid = req->bss->bssid;
+ if (wk->state == IEEE80211_MGD_STATE_PROBE)
+ not_auth_yet = true;
list_del(&wk->list);
kfree(wk);
break;
@@ -2521,6 +2527,20 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
}
/*
+ * If somebody requests authentication and we haven't
+ * sent out an auth frame yet there's no need to send
+ * out a deauth frame either. If the state was PROBE,
+ * then this is the case. If it's AUTH we have sent a
+ * frame, and if it's IDLE we have completed the auth
+ * process already.
+ */
+ if (not_auth_yet) {
+ mutex_unlock(&ifmgd->mtx);
+ __cfg80211_auth_canceled(sdata->dev, bssid);
+ return 0;
+ }
+
+ /*
* cfg80211 should catch this ... but it's racy since
* we can receive a deauth frame, process it, hand it
* to cfg80211 while that's in a locked section already
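The mlme.c hunks above all switch to the same calling convention: rather than passing an encrypt flag to ieee80211_tx_skb(), the caller first marks the skb's control block with IEEE80211_TX_INTFL_DONT_ENCRYPT. The pattern, factored into a hypothetical helper (only meaningful inside mac80211, where IEEE80211_SKB_CB() and ieee80211_tx_skb() are declared; the helper name is invented for illustration):

    /* Hypothetical mac80211-internal helper, not part of the patch. */
    static void tx_mgmt_no_encrypt(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb)
    {
            IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
            ieee80211_tx_skb(sdata, skb);
    }

Note that the deauth/disassoc path above only sets the flag when management frame protection is disabled, so it would not use such a helper unconditionally.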
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b33efc4fc267..b9007f80cb92 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -163,8 +163,7 @@ struct rate_control_ref *rate_control_alloc(const char *name,
#ifdef CONFIG_MAC80211_DEBUGFS
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
local->debugfs.rcdir = debugfsdir;
- local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir,
- ref, &rcname_ops);
+ debugfs_create_file("name", 0400, debugfsdir, ref, &rcname_ops);
#endif
ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
@@ -188,9 +187,7 @@ static void rate_control_release(struct kref *kref)
ctrl_ref->ops->free(ctrl_ref->priv);
#ifdef CONFIG_MAC80211_DEBUGFS
- debugfs_remove(ctrl_ref->local->debugfs.rcname);
- ctrl_ref->local->debugfs.rcname = NULL;
- debugfs_remove(ctrl_ref->local->debugfs.rcdir);
+ debugfs_remove_recursive(ctrl_ref->local->debugfs.rcdir);
ctrl_ref->local->debugfs.rcdir = NULL;
#endif
@@ -287,9 +284,16 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
struct rate_control_ref *ref, *old;
ASSERT_RTNL();
+
if (local->open_count)
return -EBUSY;
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+ if (WARN_ON(!local->ops->set_rts_threshold))
+ return -EINVAL;
+ return 0;
+ }
+
ref = rate_control_alloc(name, local);
if (!ref) {
printk(KERN_WARNING "%s: Failed to select rate control "
@@ -308,7 +312,6 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
"algorithm '%s'\n", wiphy_name(local->hw.wiphy),
ref->ops->name);
-
return 0;
}
@@ -317,6 +320,10 @@ void rate_control_deinitialize(struct ieee80211_local *local)
struct rate_control_ref *ref;
ref = local->rate_ctrl;
+
+ if (!ref)
+ return;
+
local->rate_ctrl = NULL;
rate_control_put(ref);
}
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 2ab5ad9e71ce..cb9bd1f65e27 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -59,6 +59,9 @@ static inline void rate_control_rate_init(struct sta_info *sta)
void *priv_sta = sta->rate_ctrl_priv;
struct ieee80211_supported_band *sband;
+ if (!ref)
+ return;
+
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
@@ -72,7 +75,7 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
- if (ref->ops->rate_update)
+ if (ref && ref->ops->rate_update)
ref->ops->rate_update(ref->priv, sband, ista,
priv_sta, changed);
}
@@ -97,7 +100,7 @@ static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_DEBUGFS
struct rate_control_ref *ref = sta->rate_ctrl;
- if (sta->debugfs.dir && ref->ops->add_sta_debugfs)
+ if (ref && sta->debugfs.dir && ref->ops->add_sta_debugfs)
ref->ops->add_sta_debugfs(ref->priv, sta->rate_ctrl_priv,
sta->debugfs.dir);
#endif
@@ -107,7 +110,7 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
{
#ifdef CONFIG_MAC80211_DEBUGFS
struct rate_control_ref *ref = sta->rate_ctrl;
- if (ref->ops->remove_sta_debugfs)
+ if (ref && ref->ops->remove_sta_debugfs)
ref->ops->remove_sta_debugfs(ref->priv, sta->rate_ctrl_priv);
#endif
}
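These `if (ref && ...)` additions exist because ieee80211_init_rate_ctrl_alg() above now returns early when the hardware advertises IEEE80211_HW_HAS_RATE_CONTROL, leaving local->rate_ctrl NULL. A small standalone sketch of the resulting guard pattern, with simplified names that are not the mac80211 structures:

/* Standalone sketch of the NULL-ref guard pattern used above. */
struct rc_ops {
	void (*rate_update)(void *priv);
};

struct rc_ref {
	const struct rc_ops *ops;
	void *priv;
};

static void rc_rate_update(struct rc_ref *ref)
{
	/* With hardware rate control there is no software ref at all. */
	if (ref && ref->ops->rate_update)
		ref->ops->rate_update(ref->priv);
}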
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index a59043fbb0ff..45667054a5f3 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/poll.h>
#include <linux/netdevice.h>
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5c385e3c1d1f..beecf50fbd10 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,11 +27,10 @@
#include "tkip.h"
#include "wme.h"
-static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
- struct tid_ampdu_rx *tid_agg_rx,
- struct sk_buff *skb,
- u16 mpdu_seq_num,
- int bar_req);
+static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+ struct tid_ampdu_rx *tid_agg_rx,
+ u16 head_seq_num);
+
/*
* monitor mode reception
*
@@ -39,11 +38,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
* only useful for monitoring.
*/
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
- struct sk_buff *skb,
- int rtap_len)
+ struct sk_buff *skb)
{
- skb_pull(skb, rtap_len);
-
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
skb_trim(skb, skb->len - FCS_LEN);
@@ -59,15 +55,14 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
}
static inline int should_drop_frame(struct sk_buff *skb,
- int present_fcs_len,
- int radiotap_len)
+ int present_fcs_len)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
return 1;
- if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
+ if (unlikely(skb->len < 16 + present_fcs_len))
return 1;
if (ieee80211_is_ctl(hdr->frame_control) &&
!ieee80211_is_pspoll(hdr->frame_control) &&
@@ -95,10 +90,6 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
if (len & 1) /* padding for RX_FLAGS if necessary */
len++;
- /* make sure radiotap starts at a naturally aligned address */
- if (len % 8)
- len = roundup(len, 8);
-
return len;
}
@@ -116,6 +107,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
+ u16 rx_flags = 0;
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
@@ -134,7 +126,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_TSFT */
if (status->flag & RX_FLAG_TSFT) {
- *(__le64 *)pos = cpu_to_le64(status->mactime);
+ put_unaligned_le64(status->mactime, pos);
rthdr->it_present |=
cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
pos += 8;
@@ -166,17 +158,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos++;
/* IEEE80211_RADIOTAP_CHANNEL */
- *(__le16 *)pos = cpu_to_le16(status->freq);
+ put_unaligned_le16(status->freq, pos);
pos += 2;
if (status->band == IEEE80211_BAND_5GHZ)
- *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
- IEEE80211_CHAN_5GHZ);
+ put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
+ pos);
+ else if (status->flag & RX_FLAG_HT)
+ put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
+ pos);
else if (rate->flags & IEEE80211_RATE_ERP_G)
- *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
- IEEE80211_CHAN_2GHZ);
+ put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
+ pos);
else
- *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
- IEEE80211_CHAN_2GHZ);
+ put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
+ pos);
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -205,10 +200,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_RX_FLAGS */
/* ensure 2 byte alignment for the 2 byte field as required */
- if ((pos - (unsigned char *)rthdr) & 1)
+ if ((pos - (u8 *)rthdr) & 1)
pos++;
if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
- *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
+ rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
+ put_unaligned_le16(rx_flags, pos);
pos += 2;
}
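The open-coded `*(__le16 *)pos = cpu_to_le16(...)` stores are replaced because the radiotap header no longer starts at an 8-byte-aligned offset (the roundup is dropped earlier in this series of hunks), so every multi-byte field is written with an alignment-safe helper. A minimal sketch of the pattern, assuming the usual <asm/unaligned.h> helpers; the function and its arguments are illustrative only:

/* Sketch: alignment-safe little-endian stores into a byte buffer. */
#include <linux/types.h>
#include <asm/unaligned.h>

static void radiotap_store_example(u8 *pos, u64 tsft, u16 freq)
{
	put_unaligned_le64(tsft, pos);	/* valid at any alignment */
	pos += 8;
	put_unaligned_le16(freq, pos);
}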
@@ -227,7 +223,6 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct sk_buff *skb, *skb2;
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
- int rtap_len = 0;
/*
* First, we may need to make a copy of the skb because
@@ -237,25 +232,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
* We don't need to, of course, if we aren't going to return
* the SKB because it has a bad FCS/PLCP checksum.
*/
- if (status->flag & RX_FLAG_RADIOTAP)
- rtap_len = ieee80211_get_radiotap_len(origskb->data);
- else
- /* room for the radiotap header based on driver features */
- needed_headroom = ieee80211_rx_radiotap_len(local, status);
+
+ /* room for the radiotap header based on driver features */
+ needed_headroom = ieee80211_rx_radiotap_len(local, status);
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
if (!local->monitors) {
- if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
+ if (should_drop_frame(origskb, present_fcs_len)) {
dev_kfree_skb(origskb);
return NULL;
}
- return remove_monitor_info(local, origskb, rtap_len);
+ return remove_monitor_info(local, origskb);
}
- if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
+ if (should_drop_frame(origskb, present_fcs_len)) {
/* only need to expand headroom if necessary */
skb = origskb;
origskb = NULL;
@@ -279,16 +272,14 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
*/
skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
- origskb = remove_monitor_info(local, origskb, rtap_len);
+ origskb = remove_monitor_info(local, origskb);
if (!skb)
return origskb;
}
- /* if necessary, prepend radiotap information */
- if (!(status->flag & RX_FLAG_RADIOTAP))
- ieee80211_add_rx_radiotap_header(local, skb, rate,
- needed_headroom);
+ /* prepend radiotap information */
+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -489,7 +480,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- char *dev_addr = rx->dev->dev_addr;
+ char *dev_addr = rx->sdata->dev->dev_addr;
if (ieee80211_is_data(hdr->frame_control)) {
if (is_multicast_ether_addr(hdr->addr1)) {
@@ -518,7 +509,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_is_action(hdr->frame_control)) {
mgmt = (struct ieee80211_mgmt *)hdr;
- if (mgmt->u.action.category != PLINK_CATEGORY)
+ if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
return RX_DROP_MONITOR;
return RX_CONTINUE;
}
@@ -603,7 +594,9 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int keyidx;
int hdrlen;
ieee80211_rx_result result = RX_DROP_UNUSABLE;
@@ -657,8 +650,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
} else if (mmie_keyidx >= 0) {
/* Broadcast/multicast robust management frame / BIP */
- if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
- (rx->status->flag & RX_FLAG_IV_STRIPPED))
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
if (mmie_keyidx < NUM_DEFAULT_KEYS ||
@@ -690,8 +683,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
* we somehow allow the driver to tell us which key
* the hardware used if this flag is set?
*/
- if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
- (rx->status->flag & RX_FLAG_IV_STRIPPED))
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -727,8 +720,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
/* Check for weak IVs if possible */
if (rx->sta && rx->key->conf.alg == ALG_WEP &&
ieee80211_is_data(hdr->frame_control) &&
- (!(rx->status->flag & RX_FLAG_IV_STRIPPED) ||
- !(rx->status->flag & RX_FLAG_DECRYPTED)) &&
+ (!(status->flag & RX_FLAG_IV_STRIPPED) ||
+ !(status->flag & RX_FLAG_DECRYPTED)) &&
ieee80211_wep_is_weak_iv(rx->skb, rx->key))
rx->sta->wep_weak_iv_count++;
@@ -748,7 +741,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
}
/* either the frame has been decrypted or will be dropped */
- rx->status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_DECRYPTED;
return result;
}
@@ -792,7 +785,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
atomic_inc(&sdata->bss->num_sta_ps);
- set_sta_flags(sta, WLAN_STA_PS);
+ set_sta_flags(sta, WLAN_STA_PS_STA);
drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
@@ -800,45 +793,37 @@ static void ap_sta_ps_start(struct sta_info *sta)
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
-static int ap_sta_ps_end(struct sta_info *sta)
+static void ap_sta_ps_end(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct ieee80211_local *local = sdata->local;
- int sent, buffered;
atomic_dec(&sdata->bss->num_sta_ps);
- clear_sta_flags(sta, WLAN_STA_PS);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
-
- if (!skb_queue_empty(&sta->ps_tx_buf))
- sta_info_clear_tim_bit(sta);
+ clear_sta_flags(sta, WLAN_STA_PS_STA);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- /* Send all buffered frames to the station */
- sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
- buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
- sent += buffered;
- local->total_ps_buffered -= buffered;
-
+ if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
- "since STA not sleeping anymore\n", sdata->dev->name,
- sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
+ printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
+ sdata->dev->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ return;
+ }
- return sent;
+ ieee80211_sta_ps_deliver_wakeup(sta);
}
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
struct sta_info *sta = rx->sta;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!sta)
return RX_CONTINUE;
@@ -869,9 +854,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
- sta->last_signal = rx->status->signal;
- sta->last_qual = rx->status->qual;
- sta->last_noise = rx->status->noise;
+ sta->last_signal = status->signal;
+ sta->last_noise = status->noise;
/*
* Change STA power saving mode only at the end of a frame
@@ -880,7 +864,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (!ieee80211_has_morefrags(hdr->frame_control) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
- if (test_sta_flags(sta, WLAN_STA_PS)) {
+ if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
/*
* Ignore doze->wake transitions that are
* indicated by non-data frames, the standard
@@ -891,19 +875,24 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
*/
if (ieee80211_is_data(hdr->frame_control) &&
!ieee80211_has_pm(hdr->frame_control))
- rx->sent_ps_buffered += ap_sta_ps_end(sta);
+ ap_sta_ps_end(sta);
} else {
if (ieee80211_has_pm(hdr->frame_control))
ap_sta_ps_start(sta);
}
}
- /* Drop data::nullfunc frames silently, since they are used only to
- * control station power saving mode. */
- if (ieee80211_is_nullfunc(hdr->frame_control)) {
+ /*
+ * Drop (qos-)data::nullfunc frames silently, since they
+ * are used only to control station power saving mode.
+ */
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
- /* Update counter and free packet here to avoid counting this
- * as a dropped packed. */
+ /*
+ * Update counter and free packet here to avoid
+			 * counting this as a dropped packet.
+ */
sta->rx_packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
@@ -1103,9 +1092,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
- struct sk_buff *skb;
- int no_pending_pkts;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
__le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
@@ -1116,56 +1103,10 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
(sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
return RX_DROP_UNUSABLE;
- skb = skb_dequeue(&rx->sta->tx_filtered);
- if (!skb) {
- skb = skb_dequeue(&rx->sta->ps_tx_buf);
- if (skb)
- rx->local->total_ps_buffered--;
- }
- no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
- skb_queue_empty(&rx->sta->ps_tx_buf);
-
- if (skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) skb->data;
-
- /*
- * Tell TX path to send this frame even though the STA may
- * still remain is PS mode after this frame exchange.
- */
- info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
-
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
- rx->sta->sta.addr, rx->sta->sta.aid,
- skb_queue_len(&rx->sta->ps_tx_buf));
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
-
- /* Use MoreData flag to indicate whether there are more
- * buffered frames for this STA */
- if (no_pending_pkts)
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
- else
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-
- ieee80211_add_pending_skb(rx->local, skb);
-
- if (no_pending_pkts)
- sta_info_clear_tim_bit(rx->sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- } else if (!rx->sent_ps_buffered) {
- /*
- * FIXME: This can be the result of a race condition between
- * us expiring a frame and the station polling for it.
- * Should we send it a null-func frame indicating we
- * have nothing buffered for it?
- */
- printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
- "though there are no buffered frames for it\n",
- rx->dev->name, rx->sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- }
+ if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_poll_response(rx->sta);
+ else
+ set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
/* Free PS Poll skb here instead of returning RX_DROP that would
* count as a dropped frame. */
@@ -1206,11 +1147,14 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
static int
ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
/*
* Pass through unencrypted frames if the hardware has
* decrypted them already.
*/
- if (rx->status->flag & RX_FLAG_DECRYPTED)
+ if (status->flag & RX_FLAG_DECRYPTED)
return 0;
/* Drop unencrypted frames if key is set. */
@@ -1224,8 +1168,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
rx->key))
return -EACCES;
/* BIP does not use Protected field, so need to check MMIE */
- if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb)
- && ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
+ if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
+ ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
rx->key))
return -EACCES;
/*
@@ -1244,8 +1188,18 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
- struct net_device *dev = rx->dev;
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct net_device *dev = sdata->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
+ return -1;
+
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
+ (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
+ return -1;
return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
}
@@ -1264,7 +1218,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* of whether the frame was encrypted or not.
*/
if (ehdr->h_proto == htons(ETH_P_PAE) &&
- (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
+ (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 ||
compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
return true;
@@ -1281,10 +1235,10 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
static void
ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
- struct net_device *dev = rx->dev;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct net_device *dev = sdata->dev;
struct ieee80211_local *local = rx->local;
struct sk_buff *skb, *xmit_skb;
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
struct sta_info *dsta;
@@ -1294,7 +1248,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if ((sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
- (rx->flags & IEEE80211_RX_RA_MATCH)) {
+ (rx->flags & IEEE80211_RX_RA_MATCH) &&
+ (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
if (is_multicast_ether_addr(ehdr->h_dest)) {
/*
* send multicast frames both to higher layers in
@@ -1337,10 +1292,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
skb = NULL;
} else {
u8 *data = skb->data;
- size_t len = skb->len;
- u8 *new = __skb_push(skb, align);
- memmove(new, data, len);
- __skb_trim(skb, len);
+ size_t len = skb_headlen(skb);
+ skb->data -= align;
+ memmove(skb->data, data, len);
+ skb_set_tail_pointer(skb, len);
}
}
#endif
@@ -1365,7 +1320,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
- struct net_device *dev = rx->dev;
+ struct net_device *dev = rx->sdata->dev;
struct ieee80211_local *local = rx->local;
u16 ethertype;
u8 *payload;
@@ -1490,12 +1445,11 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
unsigned int hdrlen;
struct sk_buff *skb = rx->skb, *fwd_skb;
struct ieee80211_local *local = rx->local;
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
- sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
@@ -1533,7 +1487,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* Frame has reached destination. Don't forward */
if (!is_multicast_ether_addr(hdr->addr1) &&
- compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
+ compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0)
return RX_CONTINUE;
mesh_hdr->ttl--;
@@ -1550,10 +1504,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb && net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- rx->dev->name);
+ sdata->dev->name);
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
+ memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1587,7 +1541,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
}
if (is_multicast_ether_addr(hdr->addr1) ||
- rx->dev->flags & IFF_PROMISC)
+ sdata->dev->flags & IFF_PROMISC)
return RX_CONTINUE;
else
return RX_DROP_MONITOR;
@@ -1597,7 +1551,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
- struct net_device *dev = rx->dev;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
__le16 fc = hdr->frame_control;
int err;
@@ -1608,6 +1563,14 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
return RX_DROP_MONITOR;
+ /*
+ * Allow the cooked monitor interface of an AP to see 4-addr frames so
+ * that a 4-addr station can be detected and moved into a separate VLAN
+ */
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ sdata->vif.type == NL80211_IFTYPE_AP)
+ return RX_DROP_MONITOR;
+
err = __ieee80211_data_to_8023(rx);
if (unlikely(err))
return RX_DROP_UNUSABLE;
@@ -1641,11 +1604,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
if (ieee80211_is_back_req(bar->frame_control)) {
if (!rx->sta)
- return RX_CONTINUE;
+ return RX_DROP_MONITOR;
tid = le16_to_cpu(bar->control) >> 12;
if (rx->sta->ampdu_mlme.tid_state_rx[tid]
!= HT_AGG_STATE_OPERATIONAL)
- return RX_CONTINUE;
+ return RX_DROP_MONITOR;
tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1655,13 +1618,10 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
mod_timer(&tid_agg_rx->session_timer,
TU_TO_EXP_TIME(tid_agg_rx->timeout));
- /* manage reordering buffer according to requested */
- /* sequence number */
- rcu_read_lock();
- ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
- start_seq_num, 1);
- rcu_read_unlock();
- return RX_DROP_UNUSABLE;
+ /* release stored frames up to start of BAR */
+ ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+ kfree_skb(skb);
+ return RX_QUEUED;
}
return RX_CONTINUE;
@@ -1710,14 +1670,14 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.sa_query.trans_id,
WLAN_SA_QUERY_TR_ID_LEN);
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
}
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local = rx->local;
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
int len = rx->skb->len;
@@ -1829,7 +1789,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
@@ -1867,11 +1827,11 @@ static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
* Some hardware seem to generate incorrect Michael MIC
* reports; ignore them to avoid triggering countermeasures.
*/
- goto ignore;
+ return;
}
if (!ieee80211_has_protected(hdr->frame_control))
- goto ignore;
+ return;
if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
/*
@@ -1880,35 +1840,33 @@ static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
* group keys and only the AP is sending real multicast
* frames in the BSS.
*/
- goto ignore;
+ return;
}
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control))
- goto ignore;
+ return;
mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
GFP_ATOMIC);
- ignore:
- dev_kfree_skb(rx->skb);
- rx->skb = NULL;
}
/* TODO: use IEEE80211_RX_FRAGMENTED */
-static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
+ struct ieee80211_rate *rate)
{
struct ieee80211_sub_if_data *sdata;
struct ieee80211_local *local = rx->local;
struct ieee80211_rtap_hdr {
struct ieee80211_radiotap_header hdr;
u8 flags;
- u8 rate;
+ u8 rate_or_pad;
__le16 chan_freq;
__le16 chan_flags;
} __attribute__ ((packed)) *rthdr;
struct sk_buff *skb = rx->skb, *skb2;
struct net_device *prev_dev = NULL;
- struct ieee80211_rx_status *status = rx->status;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
goto out_free_skb;
@@ -1922,10 +1880,13 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
rthdr->hdr.it_present =
cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
- (1 << IEEE80211_RADIOTAP_RATE) |
(1 << IEEE80211_RADIOTAP_CHANNEL));
- rthdr->rate = rx->rate->bitrate / 5;
+ if (rate) {
+ rthdr->rate_or_pad = rate->bitrate / 5;
+ rthdr->hdr.it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
+ }
rthdr->chan_freq = cpu_to_le16(status->freq);
if (status->band == IEEE80211_BAND_5GHZ)
@@ -1978,13 +1939,13 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_data *rx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct ieee80211_rate *rate)
{
ieee80211_rx_result res = RX_DROP_MONITOR;
rx->skb = skb;
rx->sdata = sdata;
- rx->dev = sdata->dev;
#define CALL_RXH(rxh) \
do { \
@@ -2023,7 +1984,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
rx->sta->rx_dropped++;
/* fall through */
case RX_CONTINUE:
- ieee80211_rx_cooked_monitor(rx);
+ ieee80211_rx_cooked_monitor(rx, rate);
break;
case RX_DROP_UNUSABLE:
I802_DEBUG_INC(sdata->local->rx_handlers_drop);
@@ -2043,12 +2004,14 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_data *rx,
struct ieee80211_hdr *hdr)
{
- u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, sdata->vif.type);
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
int multicast = is_multicast_ether_addr(hdr->addr1);
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
- if (!bssid)
+ if (!bssid && !sdata->u.mgd.use_4addr)
return 0;
if (!multicast &&
compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
@@ -2075,10 +2038,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
rx->flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!rx->sta) {
int rate_idx;
- if (rx->status->flag & RX_FLAG_HT)
+ if (status->flag & RX_FLAG_HT)
rate_idx = 0; /* TODO: HT rates */
else
- rate_idx = rx->status->rate_idx;
+ rate_idx = status->rate_idx;
rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
BIT(rate_idx));
}
@@ -2113,8 +2076,6 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
return 0;
break;
case NL80211_IFTYPE_MONITOR:
- /* take everything */
- break;
case NL80211_IFTYPE_UNSPECIFIED:
case __NL80211_IFTYPE_AFTER_LAST:
/* should never get here */
@@ -2147,23 +2108,9 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
rx.skb = skb;
rx.local = local;
- rx.status = status;
- rx.rate = rate;
-
if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
local->dot11ReceivedFragmentCount++;
- rx.sta = sta_info_get(local, hdr->addr2);
- if (rx.sta) {
- rx.sdata = rx.sta->sdata;
- rx.dev = rx.sta->sdata->dev;
- }
-
- if ((status->flag & RX_FLAG_MMIC_ERROR)) {
- ieee80211_rx_michael_mic_report(hdr, &rx);
- return;
- }
-
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
rx.flags |= IEEE80211_RX_IN_SCAN;
@@ -2171,13 +2118,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
- skb = rx.skb;
+ rx.sta = sta_info_get(local, hdr->addr2);
+ if (rx.sta)
+ rx.sdata = rx.sta->sdata;
if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
rx.flags |= IEEE80211_RX_RA_MATCH;
prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
- if (prepares)
- prev = rx.sdata;
+ if (prepares) {
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr, &rx);
+ } else
+ prev = rx.sdata;
+ }
} else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!netif_running(sdata->dev))
continue;
@@ -2192,6 +2146,13 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (!prepares)
continue;
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ rx.sdata = sdata;
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr, &rx);
+ continue;
+ }
+
/*
* frame is destined for this interface, but if it's not
* also for the previous one we handle that after the
@@ -2217,11 +2178,11 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
prev->dev->name);
continue;
}
- ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
+ ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
prev = sdata;
}
if (prev)
- ieee80211_invoke_rx_handlers(prev, &rx, skb);
+ ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
else
dev_kfree_skb(skb);
}
@@ -2250,7 +2211,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
int index)
{
struct ieee80211_supported_band *sband;
- struct ieee80211_rate *rate;
+ struct ieee80211_rate *rate = NULL;
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
struct ieee80211_rx_status *status;
@@ -2261,9 +2222,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
/* release the reordered frames to stack */
sband = hw->wiphy->bands[status->band];
- if (status->flag & RX_FLAG_HT)
- rate = sband->bitrates; /* TODO: HT rates */
- else
+ if (!(status->flag & RX_FLAG_HT))
rate = &sband->bitrates[status->rate_idx];
__ieee80211_rx_handle_packet(hw, skb, rate);
tid_agg_rx->stored_mpdu_num--;
@@ -2273,6 +2232,18 @@ no_frame:
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}
+static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+ struct tid_ampdu_rx *tid_agg_rx,
+ u16 head_seq_num)
+{
+ int index;
+
+ while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
+ ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+ }
+}
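ieee80211_release_reorder_frames() walks head_seq_num forward using the 12-bit sequence-number helpers defined earlier in rx.c. A self-contained sketch of that modulo-4096 arithmetic, matching the definitions as far as the surrounding code shows them:

/* Sketch of 802.11 sequence-number arithmetic (12-bit, modulo 4096). */
#include <stdbool.h>
#include <stdint.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0x0fff

static inline bool seq_less(uint16_t sq1, uint16_t sq2)
{
	/* true if sq1 comes "before" sq2 in modulo-4096 order */
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline uint16_t seq_inc(uint16_t sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}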
/*
* Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
@@ -2284,15 +2255,17 @@ no_frame:
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
/*
- * As it function blongs to Rx path it must be called with
- * the proper rcu_read_lock protection for its flow.
+ * As this function belongs to the RX path it must be under
+ * rcu_read_lock protection. It returns false if the frame
+ * can be processed immediately, true if it was consumed.
*/
-static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
- struct tid_ampdu_rx *tid_agg_rx,
- struct sk_buff *skb,
- u16 mpdu_seq_num,
- int bar_req)
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+ struct tid_ampdu_rx *tid_agg_rx,
+ struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ u16 sc = le16_to_cpu(hdr->seq_ctrl);
+ u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
u16 head_seq_num, buf_size;
int index;
@@ -2302,47 +2275,37 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
/* frame with out of date sequence number */
if (seq_less(mpdu_seq_num, head_seq_num)) {
dev_kfree_skb(skb);
- return 1;
+ return true;
}
- /* if frame sequence number exceeds our buffering window size or
- * block Ack Request arrived - release stored frames */
- if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
- /* new head to the ordering buffer */
- if (bar_req)
- head_seq_num = mpdu_seq_num;
- else
- head_seq_num =
- seq_inc(seq_sub(mpdu_seq_num, buf_size));
+ /*
+	 * If the frame's sequence number exceeds our buffering window
+	 * size, release some previous frames to make room for this one.
+ */
+ if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
+ head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
- while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
- index = seq_sub(tid_agg_rx->head_seq_num,
- tid_agg_rx->ssn)
- % tid_agg_rx->buf_size;
- ieee80211_release_reorder_frame(hw, tid_agg_rx,
- index);
- }
- if (bar_req)
- return 1;
+ ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
}
- /* now the new frame is always in the range of the reordering */
- /* buffer window */
- index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
- % tid_agg_rx->buf_size;
+ /* Now the new frame is always in the range of the reordering buffer */
+
+ index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
/* check if we already stored this frame */
if (tid_agg_rx->reorder_buf[index]) {
dev_kfree_skb(skb);
- return 1;
+ return true;
}
- /* if arrived mpdu is in the right order and nothing else stored */
- /* release it immediately */
+ /*
+ * If the current MPDU is in the right order and nothing else
+ * is stored we can process it directly, no need to buffer it.
+ */
if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
- tid_agg_rx->stored_mpdu_num == 0) {
- tid_agg_rx->head_seq_num =
- seq_inc(tid_agg_rx->head_seq_num);
- return 0;
+ tid_agg_rx->stored_mpdu_num == 0) {
+ tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+ return false;
}
/* put the frame in the reordering buffer */
@@ -2350,8 +2313,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
/* release the buffer until next missing frame */
- index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
- % tid_agg_rx->buf_size;
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
if (!tid_agg_rx->reorder_buf[index] &&
tid_agg_rx->stored_mpdu_num > 1) {
/*
@@ -2362,12 +2325,12 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
int skipped = 1;
for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
j = (j + 1) % tid_agg_rx->buf_size) {
- if (tid_agg_rx->reorder_buf[j] == NULL) {
+ if (!tid_agg_rx->reorder_buf[j]) {
skipped++;
continue;
}
if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
- HZ / 10))
+ HT_RX_REORDER_BUF_TIMEOUT))
break;
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -2383,51 +2346,56 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
* Increment the head seq# also for the skipped slots.
*/
tid_agg_rx->head_seq_num =
- (tid_agg_rx->head_seq_num + skipped) &
- SEQ_MASK;
+ (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
skipped = 0;
}
} else while (tid_agg_rx->reorder_buf[index]) {
ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
- index = seq_sub(tid_agg_rx->head_seq_num,
- tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+ index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+ tid_agg_rx->buf_size;
}
- return 1;
+
+ return true;
}
-static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
- struct sk_buff *skb)
+/*
+ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
+ * true if the MPDU was buffered, false if it should be processed.
+ */
+static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
+ struct sk_buff *skb)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct sta_info *sta;
struct tid_ampdu_rx *tid_agg_rx;
u16 sc;
- u16 mpdu_seq_num;
- u8 ret = 0;
int tid;
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return false;
+
+ /*
+ * filter the QoS data rx stream according to
+ * STA/TID and check if this STA/TID is on aggregation
+ */
+
sta = sta_info_get(local, hdr->addr2);
if (!sta)
- return ret;
-
- /* filter the QoS data rx stream according to
- * STA/TID and check if this STA/TID is on aggregation */
- if (!ieee80211_is_data_qos(hdr->frame_control))
- goto end_reorder;
+ return false;
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
- goto end_reorder;
+ return false;
tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
- goto end_reorder;
+ return false;
- /* new un-ordered ampdu frame - process it */
+ /* new, potentially un-ordered, ampdu frame - process it */
/* reset session timer */
if (tid_agg_rx->timeout)
@@ -2439,16 +2407,11 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
if (sc & IEEE80211_SCTL_FRAG) {
ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
- ret = 1;
- goto end_reorder;
+ dev_kfree_skb(skb);
+ return true;
}
- /* according to mpdu sequence number deal with reordering buffer */
- mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
- ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
- mpdu_seq_num, 0);
- end_reorder:
- return ret;
+ return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
}
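With the bool return, the caller is expected to hand the skb onwards only when the reorder buffer did not consume it. A hedged sketch of the expected call pattern; the actual caller is ieee80211_rx(), whose hunk is not shown here, so treat this fragment as illustrative rather than the literal patch:

/* Sketch of the expected call pattern, not a literal hunk. */
rcu_read_lock();
if (!ieee80211_rx_reorder_ampdu(local, skb))
	__ieee80211_rx_handle_packet(hw, skb, rate);
rcu_read_unlock();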
/*
@@ -2490,14 +2453,22 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto drop;
if (status->flag & RX_FLAG_HT) {
- /* rate_idx is MCS index */
- if (WARN_ON(status->rate_idx < 0 ||
- status->rate_idx >= 76))
+ /*
+ * rate_idx is MCS index, which can be [0-76] as documented on:
+ *
+ * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
+ *
+ * Anything else would be some sort of driver or hardware error.
+ * The driver should catch hardware errors.
+ */
+ if (WARN((status->rate_idx < 0 ||
+ status->rate_idx > 76),
+ "Rate marked as an HT rate but passed "
+ "status->rate_idx is not "
+ "an MCS index [0-76]: %d (0x%02x)\n",
+ status->rate_idx,
+ status->rate_idx))
goto drop;
- /* HT rates are not in the table - use the highest legacy rate
- * for now since other parts of mac80211 may not yet be fully
- * MCS aware. */
- rate = &sband->bitrates[sband->n_bitrates - 1];
} else {
if (WARN_ON(status->rate_idx < 0 ||
status->rate_idx >= sband->n_bitrates))
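The WARN text above spells out the contract for drivers: when RX_FLAG_HT is set, rate_idx carries an MCS index rather than an index into the band's bitrate table. A hedged driver-side sketch; 'mcs' is a hypothetical value that would come from the device's RX descriptor:

/* Sketch only: how a driver might tag an HT receive rate. */
#include <net/mac80211.h>

static void report_ht_rate(struct sk_buff *skb, u8 mcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	status->flag |= RX_FLAG_HT;	/* rate_idx is an MCS index */
	status->rate_idx = mcs;		/* must stay within [0, 76] */
}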
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 7a350d2690a0..4cf387c944bf 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,8 +12,6 @@
* published by the Free Software Foundation.
*/
-/* TODO: figure out how to avoid that the "current BSS" expires */
-
#include <linux/wireless.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
@@ -189,6 +187,39 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
return RX_QUEUED;
}
+/* return false if no more work */
+static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
+{
+ struct cfg80211_scan_request *req = local->scan_req;
+ enum ieee80211_band band;
+ int i, ielen, n_chans;
+
+ do {
+ if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+ return false;
+
+ band = local->hw_scan_band;
+ n_chans = 0;
+ for (i = 0; i < req->n_channels; i++) {
+ if (req->channels[i]->band == band) {
+ local->hw_scan_req->channels[n_chans] =
+ req->channels[i];
+ n_chans++;
+ }
+ }
+
+ local->hw_scan_band++;
+ } while (!n_chans);
+
+ local->hw_scan_req->n_channels = n_chans;
+
+ ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
+ req->ie, req->ie_len, band);
+ local->hw_scan_req->ie_len = ielen;
+
+ return true;
+}
+
/*
* inform AP that we will go to sleep so that it will buffer the frames
* while we scan
@@ -249,13 +280,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
}
}
-static void ieee80211_restore_scan_ies(struct ieee80211_local *local)
-{
- kfree(local->scan_req->ie);
- local->scan_req->ie = local->orig_ies;
- local->scan_req->ie_len = local->orig_ies_len;
-}
-
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -264,25 +288,36 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
mutex_lock(&local->scan_mtx);
- if (WARN_ON(!local->scanning)) {
+ /*
+ * It's ok to abort a not-yet-running scan (that
+ * we have one at all will be verified by checking
+ * local->scan_req next), but not to complete it
+ * successfully.
+ */
+ if (WARN_ON(!local->scanning && !aborted))
+ aborted = true;
+
+ if (WARN_ON(!local->scan_req)) {
mutex_unlock(&local->scan_mtx);
return;
}
- if (WARN_ON(!local->scan_req)) {
+ was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
+ if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->scan_work, 0);
mutex_unlock(&local->scan_mtx);
return;
}
- if (test_bit(SCAN_HW_SCANNING, &local->scanning))
- ieee80211_restore_scan_ies(local);
+ kfree(local->hw_scan_req);
+ local->hw_scan_req = NULL;
if (local->scan_req != local->int_scan_req)
cfg80211_scan_done(local->scan_req, aborted);
local->scan_req = NULL;
local->scan_sdata = NULL;
- was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
local->scanning = 0;
local->scan_channel = NULL;
@@ -394,19 +429,23 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->ops->hw_scan) {
u8 *ies;
- int ielen;
- ies = kmalloc(2 + IEEE80211_MAX_SSID_LEN +
- local->scan_ies_len + req->ie_len, GFP_KERNEL);
- if (!ies)
+ local->hw_scan_req = kmalloc(
+ sizeof(*local->hw_scan_req) +
+ req->n_channels * sizeof(req->channels[0]) +
+ 2 + IEEE80211_MAX_SSID_LEN + local->scan_ies_len +
+ req->ie_len, GFP_KERNEL);
+ if (!local->hw_scan_req)
return -ENOMEM;
- ielen = ieee80211_build_preq_ies(local, ies,
- req->ie, req->ie_len);
- local->orig_ies = req->ie;
- local->orig_ies_len = req->ie_len;
- req->ie = ies;
- req->ie_len = ielen;
+ local->hw_scan_req->ssids = req->ssids;
+ local->hw_scan_req->n_ssids = req->n_ssids;
+ ies = (u8 *)local->hw_scan_req +
+ sizeof(*local->hw_scan_req) +
+ req->n_channels * sizeof(req->channels[0]);
+ local->hw_scan_req->ie = ies;
+
+ local->hw_scan_band = 0;
}
local->scan_req = req;
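The single kmalloc above carries three regions in one allocation, with hw_scan_req->ie pointed past the embedded channel array. A sketch of the intended layout, inferred only from the size expression and pointer math in this hunk:

/*
 * local->hw_scan_req (one allocation), as inferred from the hunk above:
 *
 *   struct cfg80211_scan_request        <- ssids/n_ssids copied from the original request
 *   channels[req->n_channels] pointers  <- refilled per band by ieee80211_prep_hw_scan()
 *   IE buffer                           <- 2 + IEEE80211_MAX_SSID_LEN
 *                                          + local->scan_ies_len + req->ie_len bytes;
 *                                          hw_scan_req->ie points here
 */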
@@ -438,16 +477,17 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
mutex_unlock(&local->scan_mtx);
- if (local->ops->hw_scan)
- rc = drv_hw_scan(local, local->scan_req);
- else
+ if (local->ops->hw_scan) {
+ WARN_ON(!ieee80211_prep_hw_scan(local));
+ rc = drv_hw_scan(local, local->hw_scan_req);
+ } else
rc = ieee80211_start_sw_scan(local);
mutex_lock(&local->scan_mtx);
if (rc) {
- if (local->ops->hw_scan)
- ieee80211_restore_scan_ies(local);
+ kfree(local->hw_scan_req);
+ local->hw_scan_req = NULL;
local->scanning = 0;
ieee80211_recalc_idle(local);
@@ -574,23 +614,14 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
{
int skip;
struct ieee80211_channel *chan;
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
skip = 0;
chan = local->scan_req->channels[local->scan_channel_idx];
- if (chan->flags & IEEE80211_CHAN_DISABLED ||
- (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- chan->flags & IEEE80211_CHAN_NO_IBSS))
+ local->scan_channel = chan;
+ if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
- if (!skip) {
- local->scan_channel = chan;
- if (ieee80211_hw_config(local,
- IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
- }
-
/* advance state machine to next channel/band */
local->scan_channel_idx++;
@@ -656,6 +687,14 @@ void ieee80211_scan_work(struct work_struct *work)
return;
}
+ if (local->hw_scan_req) {
+ int rc = drv_hw_scan(local, local->hw_scan_req);
+ mutex_unlock(&local->scan_mtx);
+ if (rc)
+ ieee80211_scan_completed(&local->hw, true);
+ return;
+ }
+
if (local->scan_req && !local->scanning) {
struct cfg80211_scan_request *req = local->scan_req;
int rc;
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 68953033403d..aa743a895cf9 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -65,7 +65,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
- ieee80211_tx_skb(sdata, skb, 1);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 594f2318c3d8..71f370dd24bc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -116,14 +116,15 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
return sta;
}
-struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
- struct net_device *dev)
+struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
+ int idx)
{
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int i = 0;
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- if (dev && dev != sta->sdata->dev)
+ if (sdata != sta->sdata)
continue;
if (i < idx) {
++i;
@@ -147,8 +148,10 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
static void __sta_info_free(struct ieee80211_local *local,
struct sta_info *sta)
{
- rate_control_free_sta(sta);
- rate_control_put(sta->rate_ctrl);
+ if (sta->rate_ctrl) {
+ rate_control_free_sta(sta);
+ rate_control_put(sta->rate_ctrl);
+ }
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: Destroyed STA %pM\n",
@@ -171,6 +174,8 @@ void sta_info_destroy(struct sta_info *sta)
local = sta->local;
+ cancel_work_sync(&sta->drv_unblock_wk);
+
rate_control_remove_sta_debugfs(sta);
ieee80211_sta_debugfs_remove(sta);
@@ -259,6 +264,38 @@ static void sta_info_hash_add(struct ieee80211_local *local,
rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
}
+static void sta_unblock(struct work_struct *wk)
+{
+ struct sta_info *sta;
+
+ sta = container_of(wk, struct sta_info, drv_unblock_wk);
+
+ if (sta->dead)
+ return;
+
+ if (!test_sta_flags(sta, WLAN_STA_PS_STA))
+ ieee80211_sta_ps_deliver_wakeup(sta);
+ else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL))
+ ieee80211_sta_ps_deliver_poll_response(sta);
+}
+
+static int sta_prepare_rate_control(struct ieee80211_local *local,
+ struct sta_info *sta, gfp_t gfp)
+{
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return 0;
+
+ sta->rate_ctrl = rate_control_get(local->rate_ctrl);
+ sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
+ &sta->sta, gfp);
+ if (!sta->rate_ctrl_priv) {
+ rate_control_put(sta->rate_ctrl);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
u8 *addr, gfp_t gfp)
{
@@ -272,16 +309,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
spin_lock_init(&sta->lock);
spin_lock_init(&sta->flaglock);
+ INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
memcpy(sta->sta.addr, addr, ETH_ALEN);
sta->local = local;
sta->sdata = sdata;
- sta->rate_ctrl = rate_control_get(local->rate_ctrl);
- sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
- &sta->sta, gfp);
- if (!sta->rate_ctrl_priv) {
- rate_control_put(sta->rate_ctrl);
+ if (sta_prepare_rate_control(local, sta, gfp)) {
kfree(sta);
return NULL;
}
@@ -478,8 +512,10 @@ static void __sta_info_unlink(struct sta_info **sta)
}
list_del(&(*sta)->list);
+ (*sta)->dead = true;
- if (test_and_clear_sta_flags(*sta, WLAN_STA_PS)) {
+ if (test_and_clear_sta_flags(*sta,
+ WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
BUG_ON(!sdata->bss);
atomic_dec(&sdata->bss->num_sta_ps);
@@ -489,6 +525,9 @@ static void __sta_info_unlink(struct sta_info **sta)
local->num_sta--;
local->sta_generation++;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+
if (local->ops->sta_notify) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
@@ -801,8 +840,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
sta_info_destroy(sta);
}
-struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
- const u8 *addr)
+struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
+ const u8 *addr)
{
struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
@@ -810,4 +849,114 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
return NULL;
return &sta->sta;
}
+EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
+
+struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
+ const u8 *addr)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!vif)
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
+
+ return ieee80211_find_sta_by_hw(&sdata->local->hw, addr);
+}
EXPORT_SYMBOL(ieee80211_find_sta);
+
+/* powersave support code */
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ int sent, buffered;
+
+ drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
+
+ if (!skb_queue_empty(&sta->ps_tx_buf))
+ sta_info_clear_tim_bit(sta);
+
+ /* Send all buffered frames to the station */
+ sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
+ buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
+ sent += buffered;
+ local->total_ps_buffered -= buffered;
+
+#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
+ "since STA not sleeping anymore\n", sdata->dev->name,
+ sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
+#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+}
+
+void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ int no_pending_pkts;
+
+ skb = skb_dequeue(&sta->tx_filtered);
+ if (!skb) {
+ skb = skb_dequeue(&sta->ps_tx_buf);
+ if (skb)
+ local->total_ps_buffered--;
+ }
+ no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
+ skb_queue_empty(&sta->ps_tx_buf);
+
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *) skb->data;
+
+ /*
+ * Tell TX path to send this frame even though the STA may
+		 * still remain in PS mode after this frame exchange.
+ */
+ info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
+
+#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
+ sta->sta.addr, sta->sta.aid,
+ skb_queue_len(&sta->ps_tx_buf));
+#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+
+ /* Use MoreData flag to indicate whether there are more
+ * buffered frames for this STA */
+ if (no_pending_pkts)
+ hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ ieee80211_add_pending_skb(local, skb);
+
+ if (no_pending_pkts)
+ sta_info_clear_tim_bit(sta);
+#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ } else {
+ /*
+ * FIXME: This can be the result of a race condition between
+ * us expiring a frame and the station polling for it.
+ * Should we send it a null-func frame indicating we
+ * have nothing buffered for it?
+ */
+ printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
+ "though there are no buffered frames for it\n",
+ sdata->dev->name, sta->sta.addr);
+#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ }
+}
+
+void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta, bool block)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ if (block)
+ set_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ else
+ ieee80211_queue_work(hw, &sta->drv_unblock_wk);
+}
+EXPORT_SYMBOL(ieee80211_sta_block_awake);
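ieee80211_sta_block_awake() is the driver-facing half of this machinery: a driver that still has frames queued for a station that just woke up can keep mac80211 treating it as asleep until those frames drain. A hedged sketch of one way a driver might use it; my_flush_hw_queue() is hypothetical, the sta_notify signature is taken from how the hunks above call drv_sta_notify(), and a real driver would more likely unblock from its TX-completion path rather than synchronously:

/* Hedged driver-side sketch; my_flush_hw_queue() is hypothetical. */
#include <net/mac80211.h>

static void my_flush_hw_queue(struct ieee80211_hw *hw, struct ieee80211_sta *sta);

static void my_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
	if (cmd != STA_NOTIFY_AWAKE)
		return;

	/* keep mac80211 treating the station as asleep while old frames drain */
	ieee80211_sta_block_awake(hw, sta, true);
	my_flush_hw_queue(hw, sta);
	ieee80211_sta_block_awake(hw, sta, false);
}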
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ccc3adf962c7..b4810f6aa94f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -12,6 +12,7 @@
#include <linux/list.h>
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/workqueue.h>
#include "key.h"
/**
@@ -21,7 +22,7 @@
*
* @WLAN_STA_AUTH: Station is authenticated.
* @WLAN_STA_ASSOC: Station is associated.
- * @WLAN_STA_PS: Station is in power-save mode
+ * @WLAN_STA_PS_STA: Station is in power-save mode
* @WLAN_STA_AUTHORIZED: Station is authorized to send/receive traffic.
* This bit is always checked so needs to be enabled for all stations
* when virtual port control is not in use.
@@ -36,11 +37,16 @@
* @WLAN_STA_MFP: Management frame protection is used with this STA.
* @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
* Used to deny ADDBA requests (both TX and RX).
+ * @WLAN_STA_PS_DRIVER: driver requires keeping this station in
+ * power-save mode logically to flush frames that might still
+ * be in the queues
+ * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
+ * station in power-save mode, reply when the driver unblocks.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
WLAN_STA_ASSOC = 1<<1,
- WLAN_STA_PS = 1<<2,
+ WLAN_STA_PS_STA = 1<<2,
WLAN_STA_AUTHORIZED = 1<<3,
WLAN_STA_SHORT_PREAMBLE = 1<<4,
WLAN_STA_ASSOC_AP = 1<<5,
@@ -48,7 +54,9 @@ enum ieee80211_sta_info_flags {
WLAN_STA_WDS = 1<<7,
WLAN_STA_CLEAR_PS_FILT = 1<<9,
WLAN_STA_MFP = 1<<10,
- WLAN_STA_SUSPEND = 1<<11
+ WLAN_STA_SUSPEND = 1<<11,
+ WLAN_STA_PS_DRIVER = 1<<12,
+ WLAN_STA_PSPOLL = 1<<13,
};
#define STA_TID_NUM 16
@@ -177,6 +185,7 @@ struct sta_ampdu_mlme {
* @lock: used for locking all fields that require locking, see comments
* in the header file.
* @flaglock: spinlock for flags accesses
+ * @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
* @pin_status: used internally for pinning a STA struct into memory
* @flags: STA flags, see &enum ieee80211_sta_info_flags
@@ -193,7 +202,6 @@ struct sta_ampdu_mlme {
* @rx_fragments: number of received MPDUs
* @rx_dropped: number of dropped MPDUs from this STA
* @last_signal: signal of last received frame from this STA
- * @last_qual: qual of last received frame from this STA
* @last_noise: noise of last received frame from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
* @tx_filtered_count: number of frames the hardware filtered for this STA
@@ -217,6 +225,7 @@ struct sta_ampdu_mlme {
* @plink_timer_was_running: used by suspend/resume to restore timers
* @debugfs: debug filesystem info
* @sta: station information we share with the driver
+ * @dead: set to true when sta is unlinked
*/
struct sta_info {
/* General information, mostly static */
@@ -230,8 +239,12 @@ struct sta_info {
spinlock_t lock;
spinlock_t flaglock;
+ struct work_struct drv_unblock_wk;
+
u16 listen_interval;
+ bool dead;
+
/*
* for use by the internal lifetime management,
* see __sta_info_unlink
@@ -259,7 +272,6 @@ struct sta_info {
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
- int last_qual;
int last_noise;
__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
@@ -301,28 +313,6 @@ struct sta_info {
#ifdef CONFIG_MAC80211_DEBUGFS
struct sta_info_debugfsdentries {
struct dentry *dir;
- struct dentry *flags;
- struct dentry *num_ps_buf_frames;
- struct dentry *inactive_ms;
- struct dentry *last_seq_ctrl;
- struct dentry *agg_status;
- struct dentry *aid;
- struct dentry *dev;
- struct dentry *rx_packets;
- struct dentry *tx_packets;
- struct dentry *rx_bytes;
- struct dentry *tx_bytes;
- struct dentry *rx_duplicates;
- struct dentry *rx_fragments;
- struct dentry *rx_dropped;
- struct dentry *tx_fragments;
- struct dentry *tx_filtered;
- struct dentry *tx_retry_failed;
- struct dentry *tx_retry_count;
- struct dentry *last_signal;
- struct dentry *last_qual;
- struct dentry *last_noise;
- struct dentry *wep_weak_iv_count;
bool add_has_run;
} debugfs;
#endif
@@ -419,8 +409,8 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
/*
* Get STA info by index, BROKEN!
*/
-struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
- struct net_device *dev);
+struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
+ int idx);
/*
* Create a new STA info, caller owns returned structure
* until sta_info_insert().
@@ -454,4 +444,7 @@ int sta_info_flush(struct ieee80211_local *local,
void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
unsigned long exp_time);
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
+void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
+
#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
new file mode 100644
index 000000000000..9f91fd8e6efb
--- /dev/null
+++ b/net/mac80211/status.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2002-2005, Instant802 Networks, Inc.
+ * Copyright 2005-2006, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "rate.h"
+#include "mesh.h"
+#include "led.h"
+
+
+void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int tmp;
+
+ skb->pkt_type = IEEE80211_TX_STATUS_MSG;
+ skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
+ &local->skb_queue : &local->skb_queue_unreliable, skb);
+ tmp = skb_queue_len(&local->skb_queue) +
+ skb_queue_len(&local->skb_queue_unreliable);
+ while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
+ (skb = skb_dequeue(&local->skb_queue_unreliable))) {
+ dev_kfree_skb_irq(skb);
+ tmp--;
+ I802_DEBUG_INC(local->tx_status_drop);
+ }
+ tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_tx_status_irqsafe);
+
+static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
+ struct sta_info *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * XXX: This is temporary!
+ *
+ * The problem here is that when we get here, the driver will
+ * quite likely have pretty much overwritten info->control by
+ * using info->driver_data or info->rate_driver_data. Thus,
+ * when passing out the frame to the driver again, we would be
+ * passing completely bogus data since the driver would then
+ * expect a properly filled info->control. In mac80211 itself
+ * the same problem occurs, since we need info->control.vif
+ * internally.
+ *
+ * To fix this, we should send the frame through TX processing
+ * again. However, it's not that simple, since the frame will
+ * have been software-encrypted (if applicable) already, and
+ * encrypting it again doesn't do much good. So to properly do
+ * that, we not only have to skip the actual 'raw' encryption
+ * (key selection etc. still has to be done!) but also the
+ * sequence number assignment since that impacts the crypto
+ * encapsulation, of course.
+ *
+ * Hence, for now, fix the bug by just dropping the frame.
+ */
+ goto drop;
+
+ sta->tx_filtered_count++;
+
+ /*
+ * Clear the TX filter mask for this STA when sending the next
+ * packet. If the STA went to power save mode, this will happen
+ * when it wakes up for the next time.
+ */
+ set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
+
+ /*
+ * This code races in the following way:
+ *
+ * (1) STA sends frame indicating it will go to sleep and does so
+ * (2) hardware/firmware adds STA to filter list, passes frame up
+ * (3) hardware/firmware processes TX fifo and suppresses a frame
+ * (4) we get TX status before having processed the frame and
+ * knowing that the STA has gone to sleep.
+ *
+ * This is actually quite unlikely even when both those events are
+ * processed from interrupts coming in quickly after one another or
+ * even at the same time because we queue both TX status events and
+ * RX frames to be processed by a tasklet and process them in the
+ * same order that they were received or TX status last. Hence, there
+ * is no race as long as the frame RX is processed before the next TX
+ * status, which drivers can ensure, see below.
+ *
+ * Note that this can only happen if the hardware or firmware can
+ * actually add STAs to the filter list; if that is instead done by
+ * the driver in response to set_tim() (which only reduces the race
+ * this whole filtering tries to solve, not completely solve it),
+ * this situation cannot happen.
+ *
+ * To completely solve this race drivers need to make sure that they
+ * (a) don't mix the irq-safe/not irq-safe TX status/RX processing
+ * functions and
+ * (b) always process RX events before TX status events if ordering
+ * can be unknown, for example with different interrupt status
+ * bits.
+ */
+ if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
+ skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
+ skb_queue_tail(&sta->tx_filtered, skb);
+ return;
+ }
+
+ if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
+ !(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
+ /* Software retry the packet once */
+ info->flags |= IEEE80211_TX_INTFL_RETRIED;
+ ieee80211_add_pending_skb(local, skb);
+ return;
+ }
+
+ drop:
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: dropped TX filtered frame, "
+ "queue_len=%d PS=%d @%lu\n",
+ wiphy_name(local->hw.wiphy),
+ skb_queue_len(&sta->tx_filtered),
+ !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+#endif
+ dev_kfree_skb(skb);
+}
+
+void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct sk_buff *skb2;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u16 frag, type;
+ __le16 fc;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_tx_status_rtap_hdr *rthdr;
+ struct ieee80211_sub_if_data *sdata;
+ struct net_device *prev_dev = NULL;
+ struct sta_info *sta;
+ int retry_count = -1, i;
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ /* the HW cannot have attempted that rate */
+ if (i >= hw->max_rates) {
+ info->status.rates[i].idx = -1;
+ info->status.rates[i].count = 0;
+ }
+
+ retry_count += info->status.rates[i].count;
+ }
+ if (retry_count < 0)
+ retry_count = 0;
+
+ rcu_read_lock();
+
+ sband = local->hw.wiphy->bands[info->band];
+
+ sta = sta_info_get(local, hdr->addr1);
+
+ if (sta) {
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ /*
+ * The STA is in power save mode, so assume
+ * that this TX packet failed because of that.
+ */
+ ieee80211_handle_filtered_frame(local, sta, skb);
+ rcu_read_unlock();
+ return;
+ }
+
+ fc = hdr->frame_control;
+
+ if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
+ (ieee80211_is_data_qos(fc))) {
+ u16 tid, ssn;
+ u8 *qc;
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
+ & IEEE80211_SCTL_SEQ);
+ ieee80211_send_bar(sta->sdata, hdr->addr1,
+ tid, ssn);
+ }
+
+ if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
+ ieee80211_handle_filtered_frame(local, sta, skb);
+ rcu_read_unlock();
+ return;
+ } else {
+ if (!(info->flags & IEEE80211_TX_STAT_ACK))
+ sta->tx_retry_failed++;
+ sta->tx_retry_count += retry_count;
+ }
+
+ rate_control_tx_status(local, sband, sta, skb);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ ieee80211s_update_metric(local, sta, skb);
+ }
+
+ rcu_read_unlock();
+
+ ieee80211_led_tx(local, 0);
+
+ /* SNMP counters
+ * Fragments are passed to low-level drivers as separate skbs, so these
+ * are actually fragments, not frames. Update frame counters only for
+ * the first fragment of the frame. */
+
+ frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
+
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if (frag == 0) {
+ local->dot11TransmittedFrameCount++;
+ if (is_multicast_ether_addr(hdr->addr1))
+ local->dot11MulticastTransmittedFrameCount++;
+ if (retry_count > 0)
+ local->dot11RetryCount++;
+ if (retry_count > 1)
+ local->dot11MultipleRetryCount++;
+ }
+
+ /* This counter shall be incremented for an acknowledged MPDU
+ * with an individual address in the address 1 field or an MPDU
+ * with a multicast address in the address 1 field of type Data
+ * or Management. */
+ if (!is_multicast_ether_addr(hdr->addr1) ||
+ type == IEEE80211_FTYPE_DATA ||
+ type == IEEE80211_FTYPE_MGMT)
+ local->dot11TransmittedFragmentCount++;
+ } else {
+ if (frag == 0)
+ local->dot11FailedCount++;
+ }
+
+ /* this was a transmitted frame, but now we want to reuse it */
+ skb_orphan(skb);
+
+ /*
+ * This is a bit racy but we can avoid a lot of work
+ * with this test...
+ */
+ if (!local->monitors && !local->cooked_mntrs) {
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ /* send frame to monitor interfaces now */
+
+ if (skb_headroom(skb) < sizeof(*rthdr)) {
+ printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ rthdr = (struct ieee80211_tx_status_rtap_hdr *)
+ skb_push(skb, sizeof(*rthdr));
+
+ memset(rthdr, 0, sizeof(*rthdr));
+ rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
+ rthdr->hdr.it_present =
+ cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
+ (1 << IEEE80211_RADIOTAP_RATE));
+
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !is_multicast_ether_addr(hdr->addr1))
+ rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+
+ /*
+ * XXX: Once radiotap gets the bitmap reset thing the vendor
+ * extensions proposal contains, we can actually report
+ * the whole set of tries we did.
+ */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ rthdr->rate = sband->bitrates[
+ info->status.rates[0].idx].bitrate / 5;
+
+ /* for now report the total retry_count */
+ rthdr->data_retries = retry_count;
+
+ /* XXX: is this sufficient for BPF? */
+ skb_set_mac_header(skb, 0);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+ memset(skb->cb, 0, sizeof(skb->cb));
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (!netif_running(sdata->dev))
+ continue;
+
+ if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ (type == IEEE80211_FTYPE_DATA))
+ continue;
+
+ if (prev_dev) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ skb2->dev = prev_dev;
+ netif_rx(skb2);
+ }
+ }
+
+ prev_dev = sdata->dev;
+ }
+ }
+ if (prev_dev) {
+ skb->dev = prev_dev;
+ netif_rx(skb);
+ skb = NULL;
+ }
+ rcu_read_unlock();
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(ieee80211_tx_status);
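A quick aside on the arithmetic in ieee80211_tx_status(): retry_count starts at -1 because the first transmission attempt is not a retry, and the radiotap rate byte is bitrate / 5 because mac80211 keeps bitrates in 100 kbps units while radiotap expects 500 kbps units. A standalone userspace sketch of just that arithmetic (the rate_attempt struct and the sample numbers are invented):

#include <stdio.h>

struct rate_attempt {
        int idx;        /* rate table index, -1 = unused slot */
        int count;      /* number of tries at this rate */
        int bitrate;    /* in 100 kbps units, e.g. 540 = 54 Mbit/s */
};

int main(void)
{
        struct rate_attempt rates[] = {
                { 7, 2, 540 },          /* two tries at 54 Mbit/s */
                { 4, 1, 240 },          /* one fallback try at 24 Mbit/s */
                { -1, 0, 0 },           /* unused slot, count forced to 0 */
        };
        int retry_count = -1;           /* the first try is not a retry */
        unsigned int i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                retry_count += rates[i].count;
        if (retry_count < 0)
                retry_count = 0;

        printf("retries=%d, radiotap rate byte=%d (in 500 kbps units)\n",
               retry_count, rates[0].bitrate / 5);
        return 0;
}
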
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 964b7faa7f17..4921d724b6c7 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -301,9 +301,9 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
#endif
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- u8 bcast[ETH_ALEN] =
+ static const u8 bcast[ETH_ALEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- u8 *sta_addr = key->sta->sta.addr;
+ const u8 *sta_addr = key->sta->sta.addr;
if (is_multicast_ether_addr(ra))
sta_addr = bcast;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index db4bda681ec9..8834cc93c716 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -317,12 +317,11 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (!atomic_read(&tx->sdata->bss->num_sta_ps))
return TX_CONTINUE;
- /* buffered in hardware */
- if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) {
- info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+ info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+ /* device releases frame after DTIM beacon */
+ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
return TX_CONTINUE;
- }
/* buffered in mac80211 */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -367,15 +366,16 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
u32 staflags;
- if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)
- || ieee80211_is_auth(hdr->frame_control)
- || ieee80211_is_assoc_resp(hdr->frame_control)
- || ieee80211_is_reassoc_resp(hdr->frame_control)))
+ if (unlikely(!sta ||
+ ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_auth(hdr->frame_control) ||
+ ieee80211_is_assoc_resp(hdr->frame_control) ||
+ ieee80211_is_reassoc_resp(hdr->frame_control)))
return TX_CONTINUE;
staflags = get_sta_flags(sta);
- if (unlikely((staflags & WLAN_STA_PS) &&
+ if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
@@ -398,8 +398,13 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
} else
tx->local->total_ps_buffered++;
- /* Queue frame to be sent after STA sends an PS Poll frame */
- if (skb_queue_empty(&sta->ps_tx_buf))
+ /*
+ * Queue frame to be sent after STA wakes up/polls,
+ * but don't set the TIM bit if the driver is blocking
+ * wakeup or poll response transmissions anyway.
+ */
+ if (skb_queue_empty(&sta->ps_tx_buf) &&
+ !(staflags & WLAN_STA_PS_DRIVER))
sta_info_set_tim_bit(sta);
info->control.jiffies = jiffies;
@@ -409,7 +414,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
+ else if (unlikely(staflags & WLAN_STA_PS_STA)) {
printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
"set -> send frame\n", tx->dev->name,
sta->sta.addr);
@@ -1047,7 +1052,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
hdr = (struct ieee80211_hdr *) skb->data;
- tx->sta = sta_info_get(local, hdr->addr1);
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ tx->sta = rcu_dereference(sdata->u.vlan.sta);
+ if (!tx->sta)
+ tx->sta = sta_info_get(local, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1201,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
struct sk_buff *skb = tx->skb;
ieee80211_tx_result res = TX_DROP;
-#define CALL_TXH(txh) \
- res = txh(tx); \
- if (res != TX_CONTINUE) \
- goto txh_done;
-
- CALL_TXH(ieee80211_tx_h_check_assoc)
- CALL_TXH(ieee80211_tx_h_ps_buf)
- CALL_TXH(ieee80211_tx_h_select_key)
- CALL_TXH(ieee80211_tx_h_michael_mic_add)
- CALL_TXH(ieee80211_tx_h_rate_ctrl)
- CALL_TXH(ieee80211_tx_h_misc)
- CALL_TXH(ieee80211_tx_h_sequence)
- CALL_TXH(ieee80211_tx_h_fragment)
+#define CALL_TXH(txh) \
+ do { \
+ res = txh(tx); \
+ if (res != TX_CONTINUE) \
+ goto txh_done; \
+ } while (0)
+
+ CALL_TXH(ieee80211_tx_h_check_assoc);
+ CALL_TXH(ieee80211_tx_h_ps_buf);
+ CALL_TXH(ieee80211_tx_h_select_key);
+ CALL_TXH(ieee80211_tx_h_michael_mic_add);
+ if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_rate_ctrl);
+ CALL_TXH(ieee80211_tx_h_misc);
+ CALL_TXH(ieee80211_tx_h_sequence);
+ CALL_TXH(ieee80211_tx_h_fragment);
/* handlers after fragment must be aware of tx info fragmentation! */
- CALL_TXH(ieee80211_tx_h_stats)
- CALL_TXH(ieee80211_tx_h_encrypt)
- CALL_TXH(ieee80211_tx_h_calculate_duration)
+ CALL_TXH(ieee80211_tx_h_stats);
+ CALL_TXH(ieee80211_tx_h_encrypt);
+ CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH
txh_done:
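The CALL_TXH rework above is the standard do { ... } while (0) macro idiom: the macro plus its trailing semicolon expands to exactly one statement, which is what makes the new un-braced call under the IEEE80211_HW_HAS_RATE_CONTROL check safe. A standalone illustration with an invented RUN_STEP macro mirroring CALL_TXH:

#include <stdio.h>

#define RUN_STEP(fn)                    \
        do {                            \
                if (fn() != 0)          \
                        goto done;      \
        } while (0)

static int step_ok(void)   { puts("continue"); return 0; }
static int step_stop(void) { puts("stop");     return 1; }

int main(void)
{
        int hw_has_rate_control = 1;

        RUN_STEP(step_ok);
        if (!hw_has_rate_control)
                RUN_STEP(step_ok);      /* one statement, safe without braces */
        RUN_STEP(step_stop);            /* returns non-zero, jumps to done */
        RUN_STEP(step_ok);              /* never reached */
done:
        return 0;
}
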
@@ -1387,6 +1398,30 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
return 0;
}
+static bool need_dynamic_ps(struct ieee80211_local *local)
+{
+ /* driver doesn't support power save */
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ return false;
+
+ /* hardware does dynamic power save */
+ if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ return false;
+
+ /* dynamic power save disabled */
+ if (local->hw.conf.dynamic_ps_timeout <= 0)
+ return false;
+
+ /* we are scanning, don't enable power save */
+ if (local->scanning)
+ return false;
+
+ if (!local->ps_sdata)
+ return false;
+
+ return true;
+}
+
static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -1397,11 +1432,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
int headroom;
bool may_encrypt;
- dev_hold(sdata->dev);
-
- if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
- local->hw.conf.dynamic_ps_timeout > 0 &&
- !(local->scanning) && local->ps_sdata) {
+ if (need_dynamic_ps(local)) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
IEEE80211_QUEUE_STOP_REASON_PS);
@@ -1413,7 +1444,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ rcu_read_lock();
if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
int hdrlen;
@@ -1437,7 +1468,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
* support we will need a different mechanism.
*/
- rcu_read_lock();
list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
list) {
if (!netif_running(tmp_sdata->dev))
@@ -1445,14 +1475,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
continue;
if (compare_ether_addr(tmp_sdata->dev->dev_addr,
- hdr->addr2)) {
- dev_hold(tmp_sdata->dev);
- dev_put(sdata->dev);
+ hdr->addr2) == 0) {
sdata = tmp_sdata;
break;
}
}
- rcu_read_unlock();
}
}
@@ -1466,7 +1493,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
- dev_put(sdata->dev);
+ rcu_read_unlock();
return;
}
@@ -1477,13 +1504,13 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
!is_multicast_ether_addr(hdr->addr1))
if (mesh_nexthop_lookup(skb, sdata)) {
/* skb queued: don't free */
- dev_put(sdata->dev);
+ rcu_read_unlock();
return;
}
ieee80211_select_queue(local, skb);
ieee80211_tx(sdata, skb, false);
- dev_put(sdata->dev);
+ rcu_read_unlock();
}
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1547,6 +1574,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
memset(info, 0, sizeof(*info));
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+
/* pass the radiotap header up to xmit */
ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
return NETDEV_TX_OK;
@@ -1585,7 +1614,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
- struct sta_info *sta;
+ struct sta_info *sta = NULL;
u32 sta_flags = 0;
if (unlikely(skb->len < ETH_HLEN)) {
@@ -1602,8 +1631,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
+ rcu_read_lock();
+ sta = rcu_dereference(sdata->u.vlan.sta);
+ if (sta) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+ /* RA TA DA SA */
+ memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr3, skb->data, ETH_ALEN);
+ memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+ hdrlen = 30;
+ sta_flags = get_sta_flags(sta);
+ }
+ rcu_read_unlock();
+ if (sta)
+ break;
+ /* fall through */
+ case NL80211_IFTYPE_AP:
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1639,21 +1684,25 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
/* packet from other interface */
struct mesh_path *mppath;
int is_mesh_mcast = 1;
- char *mesh_da;
+ const u8 *mesh_da;
rcu_read_lock();
if (is_multicast_ether_addr(skb->data))
/* DA TA mSA AE:SA */
mesh_da = skb->data;
else {
+ static const u8 bcast[ETH_ALEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
mppath = mpp_path_lookup(skb->data, sdata);
if (mppath) {
/* RA TA mDA mSA AE:DA SA */
mesh_da = mppath->mpp;
is_mesh_mcast = 0;
- } else
+ } else {
/* DA TA mSA AE:SA */
- mesh_da = dev->broadcast;
+ mesh_da = bcast;
+ }
}
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, dev->dev_addr);
@@ -1677,12 +1726,21 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
break;
#endif
case NL80211_IFTYPE_STATION:
- fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
- /* BSSID SA DA */
memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
- memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
- memcpy(hdr.addr3, skb->data, ETH_ALEN);
- hdrlen = 24;
+ if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+ /* RA TA DA SA */
+ memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr3, skb->data, ETH_ALEN);
+ memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+ hdrlen = 30;
+ } else {
+ fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+ /* BSSID SA DA */
+ memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+ memcpy(hdr.addr3, skb->data, ETH_ALEN);
+ hdrlen = 24;
+ }
break;
case NL80211_IFTYPE_ADHOC:
/* DA SA BSSID */
@@ -1907,12 +1965,10 @@ void ieee80211_tx_pending(unsigned long data)
}
sdata = vif_to_sdata(info->control.vif);
- dev_hold(sdata->dev);
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
txok = ieee80211_tx_pending_skb(local, skb);
- dev_put(sdata->dev);
if (!txok)
__skb_queue_head(&local->pending[i], skb);
spin_lock_irqsave(&local->queue_stop_reason_lock,
@@ -1990,8 +2046,9 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
}
}
-struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 *tim_offset, u16 *tim_length)
{
struct ieee80211_local *local = hw_to_local(hw);
struct sk_buff *skb = NULL;
@@ -2008,6 +2065,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
sdata = vif_to_sdata(vif);
+ if (tim_offset)
+ *tim_offset = 0;
+ if (tim_length)
+ *tim_length = 0;
+
if (sdata->vif.type == NL80211_IFTYPE_AP) {
ap = &sdata->u.ap;
beacon = rcu_dereference(ap->beacon);
@@ -2043,6 +2105,11 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
spin_unlock_irqrestore(&local->sta_lock, flags);
}
+ if (tim_offset)
+ *tim_offset = beacon->head_len;
+ if (tim_length)
+ *tim_length = skb->len - beacon->head_len;
+
if (beacon->tail)
memcpy(skb_put(skb, beacon->tail_len),
beacon->tail, beacon->tail_len);
@@ -2080,7 +2147,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- /* BSSID is left zeroed, wildcard value */
+ memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2119,7 +2186,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
rcu_read_unlock();
return skb;
}
-EXPORT_SYMBOL(ieee80211_beacon_get);
+EXPORT_SYMBOL(ieee80211_beacon_get_tim);
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const void *frame, size_t frame_len,
@@ -2214,17 +2281,12 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- int encrypt)
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
skb_set_mac_header(skb, 0);
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
- if (!encrypt)
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index aeb65b3d2295..d09f78bb2442 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
*/
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
{
- if (WARN(local->suspended, "queueing ieee80211 work while "
- "going to suspend\n"))
- return false;
+ if (WARN(local->suspended && !local->resuming,
+ "queueing ieee80211 work while going to suspend\n"))
+ return false;
return true;
}
@@ -666,8 +666,8 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
elems->mesh_id_len = elen;
break;
case WLAN_EID_MESH_CONFIG:
- elems->mesh_config = pos;
- elems->mesh_config_len = elen;
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
break;
case WLAN_EID_PEER_LINK:
elems->peer_link = pos;
@@ -685,6 +685,10 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
elems->perr = pos;
elems->perr_len = elen;
break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ break;
case WLAN_EID_CHANNEL_SWITCH:
elems->ch_switch_elem = pos;
elems->ch_switch_elem_len = elen;
@@ -868,17 +872,19 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
WARN_ON(err);
}
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
- const u8 *ie, size_t ie_len)
+ const u8 *ie, size_t ie_len,
+ enum ieee80211_band band)
{
struct ieee80211_supported_band *sband;
u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
int i;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ sband = local->hw.wiphy->bands[band];
pos = buffer;
@@ -966,9 +972,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
memcpy(pos, ssid, ssid_len);
pos += ssid_len;
- skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len));
+ skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
+ local->hw.conf.channel->band));
- ieee80211_tx_skb(sdata, skb, 0);
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1025,13 +1033,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct sta_info *sta;
unsigned long flags;
int res;
- bool from_suspend = local->suspended;
- /*
- * We're going to start the hardware, at that point
- * we are no longer suspended and can RX frames.
- */
- local->suspended = false;
+ if (local->suspended)
+ local->resuming = true;
/* restart hardware */
if (local->open_count) {
@@ -1129,11 +1133,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
- if (!from_suspend)
+ if (!local->suspended)
return 0;
#ifdef CONFIG_PM
+ /* first set suspended false, then resuming */
local->suspended = false;
+ mb();
+ local->resuming = false;
list_for_each_entry(sdata, &local->interfaces, list) {
switch(sdata->vif.type) {
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 8a980f136941..247123fe1a7a 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -281,16 +281,18 @@ bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
ieee80211_rx_result
ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_auth(hdr->frame_control))
return RX_CONTINUE;
- if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
return RX_DROP_UNUSABLE;
- } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
+ } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
/* remove ICV */
skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 70778694877b..5332014cb229 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -85,16 +85,16 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
u8 *data, *key = NULL, key_offset;
size_t data_len;
unsigned int hdrlen;
- struct ieee80211_hdr *hdr;
u8 mic[MICHAEL_MIC_LEN];
struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int authenticator = 1, wpa_test = 0;
/* No way to verify the MIC if the hardware stripped it */
- if (rx->status->flag & RX_FLAG_MMIC_STRIPPED)
+ if (status->flag & RX_FLAG_MMIC_STRIPPED)
return RX_CONTINUE;
- hdr = (struct ieee80211_hdr *)skb->data;
if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
!ieee80211_has_protected(hdr->frame_control) ||
!ieee80211_is_data_present(hdr->frame_control))
@@ -216,6 +216,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
int hdrlen, res, hwaccel = 0, wpa_test = 0;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -225,8 +226,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
if (!rx->sta || skb->len - hdrlen < 12)
return RX_DROP_UNUSABLE;
- if (rx->status->flag & RX_FLAG_DECRYPTED) {
- if (rx->status->flag & RX_FLAG_IV_STRIPPED) {
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
/*
* Hardware took care of all processing, including
* replay protection, and stripped the ICV/IV so
@@ -442,6 +443,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
int hdrlen;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[CCMP_PN_LEN];
int data_len;
@@ -455,8 +457,8 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
- if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
- (rx->status->flag & RX_FLAG_IV_STRIPPED))
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
ccmp_hdr2pn(pn, skb->data + hdrlen);
@@ -466,7 +468,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
- if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1);
@@ -563,6 +565,7 @@ ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie *mmie;
u8 aad[20], mic[8], ipn[6];
@@ -571,8 +574,8 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
if (!ieee80211_is_mgmt(hdr->frame_control))
return RX_CONTINUE;
- if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
- (rx->status->flag & RX_FLAG_IV_STRIPPED))
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
return RX_CONTINUE;
if (skb->len < 24 + sizeof(*mmie))
@@ -591,7 +594,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
- if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
+ if (!(status->flag & RX_FLAG_DECRYPTED)) {
/* hardware didn't decrypt/verify MIC */
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8e572d7c08c5..0e98c3282d42 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
@@ -1356,6 +1357,11 @@ err_stat:
return ret;
}
+s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq);
+EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
+
int nf_conntrack_init(struct net *net)
{
int ret;
@@ -1373,6 +1379,9 @@ int nf_conntrack_init(struct net *net)
/* For use by REJECT target */
rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+
+ /* How to get NAT offsets */
+ rcu_assign_pointer(nf_ct_nat_offset, NULL);
}
return 0;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 2032dfe25ca8..fdf5d2a1d9b4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -202,9 +202,9 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
static inline int expect_matches(const struct nf_conntrack_expect *a,
const struct nf_conntrack_expect *b)
{
- return a->master == b->master && a->class == b->class
- && nf_ct_tuple_equal(&a->tuple, &b->tuple)
- && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
+ return a->master == b->master && a->class == b->class &&
+ nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+ nf_ct_tuple_mask_equal(&a->mask, &b->mask);
}
/* Generally a bad idea to call this: could have matched already. */
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 5509dd1f14cf..38ea7ef3ccd2 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -243,8 +243,8 @@ static int try_epsv_response(const char *data, size_t dlen,
/* Three delimiters. */
if (dlen <= 3) return 0;
delim = data[0];
- if (isdigit(delim) || delim < 33 || delim > 126
- || data[1] != delim || data[2] != delim)
+ if (isdigit(delim) || delim < 33 || delim > 126 ||
+ data[1] != delim || data[2] != delim)
return 0;
return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
@@ -366,8 +366,8 @@ static int help(struct sk_buff *skb,
typeof(nf_nat_ftp_hook) nf_nat_ftp;
/* Until there's been traffic both ways, don't look in packets. */
- if (ctinfo != IP_CT_ESTABLISHED
- && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+ if (ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 1b816a2ea813..98916ef26f5d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -384,7 +384,7 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
};
/* this module per-net specifics */
-static int dccp_net_id;
+static int dccp_net_id __read_mostly;
struct dccp_net {
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
@@ -810,12 +810,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
static __net_init int dccp_net_init(struct net *net)
{
- struct dccp_net *dn;
- int err;
-
- dn = kmalloc(sizeof(*dn), GFP_KERNEL);
- if (!dn)
- return -ENOMEM;
+ struct dccp_net *dn = dccp_pernet(net);
/* default values */
dn->dccp_loose = 1;
@@ -827,16 +822,11 @@ static __net_init int dccp_net_init(struct net *net)
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
- err = net_assign_generic(net, dccp_net_id, dn);
- if (err)
- goto out;
-
#ifdef CONFIG_SYSCTL
- err = -ENOMEM;
dn->sysctl_table = kmemdup(dccp_sysctl_table,
sizeof(dccp_sysctl_table), GFP_KERNEL);
if (!dn->sysctl_table)
- goto out;
+ return -ENOMEM;
dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
@@ -851,15 +841,11 @@ static __net_init int dccp_net_init(struct net *net)
nf_net_netfilter_sysctl_path, dn->sysctl_table);
if (!dn->sysctl_header) {
kfree(dn->sysctl_table);
- goto out;
+ return -ENOMEM;
}
#endif
return 0;
-
-out:
- kfree(dn);
- return err;
}
static __net_exit void dccp_net_exit(struct net *net)
@@ -869,21 +855,20 @@ static __net_exit void dccp_net_exit(struct net *net)
unregister_net_sysctl_table(dn->sysctl_header);
kfree(dn->sysctl_table);
#endif
- kfree(dn);
-
- net_assign_generic(net, dccp_net_id, NULL);
}
static struct pernet_operations dccp_net_ops = {
.init = dccp_net_init,
.exit = dccp_net_exit,
+ .id = &dccp_net_id,
+ .size = sizeof(struct dccp_net),
};
static int __init nf_conntrack_proto_dccp_init(void)
{
int err;
- err = register_pernet_gen_subsys(&dccp_net_id, &dccp_net_ops);
+ err = register_pernet_subsys(&dccp_net_ops);
if (err < 0)
goto err1;
@@ -899,14 +884,14 @@ static int __init nf_conntrack_proto_dccp_init(void)
err3:
nf_conntrack_l4proto_unregister(&dccp_proto4);
err2:
- unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops);
+ unregister_pernet_subsys(&dccp_net_ops);
err1:
return err;
}
static void __exit nf_conntrack_proto_dccp_fini(void)
{
- unregister_pernet_gen_subsys(dccp_net_id, &dccp_net_ops);
+ unregister_pernet_subsys(&dccp_net_ops);
nf_conntrack_l4proto_unregister(&dccp_proto6);
nf_conntrack_l4proto_unregister(&dccp_proto4);
}
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a54a0af0edba..c99cfba64ddc 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,7 +43,7 @@
#define GRE_TIMEOUT (30 * HZ)
#define GRE_STREAM_TIMEOUT (180 * HZ)
-static int proto_gre_net_id;
+static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
rwlock_t keymap_lock;
struct list_head keymap_list;
@@ -300,32 +300,24 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
static int proto_gre_net_init(struct net *net)
{
- struct netns_proto_gre *net_gre;
- int rv;
+ struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
- net_gre = kmalloc(sizeof(struct netns_proto_gre), GFP_KERNEL);
- if (!net_gre)
- return -ENOMEM;
rwlock_init(&net_gre->keymap_lock);
INIT_LIST_HEAD(&net_gre->keymap_list);
- rv = net_assign_generic(net, proto_gre_net_id, net_gre);
- if (rv < 0)
- kfree(net_gre);
- return rv;
+ return 0;
}
static void proto_gre_net_exit(struct net *net)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
-
nf_ct_gre_keymap_flush(net);
- kfree(net_gre);
}
static struct pernet_operations proto_gre_net_ops = {
.init = proto_gre_net_init,
.exit = proto_gre_net_exit,
+ .id = &proto_gre_net_id,
+ .size = sizeof(struct netns_proto_gre),
};
static int __init nf_ct_proto_gre_init(void)
@@ -335,7 +327,7 @@ static int __init nf_ct_proto_gre_init(void)
rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
if (rv < 0)
return rv;
- rv = register_pernet_gen_subsys(&proto_gre_net_id, &proto_gre_net_ops);
+ rv = register_pernet_subsys(&proto_gre_net_ops);
if (rv < 0)
nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
return rv;
@@ -344,7 +336,7 @@ static int __init nf_ct_proto_gre_init(void)
static void __exit nf_ct_proto_gre_fini(void)
{
nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
- unregister_pernet_gen_subsys(proto_gre_net_id, &proto_gre_net_ops);
+ unregister_pernet_subsys(&proto_gre_net_ops);
}
module_init(nf_ct_proto_gre_init);
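Both the DCCP and GRE conversions above rely on the same pernet pattern: with .id and .size set in pernet_operations, the core allocates the per-namespace area itself, so ->init() only fills in defaults via net_generic() and ->exit() frees nothing. A minimal sketch of that shape (foo_net and the function names are invented):

#include <linux/cache.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int foo_net_id __read_mostly;    /* filled in by the pernet core */

struct foo_net {
        unsigned int timeout;
};

static int __net_init foo_net_init(struct net *net)
{
        struct foo_net *fn = net_generic(net, foo_net_id);

        /* the pernet core allocated this area before calling ->init() */
        fn->timeout = 30;
        return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
        /* nothing to kfree(): the .size-allocated area belongs to the core */
}

static struct pernet_operations foo_net_ops = {
        .init = foo_net_init,
        .exit = foo_net_exit,
        .id   = &foo_net_id,
        .size = sizeof(struct foo_net),
};

/* module init/exit would pair register_pernet_subsys(&foo_net_ops) with
 * unregister_pernet_subsys(&foo_net_ops) */
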
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9cc6b5cb06af..37a8c74be619 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -492,6 +492,21 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
+#ifdef CONFIG_NF_NAT_NEEDED
+static inline s16 nat_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
+
+ return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
+}
+#define NAT_OFFSET(pf, ct, dir, seq) \
+ (pf == NFPROTO_IPV4 ? nat_offset(ct, dir, seq) : 0)
+#else
+#define NAT_OFFSET(pf, ct, dir, seq) 0
+#endif
+
static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp *state,
enum ip_conntrack_dir dir,
@@ -506,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
+ s16 receiver_offset;
bool res;
/*
@@ -519,11 +535,16 @@ static bool tcp_in_window(const struct nf_conn *ct,
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
tcp_sack(skb, dataoff, tcph, &sack);
+ /* Take into account NAT sequence number mangling */
+ receiver_offset = NAT_OFFSET(pf, ct, !dir, ack - 1);
+ ack -= receiver_offset;
+ sack -= receiver_offset;
+
pr_debug("tcp_in_window: START\n");
pr_debug("tcp_in_window: ");
nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u sack=%u win=%u end=%u\n",
- seq, ack, sack, win, end);
+ pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+ seq, ack, receiver_offset, sack, receiver_offset, win, end);
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -613,8 +634,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
pr_debug("tcp_in_window: ");
nf_ct_dump_tuple(tuple);
- pr_debug("seq=%u ack=%u sack =%u win=%u end=%u\n",
- seq, ack, sack, win, end);
+ pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
+ seq, ack, receiver_offset, sack, receiver_offset, win, end);
pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
"receiver end=%u maxend=%u maxwin=%u scale=%i\n",
sender->td_end, sender->td_maxend, sender->td_maxwin,
@@ -700,7 +721,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
before(seq, sender->td_maxend + 1) ?
after(end, sender->td_end - receiver->td_maxwin - 1) ?
before(sack, receiver->td_end + 1) ?
- after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
+ after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
@@ -715,39 +736,6 @@ static bool tcp_in_window(const struct nf_conn *ct,
return res;
}
-#ifdef CONFIG_NF_NAT_NEEDED
-/* Update sender->td_end after NAT successfully mangled the packet */
-/* Caller must linearize skb at tcp header. */
-void nf_conntrack_tcp_update(const struct sk_buff *skb,
- unsigned int dataoff,
- struct nf_conn *ct, int dir,
- s16 offset)
-{
- const struct tcphdr *tcph = (const void *)skb->data + dataoff;
- const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[dir];
- const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[!dir];
- __u32 end;
-
- end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
-
- spin_lock_bh(&ct->lock);
- /*
- * We have to worry for the ack in the reply packet only...
- */
- if (ct->proto.tcp.seen[dir].td_end + offset == end)
- ct->proto.tcp.seen[dir].td_end = end;
- ct->proto.tcp.last_end = end;
- spin_unlock_bh(&ct->lock);
- pr_debug("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
- "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
- sender->td_end, sender->td_maxend, sender->td_maxwin,
- sender->td_scale,
- receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
- receiver->td_scale);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tcp_update);
-#endif
-
#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_RST 0x04
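nf_ct_nat_offset (declared in nf_conntrack_core.c earlier in this series and consumed through NAT_OFFSET() above) is an RCU-managed function pointer: the NAT module publishes its implementation with rcu_assign_pointer() and callers fall back to an offset of 0 when no provider is loaded. A generic sketch of that hook pattern with invented names (example_offset_hook and friends):

#include <linux/module.h>
#include <linux/rcupdate.h>

/* core side: optional hook, NULL while no provider module is loaded */
int (*example_offset_hook)(int arg);
EXPORT_SYMBOL_GPL(example_offset_hook);

/* provider side */
static int example_offset_impl(int arg)
{
        return arg * 2;
}

static void example_provider_register(void)
{
        rcu_assign_pointer(example_offset_hook, example_offset_impl);
}

static void example_provider_unregister(void)
{
        rcu_assign_pointer(example_offset_hook, NULL);
        synchronize_rcu();      /* wait for callers still using the pointer */
}

/* call site: fall back to 0 when no provider is registered */
static int example_call(int arg)
{
        int (*fn)(int);
        int ret = 0;

        rcu_read_lock();
        fn = rcu_dereference(example_offset_hook);
        if (fn)
                ret = fn(arg);
        rcu_read_unlock();
        return ret;
}
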
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index c93494fef8ef..d65d3481919c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
{
- rcu_read_lock();
+ mutex_lock(&nf_log_mutex);
if (*pos >= ARRAY_SIZE(nf_loggers))
return NULL;
@@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void seq_stop(struct seq_file *s, void *v)
- __releases(RCU)
{
- rcu_read_unlock();
+ mutex_unlock(&nf_log_mutex);
}
static int seq_show(struct seq_file *s, void *v)
@@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
struct nf_logger *t;
int ret;
- logger = rcu_dereference(nf_loggers[*pos]);
+ logger = nf_loggers[*pos];
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
if (ret < 0)
return ret;
- mutex_lock(&nf_log_mutex);
list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
ret = seq_printf(s, "%s", t->name);
- if (ret < 0) {
- mutex_unlock(&nf_log_mutex);
+ if (ret < 0)
return ret;
- }
if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
ret = seq_printf(s, ",");
- if (ret < 0) {
- mutex_unlock(&nf_log_mutex);
+ if (ret < 0)
return ret;
- }
}
}
- mutex_unlock(&nf_log_mutex);
return seq_printf(s, ")\n");
}
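The /proc handler above now simply holds nf_log_mutex from ->start() to ->stop(), so every ->show() call runs over a stable logger list and no RCU is needed in between. A minimal seq_file sketch of that locking shape, with invented names (example_seq_ops and friends):

#include <linux/mutex.h>
#include <linux/seq_file.h>

#define NITEMS 4

static DEFINE_MUTEX(example_mutex);
static const char *example_items[NITEMS] = { "a", "b", "c", "d" };

/* ->start() takes the mutex, ->stop() drops it: everything shown in
 * between sees a stable table */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
        mutex_lock(&example_mutex);
        return *pos < NITEMS ? pos : NULL;
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return *pos < NITEMS ? pos : NULL;
}

static void example_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&example_mutex);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
        return seq_printf(seq, "%s\n", example_items[*(loff_t *)v]);
}

static const struct seq_operations example_seq_ops = {
        .start  = example_seq_start,
        .next   = example_seq_next,
        .stop   = example_seq_stop,
        .show   = example_seq_show,
};
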
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3aa66b2f9e87..9de0470d557e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -677,7 +677,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
struct hlist_head *head = &instance_table[i];
hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
- if ((n->net == &init_net) &&
+ if ((net_eq(n->net, &init_net)) &&
(n->pid == inst->peer_pid))
__instance_destroy(inst);
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 680980954395..38f03f75a636 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -103,7 +103,7 @@ static int count_them(struct xt_connlimit_data *data,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
- const struct xt_match *match)
+ u_int8_t family)
{
const struct nf_conntrack_tuple_hash *found;
struct xt_connlimit_conn *conn;
@@ -113,8 +113,7 @@ static int count_them(struct xt_connlimit_data *data,
bool addit = true;
int matches = 0;
-
- if (match->family == NFPROTO_IPV6)
+ if (family == NFPROTO_IPV6)
hash = &data->iphash[connlimit_iphash6(addr, mask)];
else
hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
@@ -157,8 +156,7 @@ static int count_them(struct xt_connlimit_data *data,
continue;
}
- if (same_source_net(addr, mask, &conn->tuple.src.u3,
- match->family))
+ if (same_source_net(addr, mask, &conn->tuple.src.u3, family))
/* same source network -> be counted! */
++matches;
nf_ct_put(found_ct);
@@ -207,7 +205,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
spin_lock_bh(&info->data->lock);
connections = count_them(info->data, tuple_ptr, &addr,
- &info->mask, par->match);
+ &info->mask, par->family);
spin_unlock_bh(&info->data->lock);
if (connections < 0) {
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2e8089ecd0af..2773be6a71dd 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
- return -ENOMEM;
+ return false;
/* For SMP, we only want to use one set of state. */
r->master = priv;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 63e190504656..4d1a41bbd5d7 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
{
struct xt_osf_user_finger *f;
struct xt_osf_finger *sf;
- int err = ENOENT;
+ int err = -ENOENT;
if (!osf_attrs[OSF_ATTR_FINGER])
return -EINVAL;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index fb357f010189..98ed22ee2ff4 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -472,13 +472,12 @@ int netlbl_unlhsh_add(struct net *net,
rcu_read_lock();
if (dev_name != NULL) {
- dev = dev_get_by_name(net, dev_name);
+ dev = dev_get_by_name_rcu(net, dev_name);
if (dev == NULL) {
ret_val = -ENODEV;
goto unlhsh_add_return;
}
ifindex = dev->ifindex;
- dev_put(dev);
iface = netlbl_unlhsh_search_iface(ifindex);
} else {
ifindex = 0;
@@ -737,13 +736,12 @@ int netlbl_unlhsh_remove(struct net *net,
rcu_read_lock();
if (dev_name != NULL) {
- dev = dev_get_by_name(net, dev_name);
+ dev = dev_get_by_name_rcu(net, dev_name);
if (dev == NULL) {
ret_val = -ENODEV;
goto unlhsh_remove_return;
}
iface = netlbl_unlhsh_search_iface(dev->ifindex);
- dev_put(dev);
} else
iface = rcu_dereference(netlbl_unlhsh_def);
if (iface == NULL) {
@@ -1552,7 +1550,7 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
struct netlbl_unlhsh_iface *iface;
rcu_read_lock();
- iface = netlbl_unlhsh_search_iface_def(skb->iif);
+ iface = netlbl_unlhsh_search_iface_def(skb->skb_iif);
if (iface == NULL)
goto unlabel_getattr_nolabel;
switch (family) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0cd2d8829313..a4957bf2ca60 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -428,7 +428,8 @@ static int __netlink_create(struct net *net, struct socket *sock,
return 0;
}
-static int netlink_create(struct net *net, struct socket *sock, int protocol)
+static int netlink_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
@@ -497,7 +498,7 @@ static int netlink_release(struct socket *sock)
skb_queue_purge(&sk->sk_write_queue);
- if (nlk->pid && !nlk->subscriptions) {
+ if (nlk->pid) {
struct netlink_notify n = {
.net = sock_net(sk),
.protocol = sk->sk_protocol,
@@ -707,7 +708,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
+ DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
nladdr->nl_family = AF_NETLINK;
nladdr->nl_pad = 0;
@@ -1091,7 +1092,7 @@ static inline int do_one_set_err(struct sock *sk,
if (sk == p->exclude_sk)
goto out;
- if (sock_net(sk) != sock_net(p->exclude_sk))
+ if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
goto out;
if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 281fa597cae5..71604c6613b5 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -425,12 +425,13 @@ static struct proto nr_proto = {
.obj_size = sizeof(struct nr_sock),
};
-static int nr_create(struct net *net, struct socket *sock, int protocol)
+static int nr_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct nr_sock *nr;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 4eb1ac9a7679..aacba76070fc 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -597,15 +597,15 @@ struct net_device *nr_dev_first(void)
{
struct net_device *dev, *first = NULL;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
if (first)
dev_hold(first);
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return first;
}
@@ -617,16 +617,17 @@ struct net_device *nr_dev_get(ax25_address *addr)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
- if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
+ ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
}
}
dev = NULL;
out:
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return dev;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e68f20ec61..020562164b56 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
if (skb->pkt_type == PACKET_LOOPBACK)
goto out;
- if (dev_net(dev) != sock_net(sk))
+ if (!net_eq(dev_net(dev), sock_net(sk)))
goto out;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -437,7 +437,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
saddr->spkt_device[13] = 0;
- dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
+ rcu_read_lock();
+ dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
err = -ENODEV;
if (dev == NULL)
goto out_unlock;
@@ -500,14 +501,13 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
dev_queue_xmit(skb);
- dev_put(dev);
+ rcu_read_unlock();
return len;
out_free:
kfree_skb(skb);
out_unlock:
- if (dev)
- dev_put(dev);
+ rcu_read_unlock();
return err;
}
@@ -553,7 +553,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
sk = pt->af_packet_priv;
po = pkt_sk(sk);
- if (dev_net(dev) != sock_net(sk))
+ if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
skb->dev = dev;
@@ -674,7 +674,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
sk = pt->af_packet_priv;
po = pkt_sk(sk);
- if (dev_net(dev) != sock_net(sk))
+ if (!net_eq(dev_net(dev), sock_net(sk)))
goto drop;
if (dev->header_ops) {
@@ -984,10 +984,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
goto out_put;
size_max = po->tx_ring.frame_size
- - sizeof(struct skb_shared_info)
- - po->tp_hdrlen
- - LL_ALLOCATED_SPACE(dev)
- - sizeof(struct sockaddr_ll);
+ - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
if (size_max > dev->mtu + reserve)
size_max = dev->mtu + reserve;
@@ -1037,9 +1034,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
goto out_xmit;
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
- } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
- && (atomic_read(&po->tx_ring.pending))))
- );
+ } while (likely((ph != NULL) ||
+ ((!(msg->msg_flags & MSG_DONTWAIT)) &&
+ (atomic_read(&po->tx_ring.pending))))
+ );
err = len_sum;
goto out_put;
@@ -1347,7 +1345,8 @@ static struct proto packet_proto = {
* Create a packet of type SOCK_PACKET.
*/
-static int packet_create(struct net *net, struct socket *sock, int protocol)
+static int packet_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct packet_sock *po;
@@ -1521,12 +1520,13 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
- dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
- if (dev) {
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ if (dev)
strlcpy(uaddr->sa_data, dev->name, 15);
- dev_put(dev);
- } else
+ else
memset(uaddr->sa_data, 0, 14);
+ rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
return 0;
@@ -1538,7 +1538,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
struct net_device *dev;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
+ DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
if (peer)
return -EOPNOTSUPP;
@@ -1546,16 +1546,17 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
- dev = dev_get_by_index(sock_net(sk), po->ifindex);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
- dev_put(dev);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
}
+ rcu_read_unlock();
*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
return 0;
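
The af_packet.c hunks above replace dev_get_by_name()/dev_get_by_index() with their _rcu variants, holding rcu_read_lock() across the use instead of taking and dropping a device refcount. A rough sketch of that shape, with a hypothetical function name:

    /* Sketch: the looked-up device is valid only inside the RCU section. */
    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static int example_report_mtu(struct net *net, const char *name)
    {
        struct net_device *dev;
        int mtu = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
            mtu = dev->mtu;     /* no dev_hold()/dev_put() needed here */
        rcu_read_unlock();
        return mtu;
    }
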
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 66737aa995ea..526d0273991a 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -35,7 +35,6 @@
/* Transport protocol registration */
static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
-static DEFINE_SPINLOCK(proto_tab_lock);
static struct phonet_protocol *phonet_proto_get(int protocol)
{
@@ -44,11 +43,11 @@ static struct phonet_protocol *phonet_proto_get(int protocol)
if (protocol >= PHONET_NPROTO)
return NULL;
- spin_lock(&proto_tab_lock);
- pp = proto_tab[protocol];
+ rcu_read_lock();
+ pp = rcu_dereference(proto_tab[protocol]);
if (pp && !try_module_get(pp->prot->owner))
pp = NULL;
- spin_unlock(&proto_tab_lock);
+ rcu_read_unlock();
return pp;
}
@@ -60,7 +59,8 @@ static inline void phonet_proto_put(struct phonet_protocol *pp)
/* protocol family functions */
-static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
+static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct pn_sock *pn;
@@ -438,6 +438,8 @@ static struct packet_type phonet_packet_type __read_mostly = {
.func = phonet_rcv,
};
+static DEFINE_MUTEX(proto_tab_lock);
+
int __init_or_module phonet_proto_register(int protocol,
struct phonet_protocol *pp)
{
@@ -450,12 +452,12 @@ int __init_or_module phonet_proto_register(int protocol,
if (err)
return err;
- spin_lock(&proto_tab_lock);
+ mutex_lock(&proto_tab_lock);
if (proto_tab[protocol])
err = -EBUSY;
else
- proto_tab[protocol] = pp;
- spin_unlock(&proto_tab_lock);
+ rcu_assign_pointer(proto_tab[protocol], pp);
+ mutex_unlock(&proto_tab_lock);
return err;
}
@@ -463,10 +465,11 @@ EXPORT_SYMBOL(phonet_proto_register);
void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
{
- spin_lock(&proto_tab_lock);
+ mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[protocol] != pp);
- proto_tab[protocol] = NULL;
- spin_unlock(&proto_tab_lock);
+ rcu_assign_pointer(proto_tab[protocol], NULL);
+ mutex_unlock(&proto_tab_lock);
+ synchronize_rcu();
proto_unregister(pp->prot);
}
EXPORT_SYMBOL(phonet_proto_unregister);
@@ -480,6 +483,7 @@ static int __init phonet_init(void)
if (err)
return err;
+ pn_sock_init();
err = sock_register(&phonet_proto_family);
if (err) {
printk(KERN_ALERT
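
proto_tab above becomes an RCU-protected pointer array: readers use rcu_dereference() inside rcu_read_lock(), writers serialize on a mutex and publish with rcu_assign_pointer(), and unregistration waits in synchronize_rcu() before tearing the entry down. A condensed sketch with illustrative names (my_tab, my_mutex are not from the patch):

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/errno.h>

    static void *my_tab[16];
    static DEFINE_MUTEX(my_mutex);

    static int my_lookup(int i)
    {
        void *p;
        int ret = -ENOENT;

        rcu_read_lock();
        p = rcu_dereference(my_tab[i]);
        if (p)
            ret = 0;    /* use p only while still under rcu_read_lock() */
        rcu_read_unlock();
        return ret;
    }

    static void my_unregister(int i)
    {
        mutex_lock(&my_mutex);
        rcu_assign_pointer(my_tab[i], NULL);
        mutex_unlock(&my_mutex);
        synchronize_rcu();  /* after this, no reader sees the old pointer */
    }
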
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index cbaa1d67d77b..b6356f3832f6 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -714,8 +714,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
return -EINVAL;
lock_sock(sk);
- if (sock_flag(sk, SOCK_URGINLINE)
- && !skb_queue_empty(&pn->ctrlreq_queue))
+ if (sock_flag(sk, SOCK_URGINLINE) &&
+ !skb_queue_empty(&pn->ctrlreq_queue))
answ = skb_peek(&pn->ctrlreq_queue)->len;
else if (!skb_queue_empty(&sk->sk_receive_queue))
answ = skb_peek(&sk->sk_receive_queue)->len;
@@ -843,7 +843,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct pep_sock *pn = pep_sk(sk);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
long timeo;
int flags = msg->msg_flags;
int err, done;
@@ -851,6 +851,16 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
return -EOPNOTSUPP;
+ skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return -ENOBUFS;
+
+ skb_reserve(skb, MAX_PHONET_HEADER + 3);
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ if (err < 0)
+ goto outfree;
+
lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
@@ -894,28 +904,13 @@ disabled:
goto disabled;
}
- if (!skb) {
- skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
- flags & MSG_DONTWAIT, &err);
- if (skb == NULL)
- goto out;
- skb_reserve(skb, MAX_PHONET_HEADER + 3);
-
- if (sk->sk_state != TCP_ESTABLISHED ||
- !atomic_read(&pn->tx_credits))
- goto disabled; /* sock_alloc_send_skb might sleep */
- }
-
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
- if (err < 0)
- goto out;
-
err = pipe_skb_send(sk, skb);
if (err >= 0)
err = len; /* success! */
skb = NULL;
out:
release_sock(sk);
+outfree:
kfree_skb(skb);
return err;
}
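
pep_sendmsg() now allocates and fills the skb before lock_sock(), so the allocation may sleep without the socket lock held and the old re-check-after-sleep path disappears. The shape of that ordering, in a hypothetical send routine (error handling and queueing are elided):

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static int example_send(struct sock *sk, struct msghdr *msg, size_t len)
    {
        struct sk_buff *skb;
        int err;

        /* May sleep; deliberately done before lock_sock(). */
        skb = sock_alloc_send_skb(sk, len + 64,
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
            return err;

        err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
        if (err < 0)
            goto out_free;

        lock_sock(sk);
        /* ... hand skb to the protocol; assume it consumes the skb ... */
        release_sock(sk);
        return len;

    out_free:
        kfree_skb(skb);
        return err;
    }
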
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 6d64fda1afc9..bc4a33bf2d3d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -34,7 +34,7 @@
#include <net/phonet/pn_dev.h>
struct phonet_routes {
- spinlock_t lock;
+ struct mutex lock;
struct net_device *table[64];
};
@@ -43,7 +43,7 @@ struct phonet_net {
struct phonet_routes routes;
};
-int phonet_net_id;
+int phonet_net_id __read_mostly;
struct phonet_device_list *phonet_device_list(struct net *net)
{
@@ -61,7 +61,8 @@ static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
pnd->netdev = dev;
bitmap_zero(pnd->addrs, 64);
- list_add(&pnd->list, &pndevs->list);
+ BUG_ON(!mutex_is_locked(&pndevs->lock));
+ list_add_rcu(&pnd->list, &pndevs->list);
return pnd;
}
@@ -70,6 +71,7 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
+ BUG_ON(!mutex_is_locked(&pndevs->lock));
list_for_each_entry(pnd, &pndevs->list, list) {
if (pnd->netdev == dev)
return pnd;
@@ -77,6 +79,18 @@ static struct phonet_device *__phonet_get(struct net_device *dev)
return NULL;
}
+static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ if (pnd->netdev == dev)
+ return pnd;
+ }
+ return NULL;
+}
+
static void phonet_device_destroy(struct net_device *dev)
{
struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -84,11 +98,11 @@ static void phonet_device_destroy(struct net_device *dev)
ASSERT_RTNL();
- spin_lock_bh(&pndevs->lock);
+ mutex_lock(&pndevs->lock);
pnd = __phonet_get(dev);
if (pnd)
- list_del(&pnd->list);
- spin_unlock_bh(&pndevs->lock);
+ list_del_rcu(&pnd->list);
+ mutex_unlock(&pndevs->lock);
if (pnd) {
u8 addr;
@@ -106,8 +120,8 @@ struct net_device *phonet_device_get(struct net *net)
struct phonet_device *pnd;
struct net_device *dev = NULL;
- spin_lock_bh(&pndevs->lock);
- list_for_each_entry(pnd, &pndevs->list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
dev = pnd->netdev;
BUG_ON(!dev);
@@ -118,7 +132,7 @@ struct net_device *phonet_device_get(struct net *net)
}
if (dev)
dev_hold(dev);
- spin_unlock_bh(&pndevs->lock);
+ rcu_read_unlock();
return dev;
}
@@ -128,7 +142,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
struct phonet_device *pnd;
int err = 0;
- spin_lock_bh(&pndevs->lock);
+ mutex_lock(&pndevs->lock);
/* Find or create Phonet-specific device data */
pnd = __phonet_get(dev);
if (pnd == NULL)
@@ -137,7 +151,7 @@ int phonet_address_add(struct net_device *dev, u8 addr)
err = -ENOMEM;
else if (test_and_set_bit(addr >> 2, pnd->addrs))
err = -EEXIST;
- spin_unlock_bh(&pndevs->lock);
+ mutex_unlock(&pndevs->lock);
return err;
}
@@ -147,27 +161,32 @@ int phonet_address_del(struct net_device *dev, u8 addr)
struct phonet_device *pnd;
int err = 0;
- spin_lock_bh(&pndevs->lock);
+ mutex_lock(&pndevs->lock);
pnd = __phonet_get(dev);
- if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
+ if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
err = -EADDRNOTAVAIL;
- else if (bitmap_empty(pnd->addrs, 64)) {
- list_del(&pnd->list);
+ pnd = NULL;
+ } else if (bitmap_empty(pnd->addrs, 64))
+ list_del_rcu(&pnd->list);
+ else
+ pnd = NULL;
+ mutex_unlock(&pndevs->lock);
+
+ if (pnd) {
+ synchronize_rcu();
kfree(pnd);
}
- spin_unlock_bh(&pndevs->lock);
return err;
}
/* Gets a source address toward a destination, through an interface. */
u8 phonet_address_get(struct net_device *dev, u8 daddr)
{
- struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
struct phonet_device *pnd;
u8 saddr;
- spin_lock_bh(&pndevs->lock);
- pnd = __phonet_get(dev);
+ rcu_read_lock();
+ pnd = __phonet_get_rcu(dev);
if (pnd) {
BUG_ON(bitmap_empty(pnd->addrs, 64));
@@ -178,7 +197,7 @@ u8 phonet_address_get(struct net_device *dev, u8 daddr)
saddr = find_first_bit(pnd->addrs, 64) << 2;
} else
saddr = PN_NO_ADDR;
- spin_unlock_bh(&pndevs->lock);
+ rcu_read_unlock();
if (saddr == PN_NO_ADDR) {
/* Fallback to another device */
@@ -200,8 +219,8 @@ int phonet_address_lookup(struct net *net, u8 addr)
struct phonet_device *pnd;
int err = -EADDRNOTAVAIL;
- spin_lock_bh(&pndevs->lock);
- list_for_each_entry(pnd, &pndevs->list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
/* Don't allow unregistering devices! */
if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
((pnd->netdev->flags & IFF_UP)) != IFF_UP)
@@ -213,7 +232,7 @@ int phonet_address_lookup(struct net *net, u8 addr)
}
}
found:
- spin_unlock_bh(&pndevs->lock);
+ rcu_read_unlock();
return err;
}
@@ -248,17 +267,22 @@ static void phonet_route_autodel(struct net_device *dev)
/* Remove left-over Phonet routes */
bitmap_zero(deleted, 64);
- spin_lock_bh(&pnn->routes.lock);
+ mutex_lock(&pnn->routes.lock);
for (i = 0; i < 64; i++)
if (dev == pnn->routes.table[i]) {
+ rcu_assign_pointer(pnn->routes.table[i], NULL);
set_bit(i, deleted);
- pnn->routes.table[i] = NULL;
- dev_put(dev);
}
- spin_unlock_bh(&pnn->routes.lock);
+ mutex_unlock(&pnn->routes.lock);
+
+ if (bitmap_empty(deleted, 64))
+ return; /* short-circuit RCU */
+ synchronize_rcu();
for (i = find_first_bit(deleted, 64); i < 64;
- i = find_next_bit(deleted, 64, i + 1))
+ i = find_next_bit(deleted, 64, i + 1)) {
rtm_phonet_notify(RTM_DELROUTE, dev, i);
+ dev_put(dev);
+ }
}
/* notify Phonet of device events */
@@ -289,19 +313,14 @@ static struct notifier_block phonet_device_notifier = {
/* Per-namespace Phonet devices handling */
static int phonet_init_net(struct net *net)
{
- struct phonet_net *pnn = kzalloc(sizeof(*pnn), GFP_KERNEL);
- if (!pnn)
- return -ENOMEM;
+ struct phonet_net *pnn = net_generic(net, phonet_net_id);
- if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) {
- kfree(pnn);
+ if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops))
return -ENOMEM;
- }
INIT_LIST_HEAD(&pnn->pndevs.list);
- spin_lock_init(&pnn->pndevs.lock);
- spin_lock_init(&pnn->routes.lock);
- net_assign_generic(net, phonet_net_id, pnn);
+ mutex_init(&pnn->pndevs.lock);
+ mutex_init(&pnn->routes.lock);
return 0;
}
@@ -325,18 +344,19 @@ static void phonet_exit_net(struct net *net)
rtnl_unlock();
proc_net_remove(net, "phonet");
- kfree(pnn);
}
static struct pernet_operations phonet_net_ops = {
.init = phonet_init_net,
.exit = phonet_exit_net,
+ .id = &phonet_net_id,
+ .size = sizeof(struct phonet_net),
};
/* Initialize Phonet devices list */
int __init phonet_device_init(void)
{
- int err = register_pernet_gen_device(&phonet_net_id, &phonet_net_ops);
+ int err = register_pernet_device(&phonet_net_ops);
if (err)
return err;
@@ -351,7 +371,7 @@ void phonet_device_exit(void)
{
rtnl_unregister_all(PF_PHONET);
unregister_netdevice_notifier(&phonet_device_notifier);
- unregister_pernet_gen_device(phonet_net_id, &phonet_net_ops);
+ unregister_pernet_device(&phonet_net_ops);
}
int phonet_route_add(struct net_device *dev, u8 daddr)
@@ -361,13 +381,13 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
int err = -EEXIST;
daddr = daddr >> 2;
- spin_lock_bh(&routes->lock);
+ mutex_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
- routes->table[daddr] = dev;
+ rcu_assign_pointer(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}
- spin_unlock_bh(&routes->lock);
+ mutex_unlock(&routes->lock);
return err;
}
@@ -375,17 +395,20 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
{
struct phonet_net *pnn = net_generic(dev_net(dev), phonet_net_id);
struct phonet_routes *routes = &pnn->routes;
- int err = -ENOENT;
daddr = daddr >> 2;
- spin_lock_bh(&routes->lock);
- if (dev == routes->table[daddr]) {
- routes->table[daddr] = NULL;
- dev_put(dev);
- err = 0;
- }
- spin_unlock_bh(&routes->lock);
- return err;
+ mutex_lock(&routes->lock);
+ if (dev == routes->table[daddr])
+ rcu_assign_pointer(routes->table[daddr], NULL);
+ else
+ dev = NULL;
+ mutex_unlock(&routes->lock);
+
+ if (!dev)
+ return -ENOENT;
+ synchronize_rcu();
+ dev_put(dev);
+ return 0;
}
struct net_device *phonet_route_get(struct net *net, u8 daddr)
@@ -397,9 +420,9 @@ struct net_device *phonet_route_get(struct net *net, u8 daddr)
ASSERT_RTNL(); /* no need to hold the device */
daddr >>= 2;
- spin_lock_bh(&routes->lock);
- dev = routes->table[daddr];
- spin_unlock_bh(&routes->lock);
+ rcu_read_lock();
+ dev = rcu_dereference(routes->table[daddr]);
+ rcu_read_unlock();
return dev;
}
@@ -409,11 +432,12 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr)
struct phonet_routes *routes = &pnn->routes;
struct net_device *dev;
- spin_lock_bh(&routes->lock);
- dev = routes->table[daddr >> 2];
+ daddr >>= 2;
+ rcu_read_lock();
+ dev = rcu_dereference(routes->table[daddr]);
if (dev)
dev_hold(dev);
- spin_unlock_bh(&routes->lock);
+ rcu_read_unlock();
if (!dev)
dev = phonet_device_get(net); /* Default route */
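
The Phonet device list above keeps a mutex for writers while readers walk it with list_for_each_entry_rcu(); a removed entry is only freed after synchronize_rcu(). A stripped-down sketch of that writer/reader split, with illustrative names:

    #include <linux/rculist.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct item {
        struct list_head list;
        int key;
    };

    static LIST_HEAD(items);
    static DEFINE_MUTEX(items_lock);

    static bool item_exists(int key)
    {
        struct item *it;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &items, list) {
            if (it->key == key) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }

    static void item_del(struct item *it)
    {
        mutex_lock(&items_lock);
        list_del_rcu(&it->list);
        mutex_unlock(&items_lock);
        synchronize_rcu();      /* readers may still be walking the node */
        kfree(it);
    }
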
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d8f5d3fb9ee2..2e6c7eb8e76a 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -53,8 +53,7 @@ void phonet_address_notify(int event, struct net_device *dev, u8 addr)
RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
+ rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
}
static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
@@ -132,8 +131,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
int addr_idx = 0, addr_start_idx = cb->args[1];
pndevs = phonet_device_list(sock_net(skb->sk));
- spin_lock_bh(&pndevs->lock);
- list_for_each_entry(pnd, &pndevs->list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
u8 addr;
if (dev_idx > dev_start_idx)
@@ -155,7 +154,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
out:
- spin_unlock_bh(&pndevs->lock);
+ rcu_read_unlock();
cb->args[0] = dev_idx;
cb->args[1] = addr_idx;
@@ -212,8 +211,7 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
return;
errout:
- if (err < 0)
- rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
+ rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
}
static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 0412beb59a05..69c8b826a0ce 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -45,13 +45,28 @@ static int pn_socket_release(struct socket *sock)
return 0;
}
+#define PN_HASHSIZE 16
+#define PN_HASHMASK (PN_HASHSIZE-1)
+
+
static struct {
- struct hlist_head hlist;
+ struct hlist_head hlist[PN_HASHSIZE];
spinlock_t lock;
-} pnsocks = {
- .hlist = HLIST_HEAD_INIT,
- .lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock),
-};
+} pnsocks;
+
+void __init pn_sock_init(void)
+{
+ unsigned i;
+
+ for (i = 0; i < PN_HASHSIZE; i++)
+ INIT_HLIST_HEAD(pnsocks.hlist + i);
+ spin_lock_init(&pnsocks.lock);
+}
+
+static struct hlist_head *pn_hash_list(u16 obj)
+{
+ return pnsocks.hlist + (obj & PN_HASHMASK);
+}
/*
* Find address based on socket address, match only certain fields.
@@ -64,10 +79,11 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
struct sock *rval = NULL;
u16 obj = pn_sockaddr_get_object(spn);
u8 res = spn->spn_resource;
+ struct hlist_head *hlist = pn_hash_list(obj);
spin_lock_bh(&pnsocks.lock);
- sk_for_each(sknode, node, &pnsocks.hlist) {
+ sk_for_each(sknode, node, hlist) {
struct pn_sock *pn = pn_sk(sknode);
BUG_ON(!pn->sobject); /* unbound socket */
@@ -82,8 +98,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
if (pn->resource != res)
continue;
}
- if (pn_addr(pn->sobject)
- && pn_addr(pn->sobject) != pn_addr(obj))
+ if (pn_addr(pn->sobject) &&
+ pn_addr(pn->sobject) != pn_addr(obj))
continue;
rval = sknode;
@@ -99,31 +115,39 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
- struct hlist_node *node;
- struct sock *sknode;
+ struct hlist_head *hlist = pnsocks.hlist;
+ unsigned h;
spin_lock(&pnsocks.lock);
- sk_for_each(sknode, node, &pnsocks.hlist) {
- struct sk_buff *clone;
+ for (h = 0; h < PN_HASHSIZE; h++) {
+ struct hlist_node *node;
+ struct sock *sknode;
- if (!net_eq(sock_net(sknode), net))
- continue;
- if (!sock_flag(sknode, SOCK_BROADCAST))
- continue;
+ sk_for_each(sknode, node, hlist) {
+ struct sk_buff *clone;
- clone = skb_clone(skb, GFP_ATOMIC);
- if (clone) {
- sock_hold(sknode);
- sk_receive_skb(sknode, clone, 0);
+ if (!net_eq(sock_net(sknode), net))
+ continue;
+ if (!sock_flag(sknode, SOCK_BROADCAST))
+ continue;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone) {
+ sock_hold(sknode);
+ sk_receive_skb(sknode, clone, 0);
+ }
}
+ hlist++;
}
spin_unlock(&pnsocks.lock);
}
void pn_sock_hash(struct sock *sk)
{
+ struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
+
spin_lock_bh(&pnsocks.lock);
- sk_add_node(sk, &pnsocks.hlist);
+ sk_add_node(sk, hlist);
spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);
@@ -439,15 +463,20 @@ EXPORT_SYMBOL(pn_sock_get_port);
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
struct net *net = seq_file_net(seq);
+ struct hlist_head *hlist = pnsocks.hlist;
struct hlist_node *node;
struct sock *sknode;
+ unsigned h;
- sk_for_each(sknode, node, &pnsocks.hlist) {
- if (!net_eq(net, sock_net(sknode)))
- continue;
- if (!pos)
- return sknode;
- pos--;
+ for (h = 0; h < PN_HASHSIZE; h++) {
+ sk_for_each(sknode, node, hlist) {
+ if (!net_eq(net, sock_net(sknode)))
+ continue;
+ if (!pos)
+ return sknode;
+ pos--;
+ }
+ hlist++;
}
return NULL;
}
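
The single pnsocks.hlist becomes an array of PN_HASHSIZE buckets indexed by the low bits of the object id, so lookups only scan one bucket. The same bucketing idea in miniature, with hypothetical names; a power-of-two bucket count lets a mask stand in for a modulo:

    #include <net/sock.h>
    #include <linux/spinlock.h>

    #define EX_HASHSIZE 16
    #define EX_HASHMASK (EX_HASHSIZE - 1)

    static struct hlist_head ex_hash[EX_HASHSIZE];
    static DEFINE_SPINLOCK(ex_lock);

    static struct hlist_head *ex_bucket(u16 obj)
    {
        return ex_hash + (obj & EX_HASHMASK);
    }

    static void ex_hash_sock(struct sock *sk, u16 obj)
    {
        spin_lock_bh(&ex_lock);
        sk_add_node(sk, ex_bucket(obj));
        spin_unlock_bh(&ex_lock);
    }
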
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index a202e5b36079..853c52be781f 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -174,8 +174,8 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
mask |= (POLLIN | POLLRDNORM);
spin_unlock(&rs->rs_lock);
}
- if (!list_empty(&rs->rs_recv_queue)
- || !list_empty(&rs->rs_notify_queue))
+ if (!list_empty(&rs->rs_recv_queue) ||
+ !list_empty(&rs->rs_notify_queue))
mask |= (POLLIN | POLLRDNORM);
if (rs->rs_snd_bytes < rds_sk_sndbuf(rs))
mask |= (POLLOUT | POLLWRNORM);
@@ -265,6 +265,9 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
case RDS_GET_MR:
ret = rds_get_mr(rs, optval, optlen);
break;
+ case RDS_GET_MR_FOR_DEST:
+ ret = rds_get_mr_for_dest(rs, optval, optlen);
+ break;
case RDS_FREE_MR:
ret = rds_free_mr(rs, optval, optlen);
break;
@@ -305,8 +308,8 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
if (len < sizeof(int))
ret = -EINVAL;
else
- if (put_user(rs->rs_recverr, (int __user *) optval)
- || put_user(sizeof(int), optlen))
+ if (put_user(rs->rs_recverr, (int __user *) optval) ||
+ put_user(sizeof(int), optlen))
ret = -EFAULT;
else
ret = 0;
@@ -407,7 +410,8 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
return 0;
}
-static int rds_create(struct net *net, struct socket *sock, int protocol)
+static int rds_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index dd2711df640b..6d06cac2649c 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -218,6 +218,8 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
spin_lock_irqsave(&rds_cong_lock, flags);
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
+ if (conn->c_loopback)
+ continue;
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index cc8b568c0c84..278f607ab603 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -133,10 +133,8 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
spin_lock_irqsave(&rds_conn_lock, flags);
conn = rds_conn_lookup(head, laddr, faddr, trans);
- if (conn
- && conn->c_loopback
- && conn->c_trans != &rds_loop_transport
- && !is_outgoing) {
+ if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
+ !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 1378b854cac0..64df4e79b29f 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -98,6 +98,7 @@ struct rds_ib_connection {
struct rds_ib_send_work *i_sends;
/* rx */
+ struct tasklet_struct i_recv_tasklet;
struct mutex i_recv_mutex;
struct rds_ib_work_ring i_recv_ring;
struct rds_ib_incoming *i_ibinc;
@@ -303,6 +304,7 @@ void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c2d372f13dbb..647cb8ffc39b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -377,8 +377,8 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
}
/* Even if len is crap *now* I still want to check it. -ASG */
- if (event->param.conn.private_data_len < sizeof (*dp)
- || dp->dp_protocol_major == 0)
+ if (event->param.conn.private_data_len < sizeof (*dp) ||
+ dp->dp_protocol_major == 0)
return RDS_PROTOCOL_3_0;
common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
@@ -694,6 +694,8 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
return -ENOMEM;
INIT_LIST_HEAD(&ic->ib_node);
+ tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
+ (unsigned long) ic);
mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index ef3ab5b7283e..4b0da865a72c 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -187,11 +187,8 @@ void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
INIT_LIST_HEAD(list);
spin_unlock_irq(list_lock);
- list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
- if (ic->conn->c_passive)
- rds_conn_destroy(ic->conn->c_passive);
+ list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
rds_conn_destroy(ic->conn);
- }
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
@@ -573,8 +570,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
spin_unlock_irqrestore(&pool->list_lock, flags);
/* If we've pinned too many pages, request a flush */
- if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
- || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+ atomic_read(&pool->dirty_count) >= pool->max_items / 10)
queue_work(rds_wq, &pool->flush_worker);
if (invalidate) {
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..04dc0d3f3c95 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
int ret = -ENOMEM;
if (recv->r_ibinc == NULL) {
- if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+ if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
rds_ib_stats_inc(s_ib_rx_alloc_limit);
goto out;
}
recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
kptr_gfp);
- if (recv->r_ibinc == NULL)
+ if (recv->r_ibinc == NULL) {
+ atomic_dec(&rds_ib_allocation);
goto out;
- atomic_inc(&rds_ib_allocation);
+ }
INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
}
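
The refill path above swaps a racy read-then-increment for atomic_add_unless(), which only bumps the counter while it is still below the cap and is rolled back if the allocation itself fails. Roughly, with hypothetical names:

    #include <asm/atomic.h>
    #include <linux/slab.h>

    static atomic_t ex_allocated = ATOMIC_INIT(0);

    static void *ex_alloc_capped(size_t size, int cap, gfp_t gfp)
    {
        void *p;

        if (!atomic_add_unless(&ex_allocated, 1, cap))
            return NULL;                /* already at the cap */

        p = kmalloc(size, gfp);
        if (!p)
            atomic_dec(&ex_allocated);  /* hand the reserved slot back */
        return p;
    }
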
@@ -229,8 +230,8 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
int ret = 0;
u32 pos;
- while ((prefill || rds_conn_up(conn))
- && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+ while ((prefill || rds_conn_up(conn)) &&
+ rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
if (pos >= ic->i_recv_ring.w_nr) {
printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
pos);
@@ -770,10 +771,10 @@ static void rds_ib_process_recv(struct rds_connection *conn,
hdr = &ibinc->ii_inc.i_hdr;
/* We can't just use memcmp here; fragments of a
* single message may carry different ACKs */
- if (hdr->h_sequence != ihdr->h_sequence
- || hdr->h_len != ihdr->h_len
- || hdr->h_sport != ihdr->h_sport
- || hdr->h_dport != ihdr->h_dport) {
+ if (hdr->h_sequence != ihdr->h_sequence ||
+ hdr->h_len != ihdr->h_len ||
+ hdr->h_sport != ihdr->h_sport ||
+ hdr->h_dport != ihdr->h_dport) {
rds_ib_conn_error(conn,
"fragment header mismatch; forcing reconnect\n");
return;
@@ -824,17 +825,22 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
- struct ib_wc wc;
- struct rds_ib_ack_state state = { 0, };
- struct rds_ib_recv_work *recv;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_ib_stats_inc(s_ib_rx_cq_call);
- ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+ tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static inline void rds_poll_cq(struct rds_ib_connection *ic,
+ struct rds_ib_ack_state *state)
+{
+ struct rds_connection *conn = ic->conn;
+ struct ib_wc wc;
+ struct rds_ib_recv_work *recv;
- while (ib_poll_cq(cq, 1, &wc) > 0) {
+ while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc.wr_id, wc.status, wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
@@ -852,7 +858,7 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
/* We expect errors as the qp is drained during shutdown */
if (wc.status == IB_WC_SUCCESS) {
- rds_ib_process_recv(conn, recv, wc.byte_len, &state);
+ rds_ib_process_recv(conn, recv, wc.byte_len, state);
} else {
rds_ib_conn_error(conn, "recv completion on "
"%pI4 had status %u, disconnecting and "
@@ -863,6 +869,17 @@ void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
rds_ib_ring_free(&ic->i_recv_ring, 1);
}
+}
+
+void rds_ib_recv_tasklet_fn(unsigned long data)
+{
+ struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
+ struct rds_connection *conn = ic->conn;
+ struct rds_ib_ack_state state = { 0, };
+
+ rds_poll_cq(ic, &state);
+ ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+ rds_poll_cq(ic, &state);
if (state.ack_next_valid)
rds_ib_set_ack(ic, state.ack_next, state.ack_required);
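
The receive completion handler now only schedules a tasklet; the tasklet polls the CQ, re-arms the notification, then polls once more so completions that slipped in between the drain and the re-arm are not lost. A bare-bones sketch of that sequence (the real code passes the connection, not the CQ, as the tasklet data):

    #include <rdma/ib_verbs.h>

    static void example_recv_tasklet(unsigned long data)
    {
        struct ib_cq *cq = (struct ib_cq *)data;
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0)
            ;   /* process each completion here */

        ib_req_notify_cq(cq, IB_CQ_SOLICITED);

        while (ib_poll_cq(cq, 1, &wc) > 0)
            ;   /* catch completions that raced the re-arm */
    }
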
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 23bf830db2d5..a10fab6886d1 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -252,8 +252,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
rds_ib_ring_free(&ic->i_send_ring, completed);
- if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
- || test_bit(0, &conn->c_map_queued))
+ if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+ test_bit(0, &conn->c_map_queued))
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
/* We expect errors as the qp is drained during shutdown */
diff --git a/net/rds/iw.h b/net/rds/iw.h
index dd72b62bd506..eef2f0c28476 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -119,6 +119,7 @@ struct rds_iw_connection {
struct rds_iw_send_work *i_sends;
/* rx */
+ struct tasklet_struct i_recv_tasklet;
struct mutex i_recv_mutex;
struct rds_iw_work_ring i_recv_ring;
struct rds_iw_incoming *i_iwinc;
@@ -330,6 +331,7 @@ void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_iw_recv_tasklet_fn(unsigned long data);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a416b0d492b1..394cf6b4d0aa 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -696,6 +696,8 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
return -ENOMEM;
INIT_LIST_HEAD(&ic->iw_node);
+ tasklet_init(&ic->i_recv_tasklet, rds_iw_recv_tasklet_fn,
+ (unsigned long) ic);
mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index de4a1b16bf7b..9eda11cca956 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -245,11 +245,8 @@ void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
INIT_LIST_HEAD(list);
spin_unlock_irq(list_lock);
- list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node) {
- if (ic->conn->c_passive)
- rds_conn_destroy(ic->conn->c_passive);
+ list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
rds_conn_destroy(ic->conn);
- }
}
static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
@@ -576,8 +573,8 @@ void rds_iw_free_mr(void *trans_private, int invalidate)
rds_iw_free_fastreg(pool, ibmr);
/* If we've pinned too many pages, request a flush */
- if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
- || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
+ atomic_read(&pool->dirty_count) >= pool->max_items / 10)
queue_work(rds_wq, &pool->flush_worker);
if (invalidate) {
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b..54af7d6b92da 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
int ret = -ENOMEM;
if (recv->r_iwinc == NULL) {
- if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+ if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
rds_iw_stats_inc(s_iw_rx_alloc_limit);
goto out;
}
recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
kptr_gfp);
- if (recv->r_iwinc == NULL)
+ if (recv->r_iwinc == NULL) {
+ atomic_dec(&rds_iw_allocation);
goto out;
- atomic_inc(&rds_iw_allocation);
+ }
INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
}
@@ -229,8 +230,8 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
int ret = 0;
u32 pos;
- while ((prefill || rds_conn_up(conn))
- && rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
+ while ((prefill || rds_conn_up(conn)) &&
+ rds_iw_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
if (pos >= ic->i_recv_ring.w_nr) {
printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
pos);
@@ -729,10 +730,10 @@ static void rds_iw_process_recv(struct rds_connection *conn,
hdr = &iwinc->ii_inc.i_hdr;
/* We can't just use memcmp here; fragments of a
* single message may carry different ACKs */
- if (hdr->h_sequence != ihdr->h_sequence
- || hdr->h_len != ihdr->h_len
- || hdr->h_sport != ihdr->h_sport
- || hdr->h_dport != ihdr->h_dport) {
+ if (hdr->h_sequence != ihdr->h_sequence ||
+ hdr->h_len != ihdr->h_len ||
+ hdr->h_sport != ihdr->h_sport ||
+ hdr->h_dport != ihdr->h_dport) {
rds_iw_conn_error(conn,
"fragment header mismatch; forcing reconnect\n");
return;
@@ -783,17 +784,22 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_iw_connection *ic = conn->c_transport_data;
- struct ib_wc wc;
- struct rds_iw_ack_state state = { 0, };
- struct rds_iw_recv_work *recv;
rdsdebug("conn %p cq %p\n", conn, cq);
rds_iw_stats_inc(s_iw_rx_cq_call);
- ib_req_notify_cq(cq, IB_CQ_SOLICITED);
+ tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static inline void rds_poll_cq(struct rds_iw_connection *ic,
+ struct rds_iw_ack_state *state)
+{
+ struct rds_connection *conn = ic->conn;
+ struct ib_wc wc;
+ struct rds_iw_recv_work *recv;
- while (ib_poll_cq(cq, 1, &wc) > 0) {
+ while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
(unsigned long long)wc.wr_id, wc.status, wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
@@ -811,7 +817,7 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
/* We expect errors as the qp is drained during shutdown */
if (wc.status == IB_WC_SUCCESS) {
- rds_iw_process_recv(conn, recv, wc.byte_len, &state);
+ rds_iw_process_recv(conn, recv, wc.byte_len, state);
} else {
rds_iw_conn_error(conn, "recv completion on "
"%pI4 had status %u, disconnecting and "
@@ -822,6 +828,17 @@ void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context)
rds_iw_ring_free(&ic->i_recv_ring, 1);
}
+}
+
+void rds_iw_recv_tasklet_fn(unsigned long data)
+{
+ struct rds_iw_connection *ic = (struct rds_iw_connection *) data;
+ struct rds_connection *conn = ic->conn;
+ struct rds_iw_ack_state state = { 0, };
+
+ rds_poll_cq(ic, &state);
+ ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+ rds_poll_cq(ic, &state);
if (state.ack_next_valid)
rds_iw_set_ack(ic, state.ack_next, state.ack_required);
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1f5abe3cf2b4..1379e9d66a78 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -288,8 +288,8 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
rds_iw_ring_free(&ic->i_send_ring, completed);
- if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags)
- || test_bit(0, &conn->c_map_queued))
+ if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+ test_bit(0, &conn->c_map_queued))
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
/* We expect errors as the qp is drained during shutdown */
@@ -519,8 +519,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
/* Fastreg support */
- if (rds_rdma_cookie_key(rm->m_rdma_cookie)
- && !ic->i_fastreg_posted) {
+ if (rds_rdma_cookie_key(rm->m_rdma_cookie) && !ic->i_fastreg_posted) {
ret = -EAGAIN;
goto out;
}
diff --git a/net/rds/message.c b/net/rds/message.c
index ca50a8ec9742..73e600ffd87f 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -122,8 +122,7 @@ int rds_message_add_extension(struct rds_header *hdr,
if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
return 0;
- if (type >= __RDS_EXTHDR_MAX
- || len != rds_exthdr_size[type])
+ if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
return 0;
if (ext_len >= RDS_HEADER_EXT_SPACE)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8dc83d2caa58..4c64daa1f5d5 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -317,6 +317,30 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
return __rds_rdma_map(rs, &args, NULL, NULL);
}
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
+{
+ struct rds_get_mr_for_dest_args args;
+ struct rds_get_mr_args new_args;
+
+ if (optlen != sizeof(struct rds_get_mr_for_dest_args))
+ return -EINVAL;
+
+ if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
+ sizeof(struct rds_get_mr_for_dest_args)))
+ return -EFAULT;
+
+ /*
+ * Initially, just behave like get_mr().
+ * TODO: Implement get_mr as wrapper around this
+ * and deprecate it.
+ */
+ new_args.vec = args.vec;
+ new_args.cookie_addr = args.cookie_addr;
+ new_args.flags = args.flags;
+
+ return __rds_rdma_map(rs, &new_args, NULL, NULL);
+}
+
/*
* Free the MR indicated by the given R_Key
*/
@@ -607,8 +631,8 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
{
struct rds_rdma_op *op;
- if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
- || rm->m_rdma_op != NULL)
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
+ rm->m_rdma_op != NULL)
return -EINVAL;
op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
@@ -631,8 +655,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
u32 r_key;
int err = 0;
- if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t))
- || rm->m_rdma_cookie != 0)
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
+ rm->m_rdma_cookie != 0)
return -EINVAL;
memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));
@@ -668,8 +692,8 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
- if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args))
- || rm->m_rdma_cookie != 0)
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
+ rm->m_rdma_cookie != 0)
return -EINVAL;
return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
index 425512098b0b..909c39835a5d 100644
--- a/net/rds/rdma.h
+++ b/net/rds/rdma.h
@@ -61,6 +61,7 @@ static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
}
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
diff --git a/net/rds/recv.c b/net/rds/recv.c
index fdff33c7b432..b426d67f760c 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -195,8 +195,8 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
* XXX we could spend more on the wire to get more robust failure
* detection, arguably worth it to avoid data corruption.
*/
- if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq
- && (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
+ if (be64_to_cpu(inc->i_hdr.h_sequence) < conn->c_next_rx_seq &&
+ (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) {
rds_stats_inc(s_recv_drop_old_seq);
goto out;
}
@@ -432,10 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
}
timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
- (!list_empty(&rs->rs_notify_queue)
- || rs->rs_cong_notify
- || rds_next_incoming(rs, &inc)),
- timeo);
+ (!list_empty(&rs->rs_notify_queue) ||
+ rs->rs_cong_notify ||
+ rds_next_incoming(rs, &inc)), timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
diff --git a/net/rds/send.c b/net/rds/send.c
index 28c88ff3d038..b2fccfc20769 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -235,8 +235,8 @@ int rds_send_xmit(struct rds_connection *conn)
* connection.
* Therefore, we never retransmit messages with RDMA ops.
*/
- if (rm->m_rdma_op
- && test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
+ if (rm->m_rdma_op &&
+ test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
spin_lock_irqsave(&conn->c_lock, flags);
if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
list_move(&rm->m_conn_item, &to_be_dropped);
@@ -247,8 +247,8 @@ int rds_send_xmit(struct rds_connection *conn)
/* Require an ACK every once in a while */
len = ntohl(rm->m_inc.i_hdr.h_len);
- if (conn->c_unacked_packets == 0
- || conn->c_unacked_bytes < len) {
+ if (conn->c_unacked_packets == 0 ||
+ conn->c_unacked_bytes < len) {
__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
@@ -418,8 +418,8 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
spin_lock(&rm->m_rs_lock);
ro = rm->m_rdma_op;
- if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
- && ro && ro->r_notify && ro->r_notifier) {
+ if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+ ro && ro->r_notify && ro->r_notifier) {
notifier = ro->r_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
@@ -549,8 +549,7 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
list_del_init(&rm->m_sock_item);
rds_send_sndbuf_remove(rs, rm);
- if (ro && ro->r_notifier
- && (status || ro->r_notify)) {
+ if (ro && ro->r_notifier && (status || ro->r_notify)) {
notifier = ro->r_notifier;
list_add_tail(&notifier->n_list,
&rs->rs_notify_queue);
@@ -877,8 +876,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (ret)
goto out;
- if ((rm->m_rdma_cookie || rm->m_rdma_op)
- && conn->c_trans->xmit_rdma == NULL) {
+ if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
+ conn->c_trans->xmit_rdma == NULL) {
if (printk_ratelimit())
printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
rm->m_rdma_op, conn->c_trans->xmit_rdma);
@@ -890,8 +889,8 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
* have scheduled a delayed reconnect however - in this case
* we should not interfere.
*/
- if (rds_conn_state(conn) == RDS_CONN_DOWN
- && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
+ if (rds_conn_state(conn) == RDS_CONN_DOWN &&
+ !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -973,8 +972,8 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
* have scheduled a delayed reconnect however - in this case
* we should not interfere.
*/
- if (rds_conn_state(conn) == RDS_CONN_DOWN
- && !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
+ if (rds_conn_state(conn) == RDS_CONN_DOWN &&
+ !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index dd7e0cad1e7c..00fa10e59af8 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -170,8 +170,8 @@ void rds_shutdown_worker(struct work_struct *work)
* handler is supposed to check for state DISCONNECTING
*/
mutex_lock(&conn->c_cm_lock);
- if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
- && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
+ if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
+ !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
rds_conn_error(conn, "shutdown called in state %d\n",
atomic_read(&conn->c_state));
mutex_unlock(&conn->c_cm_lock);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ba2efb960c60..448e5a0fcc2e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -592,11 +592,13 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
return "wwan";
case RFKILL_TYPE_GPS:
return "gps";
+ case RFKILL_TYPE_FM:
+ return "fm";
default:
BUG();
}
- BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_GPS + 1);
+ BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
@@ -1189,6 +1191,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
#endif
static const struct file_operations rfkill_fops = {
+ .owner = THIS_MODULE,
.open = rfkill_fop_open,
.read = rfkill_fop_read,
.write = rfkill_fop_write,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c17734c2ce89..8feb9e5d6623 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -512,12 +512,13 @@ static struct proto rose_proto = {
.obj_size = sizeof(struct rose_sock),
};
-static int rose_create(struct net *net, struct socket *sock, int protocol)
+static int rose_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct rose_sock *rose;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 9478d9b3d977..795c4b025e31 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -77,8 +77,9 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
- if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0
- && rose_neigh->dev == dev)
+ if (ax25cmp(&rose_route->neighbour,
+ &rose_neigh->callsign) == 0 &&
+ rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
@@ -311,8 +312,9 @@ static int rose_del_node(struct rose_route_struct *rose_route,
rose_neigh = rose_neigh_list;
while (rose_neigh != NULL) {
- if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0
- && rose_neigh->dev == dev)
+ if (ax25cmp(&rose_route->neighbour,
+ &rose_neigh->callsign) == 0 &&
+ rose_neigh->dev == dev)
break;
rose_neigh = rose_neigh->next;
}
@@ -578,18 +580,18 @@ static int rose_clear_routes(void)
/*
* Check that the device given is a valid AX.25 interface that is "up".
+ * called with RTNL
*/
-static struct net_device *rose_ax25_dev_get(char *devname)
+static struct net_device *rose_ax25_dev_find(char *devname)
{
struct net_device *dev;
- if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
+ if ((dev = __dev_get_by_name(&init_net, devname)) == NULL)
return NULL;
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
return dev;
- dev_put(dev);
return NULL;
}
@@ -600,13 +602,13 @@ struct net_device *rose_dev_first(void)
{
struct net_device *dev, *first = NULL;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE)
if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
first = dev;
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return first;
}
@@ -618,8 +620,8 @@ struct net_device *rose_dev_get(rose_address *addr)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) {
dev_hold(dev);
goto out;
@@ -627,7 +629,7 @@ struct net_device *rose_dev_get(rose_address *addr)
}
dev = NULL;
out:
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return dev;
}
@@ -635,14 +637,14 @@ static int rose_dev_exists(rose_address *addr)
{
struct net_device *dev;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0)
goto out;
}
dev = NULL;
out:
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
return dev != NULL;
}
@@ -720,27 +722,23 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
case SIOCADDRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
- if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+ if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
- if (rose_dev_exists(&rose_route.address)) { /* Can't add routes to ourself */
- dev_put(dev);
+ if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */
return -EINVAL;
- }
if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
return -EINVAL;
if (rose_route.ndigis > AX25_MAX_DIGIS)
return -EINVAL;
err = rose_add_node(&rose_route, dev);
- dev_put(dev);
return err;
case SIOCDELRT:
if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct)))
return -EFAULT;
- if ((dev = rose_ax25_dev_get(rose_route.device)) == NULL)
+ if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL)
return -EINVAL;
err = rose_del_node(&rose_route, dev);
- dev_put(dev);
return err;
case SIOCRSCLRRT:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 6817c9781ef3..287b1415cee9 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -608,14 +608,15 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
/*
* create an RxRPC socket
*/
-static int rxrpc_create(struct net *net, struct socket *sock, int protocol)
+static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct rxrpc_sock *rx;
struct sock *sk;
_enter("%p,%d", sock, protocol);
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/* we support transport protocol UDP only */
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index ca2e1fd2bf69..2a740035aa6b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -969,7 +969,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = 0, ovr = 0;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
@@ -1052,7 +1052,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
struct nlattr *kind = find_dump_kind(cb->nlh);
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
if (kind == NULL) {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index b9aaab4e0354..d329170243cb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -65,48 +65,53 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
struct tc_mirred *parm;
struct tcf_mirred *m;
struct tcf_common *pc;
- struct net_device *dev = NULL;
- int ret = 0, err;
- int ok_push = 0;
+ struct net_device *dev;
+ int ret, ok_push = 0;
if (nla == NULL)
return -EINVAL;
-
- err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
- if (err < 0)
- return err;
-
+ ret = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
+ if (ret < 0)
+ return ret;
if (tb[TCA_MIRRED_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
+ switch (parm->eaction) {
+ case TCA_EGRESS_MIRROR:
+ case TCA_EGRESS_REDIR:
+ break;
+ default:
+ return -EINVAL;
+ }
if (parm->ifindex) {
dev = __dev_get_by_index(&init_net, parm->ifindex);
if (dev == NULL)
return -ENODEV;
switch (dev->type) {
- case ARPHRD_TUNNEL:
- case ARPHRD_TUNNEL6:
- case ARPHRD_SIT:
- case ARPHRD_IPGRE:
- case ARPHRD_VOID:
- case ARPHRD_NONE:
- ok_push = 0;
- break;
- default:
- ok_push = 1;
- break;
+ case ARPHRD_TUNNEL:
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ ok_push = 0;
+ break;
+ default:
+ ok_push = 1;
+ break;
}
+ } else {
+ dev = NULL;
}
pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
if (!pc) {
- if (!parm->ifindex)
+ if (dev == NULL)
return -EINVAL;
pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
&mirred_idx_gen, &mirred_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -119,12 +124,12 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
spin_lock_bh(&m->tcf_lock);
m->tcf_action = parm->action;
m->tcfm_eaction = parm->eaction;
- if (parm->ifindex) {
+ if (dev != NULL) {
m->tcfm_ifindex = parm->ifindex;
if (ret != ACT_P_CREATED)
dev_put(m->tcfm_dev);
- m->tcfm_dev = dev;
dev_hold(dev);
+ m->tcfm_dev = dev;
m->tcfm_ok_push = ok_push;
}
spin_unlock_bh(&m->tcf_lock);
@@ -148,57 +153,57 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_mirred *m = a->priv;
struct net_device *dev;
- struct sk_buff *skb2 = NULL;
- u32 at = G_TC_AT(skb->tc_verd);
+ struct sk_buff *skb2;
+ u32 at;
+ int retval, err = 1;
spin_lock(&m->tcf_lock);
-
- dev = m->tcfm_dev;
m->tcf_tm.lastuse = jiffies;
- if (!(dev->flags&IFF_UP) ) {
+ dev = m->tcfm_dev;
+ if (!(dev->flags & IFF_UP)) {
if (net_ratelimit())
printk("mirred to Houston: device %s is gone!\n",
dev->name);
-bad_mirred:
- if (skb2 != NULL)
- kfree_skb(skb2);
- m->tcf_qstats.overlimits++;
- m->tcf_bstats.bytes += qdisc_pkt_len(skb);
- m->tcf_bstats.packets++;
- spin_unlock(&m->tcf_lock);
- /* should we be asking for packet to be dropped?
- * may make sense for redirect case only
- */
- return TC_ACT_SHOT;
+ goto out;
}
skb2 = skb_act_clone(skb, GFP_ATOMIC);
if (skb2 == NULL)
- goto bad_mirred;
- if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
- m->tcfm_eaction != TCA_EGRESS_REDIR) {
- if (net_ratelimit())
- printk("tcf_mirred unknown action %d\n",
- m->tcfm_eaction);
- goto bad_mirred;
- }
+ goto out;
m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
m->tcf_bstats.packets++;
- if (!(at & AT_EGRESS))
+ at = G_TC_AT(skb->tc_verd);
+ if (!(at & AT_EGRESS)) {
if (m->tcfm_ok_push)
skb_push(skb2, skb2->dev->hard_header_len);
+ }
/* mirror is always swallowed */
if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
skb2->dev = dev;
- skb2->iif = skb->dev->ifindex;
+ skb2->skb_iif = skb->dev->ifindex;
dev_queue_xmit(skb2);
+ err = 0;
+
+out:
+ if (err) {
+ m->tcf_qstats.overlimits++;
+ m->tcf_bstats.bytes += qdisc_pkt_len(skb);
+ m->tcf_bstats.packets++;
+ /* should we be asking for packet to be dropped?
+ * may make sense for redirect case only
+ */
+ retval = TC_ACT_SHOT;
+ } else {
+ retval = m->tcf_action;
+ }
spin_unlock(&m->tcf_lock);
- return m->tcf_action;
+
+ return retval;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7cf6c0fbc7a6..3725d8fa29db 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -137,7 +137,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
int err;
int tp_created = 0;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
replay:
@@ -404,6 +404,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
}
+/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -417,12 +418,12 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
const struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
+ if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
return skb->len;
if (!tcm->tcm_parent)
@@ -484,7 +485,6 @@ errout:
if (cl)
cops->put(q, cl);
out:
- dev_put(dev);
return skb->len;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 9402a7fd3785..e054c62857e1 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -171,7 +171,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
static u32 flow_get_iif(const struct sk_buff *skb)
{
- return skb->iif;
+ return skb->skb_iif;
}
static u32 flow_get_priority(const struct sk_buff *skb)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 7034ea4530e5..dd9414e44200 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -170,21 +170,23 @@ restart:
for (s = sht[h1]; s; s = s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
protocol == s->protocol &&
- !(s->dpi.mask & (*(u32*)(xprt+s->dpi.offset)^s->dpi.key))
+ !(s->dpi.mask &
+ (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
#if RSVP_DST_LEN == 4
- && dst[0] == s->dst[0]
- && dst[1] == s->dst[1]
- && dst[2] == s->dst[2]
+ dst[0] == s->dst[0] &&
+ dst[1] == s->dst[1] &&
+ dst[2] == s->dst[2] &&
#endif
- && tunnelid == s->tunnelid) {
+ tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) {
if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
!(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
#if RSVP_DST_LEN == 4
- && src[0] == f->src[0]
- && src[1] == f->src[1]
- && src[2] == f->src[2]
+ &&
+ src[0] == f->src[0] &&
+ src[1] == f->src[1] &&
+ src[2] == f->src[2]
#endif
) {
*res = f->res;
@@ -493,13 +495,13 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
- memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0
+ memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
#if RSVP_DST_LEN == 4
- && dst[0] == s->dst[0]
- && dst[1] == s->dst[1]
- && dst[2] == s->dst[2]
+ dst[0] == s->dst[0] &&
+ dst[1] == s->dst[1] &&
+ dst[2] == s->dst[2] &&
#endif
- && pinfo->tunnelid == s->tunnelid) {
+ pinfo->tunnelid == s->tunnelid) {
insert:
/* OK, we found appropriate session */
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 18d85d259104..24dce8b648a4 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -303,17 +303,18 @@ META_COLLECTOR(var_sk_bound_if)
{
SKIP_NONLOCAL(skb);
- if (skb->sk->sk_bound_dev_if == 0) {
+ if (skb->sk->sk_bound_dev_if == 0) {
dst->value = (unsigned long) "any";
dst->len = 3;
- } else {
+ } else {
struct net_device *dev;
- dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(skb->sk),
+ skb->sk->sk_bound_dev_if);
*err = var_dev(dev, dst);
- if (dev)
- dev_put(dev);
- }
+ rcu_read_unlock();
+ }
}
META_COLLECTOR(int_sk_refcnt)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1acfd29cc826..75fd1c672c61 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -947,7 +947,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1009,7 +1009,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *q, *p;
int err;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
replay:
@@ -1274,14 +1274,15 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
int s_idx, s_q_idx;
struct net_device *dev;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
s_idx = cb->args[0];
s_q_idx = q_idx = cb->args[1];
- read_lock(&dev_base_lock);
+
+ rcu_read_lock();
idx = 0;
- for_each_netdev(&init_net, dev) {
+ for_each_netdev_rcu(&init_net, dev) {
struct netdev_queue *dev_queue;
if (idx < s_idx)
@@ -1302,7 +1303,7 @@ cont:
}
done:
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
cb->args[0] = idx;
cb->args[1] = q_idx;
@@ -1333,7 +1334,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EINVAL;
if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
@@ -1575,7 +1576,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
int t, s_t;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return 0;
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ae6aa562f2b..5173c1e1b19c 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -119,32 +119,26 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
spin_unlock(root_lock);
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_tx_queue_stopped(txq) &&
- !netif_tx_queue_frozen(txq))
+ if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
ret = dev_hard_start_xmit(skb, dev, txq);
+
HARD_TX_UNLOCK(dev, txq);
spin_lock(root_lock);
- switch (ret) {
- case NETDEV_TX_OK:
- /* Driver sent out skb successfully */
+ if (dev_xmit_complete(ret)) {
+ /* Driver sent out skb successfully or skb was consumed */
ret = qdisc_qlen(q);
- break;
-
- case NETDEV_TX_LOCKED:
+ } else if (ret == NETDEV_TX_LOCKED) {
/* Driver try lock failed */
ret = handle_dev_cpu_collision(skb, txq, q);
- break;
-
- default:
+ } else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
printk(KERN_WARNING "BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
- break;
}
if (ret && (netif_tx_queue_stopped(txq) ||
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2e38d1abd830..508cf5f3a6d5 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1344,8 +1344,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
};
/* check for valid classid */
- if (!classid || TC_H_MAJ(classid ^ sch->handle)
- || htb_find(classid, sch))
+ if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
+ htb_find(classid, sch))
goto failure;
/* check maximal depth */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2b88295cb7b7..d8b10e054627 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -199,9 +199,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* do it now in software before we mangle it.
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
- if (!(skb = skb_unshare(skb, GFP_ATOMIC))
- || (skb->ip_summed == CHECKSUM_PARTIAL
- && skb_checksum_help(skb))) {
+ if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))) {
sch->qstats.drops++;
return NET_XMIT_DROP;
}
@@ -210,9 +210,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
cb = netem_skb_cb(skb);
- if (q->gap == 0 /* not doing reordering */
- || q->counter < q->gap /* inside last reordering gap */
- || q->reorder < get_crandom(&q->reorder_cor)) {
+ if (q->gap == 0 || /* not doing reordering */
+ q->counter < q->gap || /* inside last reordering gap */
+ q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
psched_tdiff_t delay;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 5a002c247231..db69637069c4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -190,10 +190,13 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
if (m->slaves) {
if (m->dev->flags & IFF_UP) {
- if ((m->dev->flags&IFF_POINTOPOINT && !(dev->flags&IFF_POINTOPOINT))
- || (m->dev->flags&IFF_BROADCAST && !(dev->flags&IFF_BROADCAST))
- || (m->dev->flags&IFF_MULTICAST && !(dev->flags&IFF_MULTICAST))
- || dev->mtu < m->dev->mtu)
+ if ((m->dev->flags & IFF_POINTOPOINT &&
+ !(dev->flags & IFF_POINTOPOINT)) ||
+ (m->dev->flags & IFF_BROADCAST &&
+ !(dev->flags & IFF_BROADCAST)) ||
+ (m->dev->flags & IFF_MULTICAST &&
+ !(dev->flags & IFF_MULTICAST)) ||
+ dev->mtu < m->dev->mtu)
return -EINVAL;
} else {
if (!(dev->flags&IFF_POINTOPOINT))
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8450960df24f..df5abbff63e2 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -63,6 +63,12 @@
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
+/* Keep track of the new idr low so that we don't re-use association id
+ * numbers too fast. It is protected by the idr spin lock and is in the
+ * range of 1 - INT_MAX.
+ */
+static u32 idr_low = 1;
+
/* 1st Level Abstractions. */
@@ -167,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- sp->autoclose * HZ;
+ (unsigned long)sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -512,7 +518,13 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
* to this destination address earlier. The sender MUST set
* CYCLING_CHANGEOVER to indicate that this switch is a
* double switch to the same destination address.
+ *
+ * Really, only bother if we have data queued or outstanding on
+ * the association.
*/
+ if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
+ return;
+
if (transport->cacc.changeover_active)
transport->cacc.cycling_changeover = changeover;
@@ -732,6 +744,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
peer->partial_bytes_acked = 0;
peer->flight_size = 0;
+ peer->burst_limited = 0;
/* Set the transport's RTO.initial value */
peer->rto = asoc->rto_initial;
@@ -1377,8 +1390,9 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
case SCTP_STATE_SHUTDOWN_RECEIVED:
case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
- ((asoc->rwnd - asoc->a_rwnd) >=
- min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu)))
+ ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
+ (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
+ asoc->pathmtu)))
return 1;
break;
default:
@@ -1485,15 +1499,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
* local endpoint and the remote peer.
*/
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
- gfp_t gfp)
+ sctp_scope_t scope, gfp_t gfp)
{
- sctp_scope_t scope;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- scope = sctp_scope(&asoc->peer.active_path->ipaddr);
flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
@@ -1547,7 +1559,12 @@ retry:
spin_lock_bh(&sctp_assocs_id_lock);
error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
- 1, &assoc_id);
+ idr_low, &assoc_id);
+ if (!error) {
+ idr_low = assoc_id + 1;
+ if (idr_low == INT_MAX)
+ idr_low = 1;
+ }
spin_unlock_bh(&sctp_assocs_id_lock);
if (error == -EAGAIN)
goto retry;
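
The idr_low bookkeeping above is the usual low-watermark allocation pattern: start each search just past the last id handed out and wrap back to 1 before reaching INT_MAX, so a freshly freed association id is not reissued immediately. A minimal user-space sketch of the same pattern, with the idr machinery stubbed out (the allocation is assumed to always succeed):

#include <limits.h>
#include <stdio.h>

static unsigned int id_low = 1;		/* plays the role of idr_low */

static int alloc_assoc_id(void)
{
	int id = (int)id_low;		/* assume idr_get_new_above() succeeded */

	id_low = id + 1;		/* advance the low watermark */
	if (id_low == INT_MAX)		/* wrap well before overflow */
		id_low = 1;
	return id;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("association id %d\n", alloc_assoc_id());
	return 0;
}
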
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index acf7c4d128f7..8e4320040f05 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -263,9 +263,18 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
if (0 == i)
frag |= SCTP_DATA_FIRST_FRAG;
- if ((i == (whole - 1)) && !over)
+ if ((i == (whole - 1)) && !over) {
frag |= SCTP_DATA_LAST_FRAG;
+ /* The application requests to set the I-bit of the
+ * last DATA chunk of a user message when providing
+ * the user message to the SCTP implementation.
+ */
+ if ((sinfo->sinfo_flags & SCTP_EOF) ||
+ (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
+ frag |= SCTP_DATA_SACK_IMM;
+ }
+
chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
if (!chunk)
@@ -297,6 +306,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
else
frag = SCTP_DATA_LAST_FRAG;
+ if ((sinfo->sinfo_flags & SCTP_EOF) ||
+ (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
+ frag |= SCTP_DATA_SACK_IMM;
+
chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
if (!chunk)
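
For the sender side of SACK-IMMEDIATELY shown above, an application asks for the I-bit by passing SCTP_EOF or SCTP_SACK_IMMEDIATELY in sinfo_flags. A hedged user-space sketch of such a send, assuming a libsctp that exposes sctp_sendmsg() and the SCTP_SACK_IMMEDIATELY flag:

#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_sendmsg(), SCTP_SACK_IMMEDIATELY */
#include <string.h>

/* sinfo_flags set here reach sctp_datamsg_from_user(), which then marks
 * the last fragment with SCTP_DATA_SACK_IMM as in the hunk above.
 */
static int send_with_immediate_sack(int sd, struct sockaddr_in *peer,
				    const char *buf, size_t len)
{
	return sctp_sendmsg(sd, buf, len,
			    (struct sockaddr *)peer, sizeof(*peer),
			    0 /* ppid */, SCTP_SACK_IMMEDIATELY,
			    0 /* stream */, 0 /* ttl */, 0 /* context */);
}
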
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bb280e60e00a..cc50fbe99291 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -837,15 +837,16 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
if (type & IPV6_ADDR_LINKLOCAL) {
if (!addr->v6.sin6_scope_id)
return 0;
- dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
- if (!dev)
- return 0;
- if (!ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(&init_net,
+ addr->v6.sin6_scope_id);
+ if (!dev ||
+ !ipv6_chk_addr(&init_net, &addr->v6.sin6_addr,
dev, 0)) {
- dev_put(dev);
+ rcu_read_unlock();
return 0;
}
- dev_put(dev);
+ rcu_read_unlock();
} else if (type == IPV6_ADDR_MAPPED) {
if (!opt->v4mapped)
return 0;
@@ -873,10 +874,12 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
if (type & IPV6_ADDR_LINKLOCAL) {
if (!addr->v6.sin6_scope_id)
return 0;
- dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id);
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(&init_net,
+ addr->v6.sin6_scope_id);
+ rcu_read_unlock();
if (!dev)
return 0;
- dev_put(dev);
}
af = opt->pf->af;
}
@@ -930,7 +933,6 @@ static struct inet_protosw sctpv6_seqpacket_protosw = {
.protocol = IPPROTO_SCTP,
.prot = &sctpv6_prot,
.ops = &inet6_seqpacket_ops,
- .capability = -1,
.no_check = 0,
.flags = SCTP_PROTOSW_FLAG
};
@@ -939,7 +941,6 @@ static struct inet_protosw sctpv6_stream_protosw = {
.protocol = IPPROTO_SCTP,
.prot = &sctpv6_prot,
.ops = &inet6_seqpacket_ops,
- .capability = -1,
.no_check = 0,
.flags = SCTP_PROTOSW_FLAG,
};
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 5cbda8f1ddfd..7c5589363433 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -429,23 +429,22 @@ int sctp_packet_transmit(struct sctp_packet *packet)
list_del_init(&chunk->list);
if (sctp_chunk_is_data(chunk)) {
- if (!chunk->has_tsn) {
- sctp_chunk_assign_ssn(chunk);
- sctp_chunk_assign_tsn(chunk);
-
- /* 6.3.1 C4) When data is in flight and when allowed
- * by rule C5, a new RTT measurement MUST be made each
- * round trip. Furthermore, new RTT measurements
- * SHOULD be made no more than once per round-trip
- * for a given destination transport address.
- */
+ if (!chunk->resent) {
+
+ /* 6.3.1 C4) When data is in flight and when allowed
+ * by rule C5, a new RTT measurement MUST be made each
+ * round trip. Furthermore, new RTT measurements
+ * SHOULD be made no more than once per round-trip
+ * for a given destination transport address.
+ */
if (!tp->rto_pending) {
chunk->rtt_in_progress = 1;
tp->rto_pending = 1;
}
- } else
- chunk->resent = 1;
+ }
+
+ chunk->resent = 1;
has_data = 1;
}
@@ -557,8 +556,6 @@ int sctp_packet_transmit(struct sctp_packet *packet)
struct timer_list *timer;
unsigned long timeout;
- tp->last_time_used = jiffies;
-
/* Restart the AUTOCLOSE timer when sending data. */
if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
@@ -617,7 +614,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
sctp_xmit_t retval = SCTP_XMIT_OK;
size_t datasize, rwnd, inflight, flight_size;
struct sctp_transport *transport = packet->transport;
- __u32 max_burst_bytes;
struct sctp_association *asoc = transport->asoc;
struct sctp_outq *q = &asoc->outqueue;
@@ -650,28 +646,6 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
}
}
- /* sctpimpguide-05 2.14.2
- * D) When the time comes for the sender to
- * transmit new DATA chunks, the protocol parameter Max.Burst MUST
- * first be applied to limit how many new DATA chunks may be sent.
- * The limit is applied by adjusting cwnd as follows:
- * if ((flightsize + Max.Burst * MTU) < cwnd)
- * cwnd = flightsize + Max.Burst * MTU
- */
- max_burst_bytes = asoc->max_burst * asoc->pathmtu;
- if ((flight_size + max_burst_bytes) < transport->cwnd) {
- transport->cwnd = flight_size + max_burst_bytes;
- SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
- "transport: %p, cwnd: %d, "
- "ssthresh: %d, flight_size: %d, "
- "pba: %d\n",
- __func__, transport,
- transport->cwnd,
- transport->ssthresh,
- transport->flight_size,
- transport->partial_bytes_acked);
- }
-
/* RFC 2960 6.1 Transmission of DATA Chunks
*
* B) At any given time, the sender MUST NOT transmit new data
@@ -747,6 +721,8 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
/* Has been accepted for transmission. */
if (!asoc->peer.prsctp_capable)
chunk->msg->can_abandon = 0;
+ sctp_chunk_assign_tsn(chunk);
+ sctp_chunk_assign_ssn(chunk);
}
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c9f20e28521b..229690f02a1d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -191,8 +191,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
__u32 tsn)
{
if (primary->cacc.changeover_active &&
- (sctp_cacc_skip_3_1(primary, transport, count_of_newacks)
- || sctp_cacc_skip_3_2(primary, tsn)))
+ (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
+ sctp_cacc_skip_3_2(primary, tsn)))
return 1;
return 0;
}
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
if ((reason == SCTP_RTXR_FAST_RTX &&
(chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
(reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
- /* If this chunk was sent less then 1 rto ago, do not
- * retransmit this chunk, but give the peer time
- * to acknowlege it. Do this only when
- * retransmitting due to T3 timeout.
- */
- if (reason == SCTP_RTXR_T3_RTX &&
- time_before(jiffies, chunk->sent_at +
- transport->last_rto))
- continue;
-
/* RFC 2960 6.2.1 Processing a Received SACK
*
* C) Any time a DATA chunk is marked for
@@ -931,6 +921,14 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
goto sctp_flush_out;
}
+ /* Apply Max.Burst limitation to the current transport in
+ * case it will be used for new data. We are going to
+ * reset it before we return, but we want to apply the limit
+ * to the currently queued data.
+ */
+ if (transport)
+ sctp_transport_burst_limited(transport);
+
/* Finally, transmit new packets. */
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
@@ -976,6 +974,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
packet = &transport->packet;
sctp_packet_config(packet, vtag,
asoc->peer.ecn_capable);
+ /* We've switched transports, so apply the
+ * Burst limit to the new transport.
+ */
+ sctp_transport_burst_limited(transport);
}
SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
@@ -1011,6 +1013,13 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
break;
case SCTP_XMIT_OK:
+ /* If the sender is in the SHUTDOWN-PENDING state,
+ * it MAY set the I-bit in the DATA
+ * chunk header.
+ */
+ if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+ chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
+
break;
default:
@@ -1063,6 +1072,9 @@ sctp_flush_out:
packet = &t->packet;
if (!sctp_packet_empty(packet))
error = sctp_packet_transmit(packet);
+
+ /* Clear the burst limited state, if any */
+ sctp_transport_burst_reset(t);
}
return error;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d9f4cc2c7869..a3c8988758b1 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -205,14 +205,14 @@ static void sctp_get_local_addr_list(void)
struct list_head *pos;
struct sctp_af *af;
- read_lock(&dev_base_lock);
- for_each_netdev(&init_net, dev) {
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
__list_for_each(pos, &sctp_address_families) {
af = list_entry(pos, struct sctp_af, list);
af->copy_addrlist(&sctp_local_addr_list, dev);
}
}
- read_unlock(&dev_base_lock);
+ rcu_read_unlock();
}
/* Free the existing local addresses. */
@@ -909,7 +909,6 @@ static struct inet_protosw sctp_seqpacket_protosw = {
.protocol = IPPROTO_SCTP,
.prot = &sctp_prot,
.ops = &inet_seqpacket_ops,
- .capability = -1,
.no_check = 0,
.flags = SCTP_PROTOSW_FLAG
};
@@ -918,7 +917,6 @@ static struct inet_protosw sctp_stream_protosw = {
.protocol = IPPROTO_SCTP,
.prot = &sctp_prot,
.ops = &inet_seqpacket_ops,
- .capability = -1,
.no_check = 0,
.flags = SCTP_PROTOSW_FLAG
};
@@ -1260,6 +1258,9 @@ SCTP_STATIC __init int sctp_init(void)
/* Set SCOPE policy to enabled */
sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+ /* Set the default rwnd update threshold */
+ sctp_rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT;
+
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9d881a61ac02..9e732916b671 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -987,7 +987,10 @@ static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
target = skb_put(chunk->skb, len);
- memcpy(target, data, len);
+ if (data)
+ memcpy(target, data, len);
+ else
+ memset(target, 0, len);
/* Adjust the chunk length field. */
chunk->chunk_hdr->length = htons(chunklen + len);
@@ -1129,16 +1132,18 @@ nodata:
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
__be16 cause_code, const void *payload,
- size_t paylen)
+ size_t paylen, size_t reserve_tail)
{
struct sctp_chunk *retval;
- retval = sctp_make_op_error_space(asoc, chunk, paylen);
+ retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail);
if (!retval)
goto nodata;
- sctp_init_cause(retval, cause_code, paylen);
+ sctp_init_cause(retval, cause_code, paylen + reserve_tail);
sctp_addto_chunk(retval, paylen, payload);
+ if (reserve_tail)
+ sctp_addto_param(retval, reserve_tail, NULL);
nodata:
return retval;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8674d4919556..d771cc1b777a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -217,8 +217,7 @@ static int sctp_gen_sack(struct sctp_association *asoc, int force,
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
} else {
- if (asoc->a_rwnd > asoc->rwnd)
- asoc->a_rwnd = asoc->rwnd;
+ asoc->a_rwnd = asoc->rwnd;
sack = sctp_make_sack(asoc);
if (!sack)
goto nomem;
@@ -480,7 +479,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
* that indicates that we have an outstanding HB.
*/
if (!is_hb || transport->hb_sent) {
- transport->last_rto = transport->rto;
transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
}
@@ -1418,6 +1416,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
asoc->init_last_sent_to = t;
chunk->transport = t;
t->init_sent_count++;
+ /* Set the new transport as primary */
+ sctp_assoc_set_primary(asoc, t);
break;
case SCTP_CMD_INIT_RESTART:
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c8fae1983dd1..1ef9de9bbae9 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
if (!new_asoc)
goto nomem;
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)),
+ GFP_ATOMIC) < 0)
+ goto nomem_init;
+
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
- if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
- goto nomem_init;
-
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_init;
@@ -994,14 +996,15 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
sctp_sf_heartbeat(ep, asoc, type, arg,
commands))
return SCTP_DISPOSITION_NOMEM;
+
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
- SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(transport));
}
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+ SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
@@ -1452,6 +1455,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
if (!new_asoc)
goto nomem;
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+ goto nomem;
+
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and Peers Verification tag into a reserved
* place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1495,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
sizeof(sctp_chunkhdr_t);
}
- if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
- goto nomem;
-
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
@@ -1717,7 +1721,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
err = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_COOKIE_IN_SHUTDOWN,
- NULL, 0);
+ NULL, 0, 0);
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
@@ -2865,6 +2869,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
+ sctp_arg_t force = SCTP_NOFORCE();
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
@@ -2898,6 +2903,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
BUG();
}
+ if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
+ force = SCTP_FORCE();
+
if (asoc->autoclose) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2926,7 +2934,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
* more aggressive than the following algorithms allow.
*/
if (chunk->end_of_packet)
- sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_CONSUME;
@@ -2951,7 +2959,7 @@ discard_force:
discard_noforce:
if (chunk->end_of_packet)
- sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_DISCARD;
consume:
@@ -3970,7 +3978,7 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
err_chunk = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_UNSUP_HMAC,
&auth_hdr->hmac_id,
- sizeof(__u16));
+ sizeof(__u16), 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
@@ -4062,7 +4070,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)));
+ WORD_ROUND(ntohs(hdr->length)),
+ 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
@@ -4081,7 +4090,8 @@ sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)));
+ WORD_ROUND(ntohs(hdr->length)),
+ 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
@@ -6045,7 +6055,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
- sizeof(data_hdr->stream));
+ sizeof(data_hdr->stream),
+ sizeof(u16));
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4085db99033d..89ab66e54740 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
err = -ENOMEM;
goto out_free;
}
+
+ err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
+ GFP_KERNEL);
+ if (err < 0) {
+ goto out_free;
+ }
+
}
/* Prime the peer's transport structures. */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
walk_size += af->sockaddr_len;
}
- err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
- if (err < 0) {
- goto out_free;
- }
-
/* In case the user of sctp_connectx() wants an association
* id back, assign one now.
*/
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
}
/*
- * New (hopefully final) interface for the API. The option buffer is used
- * both for the returned association id and the addresses.
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+ * can avoid any unnecessary allocations. The only different part
+ * is that we store the actual length of the address buffer into the
+ * addr_num structure member. That way we can re-use the existing
+ * code.
*/
SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
char __user *optval,
int __user *optlen)
{
+ struct sctp_getaddrs_old param;
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(assoc_id))
+ if (len < sizeof(param))
return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+
err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)(optval + sizeof(assoc_id)),
- len - sizeof(assoc_id), &assoc_id);
+ (struct sockaddr __user *)param.addrs,
+ param.addr_num, &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
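
The buffer exchanged here is the three-field sctp_getaddrs_old layout (association id, length, pointer). A hedged sketch of how a user-space library might drive this option; the local struct mirroring the kernel layout and the SCTP_SOCKOPT_CONNECTX3 value are assumptions, not part of this patch:

#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_assoc_t */
#include <sys/socket.h>

#ifndef SCTP_SOCKOPT_CONNECTX3
#define SCTP_SOCKOPT_CONNECTX3 111	/* assumed value from user.h of this era */
#endif

struct connectx3_param {		/* local mirror of struct sctp_getaddrs_old */
	sctp_assoc_t	assoc_id;	/* out: association id */
	int		addr_num;	/* in:  byte length of the packed addresses */
	struct sockaddr	*addrs;		/* in:  packed sockaddr buffer */
};

static int my_connectx(int sd, struct sockaddr *addrs, int addrs_len,
		       sctp_assoc_t *id)
{
	struct connectx3_param p = {
		.assoc_id = 0,
		.addr_num = addrs_len,	/* length, not count, per the comment above */
		.addrs	  = addrs,
	};
	socklen_t len = sizeof(p);
	int err = getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_CONNECTX3,
			     &p, &len);

	if (!err && id)
		*id = p.assoc_id;	/* kernel copied the id back into optval */
	return err;
}
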
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
asoc = new_asoc;
+ err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
+ if (err < 0) {
+ err = -ENOMEM;
+ goto out_free;
+ }
/* If the SCTP_INIT ancillary data is specified, set all
* the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
err = -ENOMEM;
goto out_free;
}
- err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
- if (err < 0) {
- err = -ENOMEM;
- goto out_free;
- }
}
/* ASSERT: we have a valid association at this point. */
@@ -2076,6 +2086,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
return -EINVAL;
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
+ /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
+ if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) )
+ sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
return 0;
}
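
The clamp above exists because the value is later multiplied by HZ to build a jiffies timeout (see the association init hunk earlier), so anything above MAX_SCHEDULE_TIMEOUT / HZ could never be scheduled. A quick user-space check of the resulting cap, with HZ assumed to be 1000 for illustration:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000;		/* assumed CONFIG_HZ */
	unsigned long cap = LONG_MAX / hz;	/* MAX_SCHEDULE_TIMEOUT / HZ */

	printf("largest accepted autoclose: %lu seconds\n", cap);
	return 0;
}
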
@@ -2301,11 +2314,10 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
}
}
- /* Note that unless the spp_flag is set to SPP_PMTUD_ENABLE the value
- * of this field is ignored. Note also that a value of zero
- * indicates the current setting should be left unchanged.
+ /* Note that a value of zero indicates the current setting should be
+ left unchanged.
*/
- if ((params->spp_flags & SPP_PMTUD_ENABLE) && params->spp_pathmaxrxt) {
+ if (params->spp_pathmaxrxt) {
if (trans) {
trans->pathmaxrxt = params->spp_pathmaxrxt;
} else if (asoc) {
@@ -2344,8 +2356,8 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
pmtud_change == SPP_PMTUD ||
sackdelay_change == SPP_SACKDELAY ||
params.spp_sackdelay > 500 ||
- (params.spp_pathmtu
- && params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
+ (params.spp_pathmtu &&
+ params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
return -EINVAL;
/* If an address other than INADDR_ANY is specified, and
@@ -4339,90 +4351,6 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval
return 0;
}
-static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
- char __user *optval,
- int __user *optlen)
-{
- sctp_assoc_t id;
- struct sctp_association *asoc;
- struct list_head *pos;
- int cnt = 0;
-
- if (len < sizeof(sctp_assoc_t))
- return -EINVAL;
-
- if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
- return -EFAULT;
-
- printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_NUM_OLD "
- "socket option deprecated\n");
- /* For UDP-style sockets, id specifies the association to query. */
- asoc = sctp_id2assoc(sk, id);
- if (!asoc)
- return -EINVAL;
-
- list_for_each(pos, &asoc->peer.transport_addr_list) {
- cnt ++;
- }
-
- return cnt;
-}
-
-/*
- * Old API for getting list of peer addresses. Does not work for 32-bit
- * programs running on a 64-bit kernel
- */
-static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
- char __user *optval,
- int __user *optlen)
-{
- struct sctp_association *asoc;
- int cnt = 0;
- struct sctp_getaddrs_old getaddrs;
- struct sctp_transport *from;
- void __user *to;
- union sctp_addr temp;
- struct sctp_sock *sp = sctp_sk(sk);
- int addrlen;
-
- if (len < sizeof(struct sctp_getaddrs_old))
- return -EINVAL;
-
- len = sizeof(struct sctp_getaddrs_old);
-
- if (copy_from_user(&getaddrs, optval, len))
- return -EFAULT;
-
- if (getaddrs.addr_num <= 0) return -EINVAL;
-
- printk(KERN_WARNING "SCTP: Use of SCTP_GET_PEER_ADDRS_OLD "
- "socket option deprecated\n");
-
- /* For UDP-style sockets, id specifies the association to query. */
- asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
- if (!asoc)
- return -EINVAL;
-
- to = (void __user *)getaddrs.addrs;
- list_for_each_entry(from, &asoc->peer.transport_addr_list,
- transports) {
- memcpy(&temp, &from->ipaddr, sizeof(temp));
- sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
- addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
- if (copy_to_user(to, &temp, addrlen))
- return -EFAULT;
- to += addrlen ;
- cnt ++;
- if (cnt >= getaddrs.addr_num) break;
- }
- getaddrs.addr_num = cnt;
- if (put_user(len, optlen))
- return -EFAULT;
- if (copy_to_user(optval, &getaddrs, len))
- return -EFAULT;
-
- return 0;
-}
static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
@@ -4475,125 +4403,6 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
return 0;
}
-static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
- char __user *optval,
- int __user *optlen)
-{
- sctp_assoc_t id;
- struct sctp_bind_addr *bp;
- struct sctp_association *asoc;
- struct sctp_sockaddr_entry *addr;
- int cnt = 0;
-
- if (len < sizeof(sctp_assoc_t))
- return -EINVAL;
-
- if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
- return -EFAULT;
-
- printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_NUM_OLD "
- "socket option deprecated\n");
-
- /*
- * For UDP-style sockets, id specifies the association to query.
- * If the id field is set to the value '0' then the locally bound
- * addresses are returned without regard to any particular
- * association.
- */
- if (0 == id) {
- bp = &sctp_sk(sk)->ep->base.bind_addr;
- } else {
- asoc = sctp_id2assoc(sk, id);
- if (!asoc)
- return -EINVAL;
- bp = &asoc->base.bind_addr;
- }
-
- /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid
- * addresses from the global local address list.
- */
- if (sctp_list_single_entry(&bp->address_list)) {
- addr = list_entry(bp->address_list.next,
- struct sctp_sockaddr_entry, list);
- if (sctp_is_any(sk, &addr->a)) {
- rcu_read_lock();
- list_for_each_entry_rcu(addr,
- &sctp_local_addr_list, list) {
- if (!addr->valid)
- continue;
-
- if ((PF_INET == sk->sk_family) &&
- (AF_INET6 == addr->a.sa.sa_family))
- continue;
-
- if ((PF_INET6 == sk->sk_family) &&
- inet_v6_ipv6only(sk) &&
- (AF_INET == addr->a.sa.sa_family))
- continue;
-
- cnt++;
- }
- rcu_read_unlock();
- } else {
- cnt = 1;
- }
- goto done;
- }
-
- /* Protection on the bound address list is not needed,
- * since in the socket option context we hold the socket lock,
- * so there is no way that the bound address list can change.
- */
- list_for_each_entry(addr, &bp->address_list, list) {
- cnt ++;
- }
-done:
- return cnt;
-}
-
-/* Helper function that copies local addresses to user and returns the number
- * of addresses copied.
- */
-static int sctp_copy_laddrs_old(struct sock *sk, __u16 port,
- int max_addrs, void *to,
- int *bytes_copied)
-{
- struct sctp_sockaddr_entry *addr;
- union sctp_addr temp;
- int cnt = 0;
- int addrlen;
-
- rcu_read_lock();
- list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) {
- if (!addr->valid)
- continue;
-
- if ((PF_INET == sk->sk_family) &&
- (AF_INET6 == addr->a.sa.sa_family))
- continue;
- if ((PF_INET6 == sk->sk_family) &&
- inet_v6_ipv6only(sk) &&
- (AF_INET == addr->a.sa.sa_family))
- continue;
- memcpy(&temp, &addr->a, sizeof(temp));
- if (!temp.v4.sin_port)
- temp.v4.sin_port = htons(port);
-
- sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
- &temp);
- addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- memcpy(to, &temp, addrlen);
-
- to += addrlen;
- *bytes_copied += addrlen;
- cnt ++;
- if (cnt >= max_addrs) break;
- }
- rcu_read_unlock();
-
- return cnt;
-}
-
static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
size_t space_left, int *bytes_copied)
{
@@ -4637,112 +4446,6 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
return cnt;
}
-/* Old API for getting list of local addresses. Does not work for 32-bit
- * programs running on a 64-bit kernel
- */
-static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
- char __user *optval, int __user *optlen)
-{
- struct sctp_bind_addr *bp;
- struct sctp_association *asoc;
- int cnt = 0;
- struct sctp_getaddrs_old getaddrs;
- struct sctp_sockaddr_entry *addr;
- void __user *to;
- union sctp_addr temp;
- struct sctp_sock *sp = sctp_sk(sk);
- int addrlen;
- int err = 0;
- void *addrs;
- void *buf;
- int bytes_copied = 0;
-
- if (len < sizeof(struct sctp_getaddrs_old))
- return -EINVAL;
-
- len = sizeof(struct sctp_getaddrs_old);
- if (copy_from_user(&getaddrs, optval, len))
- return -EFAULT;
-
- if (getaddrs.addr_num <= 0 ||
- getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr)))
- return -EINVAL;
-
- printk(KERN_WARNING "SCTP: Use of SCTP_GET_LOCAL_ADDRS_OLD "
- "socket option deprecated\n");
-
- /*
- * For UDP-style sockets, id specifies the association to query.
- * If the id field is set to the value '0' then the locally bound
- * addresses are returned without regard to any particular
- * association.
- */
- if (0 == getaddrs.assoc_id) {
- bp = &sctp_sk(sk)->ep->base.bind_addr;
- } else {
- asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
- if (!asoc)
- return -EINVAL;
- bp = &asoc->base.bind_addr;
- }
-
- to = getaddrs.addrs;
-
- /* Allocate space for a local instance of packed array to hold all
- * the data. We store addresses here first and then put write them
- * to the user in one shot.
- */
- addrs = kmalloc(sizeof(union sctp_addr) * getaddrs.addr_num,
- GFP_KERNEL);
- if (!addrs)
- return -ENOMEM;
-
- /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
- * addresses from the global local address list.
- */
- if (sctp_list_single_entry(&bp->address_list)) {
- addr = list_entry(bp->address_list.next,
- struct sctp_sockaddr_entry, list);
- if (sctp_is_any(sk, &addr->a)) {
- cnt = sctp_copy_laddrs_old(sk, bp->port,
- getaddrs.addr_num,
- addrs, &bytes_copied);
- goto copy_getaddrs;
- }
- }
-
- buf = addrs;
- /* Protection on the bound address list is not needed since
- * in the socket option context we hold a socket lock and
- * thus the bound address list can't change.
- */
- list_for_each_entry(addr, &bp->address_list, list) {
- memcpy(&temp, &addr->a, sizeof(temp));
- sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
- addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- memcpy(buf, &temp, addrlen);
- buf += addrlen;
- bytes_copied += addrlen;
- cnt ++;
- if (cnt >= getaddrs.addr_num) break;
- }
-
-copy_getaddrs:
- /* copy the entire address list into the user provided space */
- if (copy_to_user(to, addrs, bytes_copied)) {
- err = -EFAULT;
- goto error;
- }
-
- /* copy the leading structure back to user */
- getaddrs.addr_num = cnt;
- if (copy_to_user(optval, &getaddrs, len))
- err = -EFAULT;
-
-error:
- kfree(addrs);
- return err;
-}
static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
char __user *optval, int __user *optlen)
@@ -5593,22 +5296,6 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_INITMSG:
retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
break;
- case SCTP_GET_PEER_ADDRS_NUM_OLD:
- retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval,
- optlen);
- break;
- case SCTP_GET_LOCAL_ADDRS_NUM_OLD:
- retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval,
- optlen);
- break;
- case SCTP_GET_PEER_ADDRS_OLD:
- retval = sctp_getsockopt_peer_addrs_old(sk, len, optval,
- optlen);
- break;
- case SCTP_GET_LOCAL_ADDRS_OLD:
- retval = sctp_getsockopt_local_addrs_old(sk, len, optval,
- optlen);
- break;
case SCTP_GET_PEER_ADDRS:
retval = sctp_getsockopt_peer_addrs(sk, len, optval,
optlen);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ab7151da120f..ae03ded2bf1a 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -52,6 +52,7 @@ static int int_max = INT_MAX;
static int sack_timer_min = 1;
static int sack_timer_max = 500;
static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
+static int rwnd_scale_max = 16;
extern int sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
@@ -284,6 +285,18 @@ static ctl_table sctp_table[] = {
.extra1 = &zero,
.extra2 = &addr_scope_max,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "rwnd_update_shift",
+ .data = &sctp_rwnd_upd_shift,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &one,
+ .extra2 = &rwnd_scale_max,
+ },
+
{ .ctl_name = 0 }
};
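
rwnd_update_shift tunes the window-update SACK threshold used in sctp_peer_needs_update() earlier in this patch: an update is sent once rwnd - a_rwnd reaches max(sk_rcvbuf >> shift, pathmtu). A small worked example, with the default shift of 4 taken as an assumption:

#include <stdio.h>

static unsigned int update_threshold(unsigned int sk_rcvbuf,
				     unsigned int pathmtu,
				     unsigned int shift)
{
	unsigned int scaled = sk_rcvbuf >> shift;

	return scaled > pathmtu ? scaled : pathmtu;	/* max_t(__u32, ...) */
}

int main(void)
{
	/* 256 KiB receive buffer, 1500-byte path MTU, shift 4 -> 16 KiB */
	printf("window update threshold: %u bytes\n",
	       update_threshold(256 * 1024, 1500, 4));
	return 0;
}
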
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c256e4839316..b827d21dbe54 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* given destination transport address, set RTO to the protocol
* parameter 'RTO.Initial'.
*/
- peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
+ peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rtt = 0;
peer->rttvar = 0;
peer->srtt = 0;
@@ -83,7 +83,6 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->fast_recovery = 0;
peer->last_time_heard = jiffies;
- peer->last_time_used = jiffies;
peer->last_time_ecne_reduced = jiffies;
peer->init_sent_count = 0;
@@ -308,7 +307,8 @@ void sctp_transport_route(struct sctp_transport *transport,
/* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
*/
- if (asoc && (transport == asoc->peer.active_path))
+ if (asoc && (!asoc->peer.primary_path ||
+ (transport == asoc->peer.active_path)))
opt->pf->af->to_sk_saddr(&transport->saddr,
asoc->base.sk);
} else
@@ -385,7 +385,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
tp->rto = tp->asoc->rto_max;
tp->rtt = rtt;
- tp->last_rto = tp->rto;
/* Reset rto_pending so that a new RTT measurement is started when a
* new data chunk is sent.
@@ -564,10 +563,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
* to be done every RTO interval, we do it every heartbeat
* interval.
*/
- if (time_after(jiffies, transport->last_time_used +
- transport->rto))
- transport->cwnd = max(transport->cwnd/2,
- 4*transport->asoc->pathmtu);
+ transport->cwnd = max(transport->cwnd/2,
+ 4*transport->asoc->pathmtu);
break;
}
@@ -578,6 +575,43 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
transport->cwnd, transport->ssthresh);
}
+/* Apply Max.Burst limit to the congestion window:
+ * sctpimpguide-05 2.14.2
+ * D) When the time comes for the sender to
+ * transmit new DATA chunks, the protocol parameter Max.Burst MUST
+ * first be applied to limit how many new DATA chunks may be sent.
+ * The limit is applied by adjusting cwnd as follows:
+ * if ((flightsize + Max.Burst * MTU) < cwnd)
+ * cwnd = flightsize + Max.Burst * MTU
+ */
+
+void sctp_transport_burst_limited(struct sctp_transport *t)
+{
+ struct sctp_association *asoc = t->asoc;
+ u32 old_cwnd = t->cwnd;
+ u32 max_burst_bytes;
+
+ if (t->burst_limited)
+ return;
+
+ max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
+ if (max_burst_bytes < old_cwnd) {
+ t->cwnd = max_burst_bytes;
+ t->burst_limited = old_cwnd;
+ }
+}
+
+/* Restore the old cwnd congestion window, after the burst has had its
+ * desired effect.
+ */
+void sctp_transport_burst_reset(struct sctp_transport *t)
+{
+ if (t->burst_limited) {
+ t->cwnd = t->burst_limited;
+ t->burst_limited = 0;
+ }
+}
+
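
A worked example of the clamp that sctp_transport_burst_limited() applies; the numbers are illustrative assumptions, not values taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int flight_size = 3000;	/* bytes currently in flight */
	unsigned int max_burst   = 4;		/* protocol parameter Max.Burst */
	unsigned int pathmtu     = 1500;
	unsigned int cwnd        = 12000;
	unsigned int limit       = flight_size + max_burst * pathmtu;	/* 9000 */

	if (limit < cwnd)
		cwnd = limit;	/* same clamp as sctp_transport_burst_limited() */

	printf("cwnd after burst limiting: %u\n", cwnd);	/* prints 9000 */
	return 0;
}
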
/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
@@ -600,8 +634,9 @@ void sctp_transport_reset(struct sctp_transport *t)
* (see Section 6.2.1)
*/
t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+ t->burst_limited = 0;
t->ssthresh = asoc->peer.i.a_rwnd;
- t->last_rto = t->rto = asoc->rto_initial;
+ t->rto = asoc->rto_initial;
t->rtt = 0;
t->srtt = 0;
t->rttvar = 0;
diff --git a/net/socket.c b/net/socket.c
index 9dff31c9b799..b94c3dd71015 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -97,6 +97,12 @@
#include <net/sock.h>
#include <linux/netfilter.h>
+#include <linux/if_tun.h>
+#include <linux/ipv6_route.h>
+#include <linux/route.h>
+#include <linux/sockios.h>
+#include <linux/atalk.h>
+
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
@@ -919,6 +925,24 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
EXPORT_SYMBOL(dlci_ioctl_set);
+static long sock_do_ioctl(struct net *net, struct socket *sock,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ void __user *argp = (void __user *)arg;
+
+ err = sock->ops->ioctl(sock, cmd, arg);
+
+ /*
+ * If this ioctl is unknown try to hand it down
+ * to the NIC driver.
+ */
+ if (err == -ENOIOCTLCMD)
+ err = dev_ioctl(net, cmd, argp);
+
+ return err;
+}
+
/*
* With an ioctl, arg may well be a user mode pointer, but we don't know
* what to do with it - that's up to the protocol still.
@@ -992,14 +1016,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
mutex_unlock(&dlci_ioctl_mutex);
break;
default:
- err = sock->ops->ioctl(sock, cmd, arg);
-
- /*
- * If this ioctl is unknown try to hand it down
- * to the NIC driver.
- */
- if (err == -ENOIOCTLCMD)
- err = dev_ioctl(net, cmd, argp);
+ err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
@@ -1252,7 +1269,7 @@ static int __sock_create(struct net *net, int family, int type, int protocol,
/* Now protected by module ref count */
rcu_read_unlock();
- err = pf->create(net, sock, protocol);
+ err = pf->create(net, sock, protocol, kern);
if (err < 0)
goto out_module_put;
@@ -2127,6 +2144,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
int fput_needed, err, datagrams;
struct socket *sock;
struct mmsghdr __user *entry;
+ struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
struct timespec end_time;
@@ -2146,19 +2164,30 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
goto out_put;
entry = mmsg;
+ compat_entry = (struct compat_mmsghdr __user *)mmsg;
while (datagrams < vlen) {
/*
* No need to ask LSM for more than the first datagram.
*/
- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, datagrams);
- if (err < 0)
- break;
- err = put_user(err, &entry->msg_len);
+ if (MSG_CMSG_COMPAT & flags) {
+ err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+ &msg_sys, flags, datagrams);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+ err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
+ &msg_sys, flags, datagrams);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+ ++entry;
+ }
+
if (err)
break;
- ++entry;
++datagrams;
if (timeout) {
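
The write-back of msg_len above is what 32-bit callers see through compat_mmsghdr. A minimal native user-space sketch of recvmmsg() over a loopback UDP socket, assuming a libc that already wraps the new syscall:

#define _GNU_SOURCE
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int sd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in a = { .sin_family = AF_INET };
	socklen_t alen = sizeof(a);
	struct mmsghdr msgs[2];
	struct iovec iov[2];
	char buf[2][64];

	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(sd, (struct sockaddr *)&a, sizeof(a));	/* ephemeral port */
	getsockname(sd, (struct sockaddr *)&a, &alen);

	/* queue two datagrams to ourselves */
	sendto(sd, "one", 3, 0, (struct sockaddr *)&a, sizeof(a));
	sendto(sd, "two", 3, 0, (struct sockaddr *)&a, sizeof(a));

	memset(msgs, 0, sizeof(msgs));
	for (int i = 0; i < 2; i++) {
		iov[i].iov_base = buf[i];
		iov[i].iov_len  = sizeof(buf[i]);
		msgs[i].msg_hdr.msg_iov    = &iov[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* the kernel fills msgs[i].msg_len, as done with put_user() above */
	int n = recvmmsg(sd, msgs, 2, 0, NULL);
	for (int i = 0; i < n; i++)
		printf("datagram %d: %u bytes\n", i, msgs[i].msg_len);

	close(sd);
	return 0;
}
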
@@ -2459,6 +2488,552 @@ void socket_seq_show(struct seq_file *seq)
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_COMPAT
+static int do_siocgstamp(struct net *net, struct socket *sock,
+ unsigned int cmd, struct compat_timeval __user *up)
+{
+ mm_segment_t old_fs = get_fs();
+ struct timeval ktv;
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
+ set_fs(old_fs);
+ if (!err) {
+ err = put_user(ktv.tv_sec, &up->tv_sec);
+ err |= __put_user(ktv.tv_usec, &up->tv_usec);
+ }
+ return err;
+}
+
+static int do_siocgstampns(struct net *net, struct socket *sock,
+ unsigned int cmd, struct compat_timespec __user *up)
+{
+ mm_segment_t old_fs = get_fs();
+ struct timespec kts;
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
+ set_fs(old_fs);
+ if (!err) {
+ err = put_user(kts.tv_sec, &up->tv_sec);
+ err |= __put_user(kts.tv_nsec, &up->tv_nsec);
+ }
+ return err;
+}
+
+static int dev_ifname32(struct net *net, struct compat_ifreq __user *uifr32)
+{
+ struct ifreq __user *uifr;
+ int err;
+
+ uifr = compat_alloc_user_space(sizeof(struct ifreq));
+ if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+
+ err = dev_ioctl(net, SIOCGIFNAME, uifr);
+ if (err)
+ return err;
+
+ if (copy_in_user(uifr32, uifr, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
+{
+ struct compat_ifconf ifc32;
+ struct ifconf ifc;
+ struct ifconf __user *uifc;
+ struct compat_ifreq __user *ifr32;
+ struct ifreq __user *ifr;
+ unsigned int i, j;
+ int err;
+
+ if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
+ return -EFAULT;
+
+ if (ifc32.ifcbuf == 0) {
+ ifc32.ifc_len = 0;
+ ifc.ifc_len = 0;
+ ifc.ifc_req = NULL;
+ uifc = compat_alloc_user_space(sizeof(struct ifconf));
+ } else {
+ size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) *
+ sizeof (struct ifreq);
+ uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
+ ifc.ifc_len = len;
+ ifr = ifc.ifc_req = (void __user *)(uifc + 1);
+ ifr32 = compat_ptr(ifc32.ifcbuf);
+ for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) {
+ if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+ ifr++;
+ ifr32++;
+ }
+ }
+ if (copy_to_user(uifc, &ifc, sizeof(struct ifconf)))
+ return -EFAULT;
+
+ err = dev_ioctl(net, SIOCGIFCONF, uifc);
+ if (err)
+ return err;
+
+ if (copy_from_user(&ifc, uifc, sizeof(struct ifconf)))
+ return -EFAULT;
+
+ ifr = ifc.ifc_req;
+ ifr32 = compat_ptr(ifc32.ifcbuf);
+ for (i = 0, j = 0;
+ i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
+ i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) {
+ if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq)))
+ return -EFAULT;
+ ifr32++;
+ ifr++;
+ }
+
+ if (ifc32.ifcbuf == 0) {
+ /* Translate ifc_len from a multiple of the 64-bit ifreq size
+ * to the corresponding 32-bit multiple.
+ */
+ i = ifc.ifc_len;
+ i = ((i / sizeof(struct ifreq)) * sizeof(struct compat_ifreq));
+ ifc32.ifc_len = i;
+ } else {
+ ifc32.ifc_len = i;
+ }
+ if (copy_to_user(uifc32, &ifc32, sizeof(struct compat_ifconf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
+{
+ struct ifreq __user *ifr;
+ u32 data;
+ void __user *datap;
+
+ ifr = compat_alloc_user_space(sizeof(*ifr));
+
+ if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+ return -EFAULT;
+
+ if (get_user(data, &ifr32->ifr_ifru.ifru_data))
+ return -EFAULT;
+
+ datap = compat_ptr(data);
+ if (put_user(datap, &ifr->ifr_ifru.ifru_data))
+ return -EFAULT;
+
+ return dev_ioctl(net, SIOCETHTOOL, ifr);
+}
+
+static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32)
+{
+ void __user *uptr;
+ compat_uptr_t uptr32;
+ struct ifreq __user *uifr;
+
+ uifr = compat_alloc_user_space(sizeof (*uifr));
+ if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+
+ if (get_user(uptr32, &uifr32->ifr_settings.ifs_ifsu))
+ return -EFAULT;
+
+ uptr = compat_ptr(uptr32);
+
+ if (put_user(uptr, &uifr->ifr_settings.ifs_ifsu.raw_hdlc))
+ return -EFAULT;
+
+ return dev_ioctl(net, SIOCWANDEV, uifr);
+}
+
+static int bond_ioctl(struct net *net, unsigned int cmd,
+ struct compat_ifreq __user *ifr32)
+{
+ struct ifreq kifr;
+ struct ifreq __user *uifr;
+ mm_segment_t old_fs;
+ int err;
+ u32 data;
+ void __user *datap;
+
+ switch (cmd) {
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = dev_ioctl(net, cmd, &kifr);
+ set_fs (old_fs);
+
+ return err;
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+ uifr = compat_alloc_user_space(sizeof(*uifr));
+ if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
+ return -EFAULT;
+
+ if (get_user(data, &ifr32->ifr_ifru.ifru_data))
+ return -EFAULT;
+
+ datap = compat_ptr(data);
+ if (put_user(datap, &uifr->ifr_ifru.ifru_data))
+ return -EFAULT;
+
+ return dev_ioctl(net, cmd, uifr);
+ default:
+ return -EINVAL;
+ };
+}
+
+static int siocdevprivate_ioctl(struct net *net, unsigned int cmd,
+ struct compat_ifreq __user *u_ifreq32)
+{
+ struct ifreq __user *u_ifreq64;
+ char tmp_buf[IFNAMSIZ];
+ void __user *data64;
+ u32 data32;
+
+ if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
+ IFNAMSIZ))
+ return -EFAULT;
+ if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+ return -EFAULT;
+ data64 = compat_ptr(data32);
+
+ u_ifreq64 = compat_alloc_user_space(sizeof(*u_ifreq64));
+
+ /* Don't check these user accesses, just let that get trapped
+ * in the ioctl handler instead.
+ */
+ if (copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0],
+ IFNAMSIZ))
+ return -EFAULT;
+ if (__put_user(data64, &u_ifreq64->ifr_ifru.ifru_data))
+ return -EFAULT;
+
+ return dev_ioctl(net, cmd, u_ifreq64);
+}
+
+static int dev_ifsioc(struct net *net, struct socket *sock,
+ unsigned int cmd, struct compat_ifreq __user *uifr32)
+{
+ struct ifreq __user *uifr;
+ int err;
+
+ uifr = compat_alloc_user_space(sizeof(*uifr));
+ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
+ return -EFAULT;
+
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
+
+ if (!err) {
+ switch (cmd) {
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFINDEX:
+ case SIOCGIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFPFLAGS:
+ case SIOCGIFTXQLEN:
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
+ err = -EFAULT;
+ break;
+ }
+ }
+ return err;
+}
+
+static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
+ struct compat_ifreq __user *uifr32)
+{
+ struct ifreq ifr;
+ struct compat_ifmap __user *uifmap32;
+ mm_segment_t old_fs;
+ int err;
+
+ uifmap32 = &uifr32->ifr_ifru.ifru_map;
+ err = copy_from_user(&ifr, uifr32, sizeof(ifr.ifr_name));
+ err |= __get_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+ err |= __get_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+ err |= __get_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+ err |= __get_user(ifr.ifr_map.irq, &uifmap32->irq);
+ err |= __get_user(ifr.ifr_map.dma, &uifmap32->dma);
+ err |= __get_user(ifr.ifr_map.port, &uifmap32->port);
+ if (err)
+ return -EFAULT;
+
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = dev_ioctl(net, cmd, (void __user *)&ifr);
+ set_fs (old_fs);
+
+ if (cmd == SIOCGIFMAP && !err) {
+ err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
+ err |= __put_user(ifr.ifr_map.mem_start, &uifmap32->mem_start);
+ err |= __put_user(ifr.ifr_map.mem_end, &uifmap32->mem_end);
+ err |= __put_user(ifr.ifr_map.base_addr, &uifmap32->base_addr);
+ err |= __put_user(ifr.ifr_map.irq, &uifmap32->irq);
+ err |= __put_user(ifr.ifr_map.dma, &uifmap32->dma);
+ err |= __put_user(ifr.ifr_map.port, &uifmap32->port);
+ if (err)
+ err = -EFAULT;
+ }
+ return err;
+}
+
+static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uifr32)
+{
+ void __user *uptr;
+ compat_uptr_t uptr32;
+ struct ifreq __user *uifr;
+
+ uifr = compat_alloc_user_space(sizeof (*uifr));
+ if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
+ return -EFAULT;
+
+ if (get_user(uptr32, &uifr32->ifr_data))
+ return -EFAULT;
+
+ uptr = compat_ptr(uptr32);
+
+ if (put_user(uptr, &uifr->ifr_data))
+ return -EFAULT;
+
+ return dev_ioctl(net, SIOCSHWTSTAMP, uifr);
+}
+
+struct rtentry32 {
+ u32 rt_pad1;
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
+ /* char * */ u32 rt_dev; /* forcing the device at add */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+};
+
+struct in6_rtmsg32 {
+ struct in6_addr rtmsg_dst;
+ struct in6_addr rtmsg_src;
+ struct in6_addr rtmsg_gateway;
+ u32 rtmsg_type;
+ u16 rtmsg_dst_len;
+ u16 rtmsg_src_len;
+ u32 rtmsg_metric;
+ u32 rtmsg_info;
+ u32 rtmsg_flags;
+ s32 rtmsg_ifindex;
+};
+
+static int routing_ioctl(struct net *net, struct socket *sock,
+ unsigned int cmd, void __user *argp)
+{
+ int ret;
+ void *r = NULL;
+ struct in6_rtmsg r6;
+ struct rtentry r4;
+ char devname[16];
+ u32 rtdev;
+ mm_segment_t old_fs = get_fs();
+
+ if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
+ struct in6_rtmsg32 __user *ur6 = argp;
+ ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
+ 3 * sizeof(struct in6_addr));
+ ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
+ ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
+ ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
+ ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
+ ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
+ ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
+ ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
+
+ r = (void *) &r6;
+ } else { /* ipv4 */
+ struct rtentry32 __user *ur4 = argp;
+ ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
+ 3 * sizeof(struct sockaddr));
+ ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
+ ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
+ ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
+ ret |= __get_user(r4.rt_window, &(ur4->rt_window));
+ ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
+ ret |= __get_user(rtdev, &(ur4->rt_dev));
+ if (rtdev) {
+ ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
+ r4.rt_dev = devname;
+ devname[15] = 0;
+ } else
+ r4.rt_dev = NULL;
+
+ r = (void *) &r4;
+ }
+
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ set_fs(KERNEL_DS);
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
+ set_fs(old_fs);
+
+out:
+ return ret;
+}
+
+/* Since old-style bridge ioctls end up using SIOCDEVPRIVATE
+ * for some operations, this forces use of the newer bridge-utils
+ * that use compatible ioctls.
+ */
+static int old_bridge_ioctl(compat_ulong_t __user *argp)
+{
+ compat_ulong_t tmp;
+
+ if (get_user(tmp, argp))
+ return -EFAULT;
+ if (tmp == BRCTL_GET_VERSION)
+ return BRCTL_VERSION + 1;
+ return -EINVAL;
+}
+
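+/*
+ * Dispatcher for translated socket ioctls: commands needing argument
+ * conversion are routed to helpers above, 32-bit-clean ones go straight
+ * to sock_ioctl()/sock_do_ioctl()/dev_ifsioc(), a few always-invalid
+ * ones return -EINVAL, and everything else is -ENOIOCTLCMD.
+ */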
+static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
+ unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = compat_ptr(arg);
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+
+ if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))
+ return siocdevprivate_ioctl(net, cmd, argp);
+
+ switch (cmd) {
+ case SIOCSIFBR:
+ case SIOCGIFBR:
+ return old_bridge_ioctl(argp);
+ case SIOCGIFNAME:
+ return dev_ifname32(net, argp);
+ case SIOCGIFCONF:
+ return dev_ifconf(net, argp);
+ case SIOCETHTOOL:
+ return ethtool_ioctl(net, argp);
+ case SIOCWANDEV:
+ return compat_siocwandev(net, argp);
+ case SIOCGIFMAP:
+ case SIOCSIFMAP:
+ return compat_sioc_ifmap(net, cmd, argp);
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+ case SIOCBONDCHANGEACTIVE:
+ return bond_ioctl(net, cmd, argp);
+ case SIOCADDRT:
+ case SIOCDELRT:
+ return routing_ioctl(net, sock, cmd, argp);
+ case SIOCGSTAMP:
+ return do_siocgstamp(net, sock, cmd, argp);
+ case SIOCGSTAMPNS:
+ return do_siocgstampns(net, sock, cmd, argp);
+ case SIOCSHWTSTAMP:
+ return compat_siocshwtstamp(net, argp);
+
+ case FIOSETOWN:
+ case SIOCSPGRP:
+ case FIOGETOWN:
+ case SIOCGPGRP:
+ case SIOCBRADDBR:
+ case SIOCBRDELBR:
+ case SIOCGIFVLAN:
+ case SIOCSIFVLAN:
+ case SIOCADDDLCI:
+ case SIOCDELDLCI:
+ return sock_ioctl(file, cmd, arg);
+
+ case SIOCGIFFLAGS:
+ case SIOCSIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCSIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCSIFMTU:
+ case SIOCGIFMEM:
+ case SIOCSIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ case SIOCGIFINDEX:
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCSIFHWBROADCAST:
+ case SIOCDIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCSIFPFLAGS:
+ case SIOCGIFPFLAGS:
+ case SIOCGIFTXQLEN:
+ case SIOCSIFTXQLEN:
+ case SIOCBRADDIF:
+ case SIOCBRDELIF:
+ case SIOCSIFNAME:
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return dev_ifsioc(net, sock, cmd, argp);
+
+ case SIOCSARP:
+ case SIOCGARP:
+ case SIOCDARP:
+ case SIOCATMARK:
+ return sock_do_ioctl(net, sock, cmd, arg);
+ }
+
+ /* Prevent a warning from compat_sys_ioctl; these always
+ * result in -EINVAL in the native case anyway. */
+ switch (cmd) {
+ case SIOCRTMSG:
+ case SIOCGIFCOUNT:
+ case SIOCSRARP:
+ case SIOCGRARP:
+ case SIOCDRARP:
+ case SIOCSIFLINK:
+ case SIOCGIFSLAVE:
+ case SIOCSIFSLAVE:
+ return -EINVAL;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
static long compat_sock_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
@@ -2477,6 +3052,9 @@ static long compat_sock_ioctl(struct file *file, unsigned cmd,
(cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
ret = compat_wext_handle_ioctl(net, cmd, arg);
+ if (ret == -ENOIOCTLCMD)
+ ret = compat_sock_ioctl_trans(file, sock, cmd, arg);
+
return ret;
}
#endif
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 22e8fd89477f..c7450c8f0a7c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
* @sap: buffer into which to plant socket address
* @salen: size of buffer
*
+ * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
+ * rpc_pton() require proper string termination to be successful.
+ *
* Returns the size of the socket address if successful; otherwise
* zero is returned.
*/
size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
struct sockaddr *sap, const size_t salen)
{
- char *c, buf[RPCBIND_MAXUADDRLEN];
+ char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
unsigned long portlo, porthi;
unsigned short port;
- if (uaddr_len > sizeof(buf))
+ if (uaddr_len > RPCBIND_MAXUADDRLEN)
return 0;
memcpy(buf, uaddr, uaddr_len);
- buf[uaddr_len] = '\n';
- buf[uaddr_len + 1] = '\0';
-
+ buf[uaddr_len] = '\0';
c = strrchr(buf, '.');
if (unlikely(c == NULL))
return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
if (unlikely(portlo > 255))
return 0;
- c[0] = '\n';
- c[1] = '\0';
-
+ *c = '\0';
c = strrchr(buf, '.');
if (unlikely(c == NULL))
return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
port = (unsigned short)((porthi << 8) | portlo);
- c[0] = '\0';
-
+ *c = '\0';
if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
return 0;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 54a4e042f104..7535a7bed2fa 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -332,9 +332,9 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
list_add_tail(&new->cr_lru, &free);
spin_unlock(&cache->lock);
found:
- if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags)
- && cred->cr_ops->cr_init != NULL
- && !(flags & RPCAUTH_LOOKUP_NEW)) {
+ if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
+ cred->cr_ops->cr_init != NULL &&
+ !(flags & RPCAUTH_LOOKUP_NEW)) {
int res = cred->cr_ops->cr_init(auth, cred);
if (res < 0) {
put_rpccred(cred);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index f160be6c1a46..17562b4c35f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -75,8 +75,8 @@ krb5_get_seq_num(struct crypto_blkcipher *key,
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
return code;
- if ((plain[4] != plain[5]) || (plain[4] != plain[6])
- || (plain[4] != plain[7]))
+ if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
+ (plain[4] != plain[7]))
return (s32)KG_BAD_SEQ;
*direction = plain[4];
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index f6c51e562a02..e34bc531fcb9 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -105,8 +105,8 @@ static int rsi_match(struct cache_head *a, struct cache_head *b)
{
struct rsi *item = container_of(a, struct rsi, h);
struct rsi *tmp = container_of(b, struct rsi, h);
- return netobj_equal(&item->in_handle, &tmp->in_handle)
- && netobj_equal(&item->in_token, &tmp->in_token);
+ return netobj_equal(&item->in_handle, &tmp->in_handle) &&
+ netobj_equal(&item->in_token, &tmp->in_token);
}
static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index d6eee291a0e2..39bddba53ba1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -401,9 +401,8 @@ static int cache_clean(void)
for (; ch; cp= & ch->next, ch= *cp) {
if (current_detail->nextcheck > ch->expiry_time)
current_detail->nextcheck = ch->expiry_time+1;
- if (ch->expiry_time >= get_seconds()
- && ch->last_refresh >= current_detail->flush_time
- )
+ if (ch->expiry_time >= get_seconds() &&
+ ch->last_refresh >= current_detail->flush_time)
continue;
if (test_and_clear_bit(CACHE_PENDING, &ch->flags))
cache_dequeue(current_detail, ch);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 952f206ff307..538ca433a56c 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1103,8 +1103,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
procp->pc_release(rqstp, NULL, rqstp->rq_resp);
goto dropit;
}
- if (*statp == rpc_success && (xdr = procp->pc_encode)
- && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
+ if (*statp == rpc_success &&
+ (xdr = procp->pc_encode) &&
+ !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
dprintk("svc: failed to encode reply\n");
/* serv->sv_stats->rpcsystemerr++; */
*statp = rpc_system_err;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index df124f78ee48..b845e2293dfe 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -129,8 +129,8 @@ static void svc_xprt_free(struct kref *kref)
struct svc_xprt *xprt =
container_of(kref, struct svc_xprt, xpt_ref);
struct module *owner = xprt->xpt_class->xcl_owner;
- if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
- && xprt->xpt_auth_cache != NULL)
+ if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
+ xprt->xpt_auth_cache != NULL)
svcauth_unix_info_release(xprt->xpt_auth_cache);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
@@ -846,8 +846,8 @@ static void svc_age_temp_xprts(unsigned long closure)
* through, close it. */
if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags))
continue;
- if (atomic_read(&xprt->xpt_ref.refcount) > 1
- || test_bit(XPT_BUSY, &xprt->xpt_flags))
+ if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+ test_bit(XPT_BUSY, &xprt->xpt_flags))
continue;
svc_xprt_get(xprt);
list_move(le, &to_be_aged);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index e64109b02aee..4e9393c24687 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -46,8 +46,8 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
dprintk("svc: svc_authenticate (%d)\n", flavor);
spin_lock(&authtab_lock);
- if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor])
- || !try_module_get(aops->owner)) {
+ if (flavor >= RPC_AUTH_MAXFLAVOR || !(aops = authtab[flavor]) ||
+ !try_module_get(aops->owner)) {
spin_unlock(&authtab_lock);
*authp = rpc_autherr_badcred;
return SVC_DENIED;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index f4c7ff3a53e6..4a8f6558718a 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -125,8 +125,8 @@ static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
struct ip_map *orig = container_of(corig, struct ip_map, h);
struct ip_map *new = container_of(cnew, struct ip_map, h);
- return strcmp(orig->m_class, new->m_class) == 0
- && ipv6_addr_equal(&orig->m_addr, &new->m_addr);
+ return strcmp(orig->m_class, new->m_class) == 0 &&
+ ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c2a17876defd..870929e08e5d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -111,7 +111,7 @@ static void svc_release_skb(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram_locked(svsk->sk_sk, skb);
}
}
@@ -578,7 +578,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
"svc: received unknown control message %d/%d; "
"dropping RPC reply datagram\n",
cmh->cmsg_level, cmh->cmsg_type);
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
@@ -588,18 +588,18 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
local_bh_enable();
/* checksum error */
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
local_bh_enable();
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram_locked(svsk->sk_sk, skb);
} else {
/* we can use it in-place */
rqstp->rq_arg.head[0].iov_base = skb->data +
sizeof(struct udphdr);
rqstp->rq_arg.head[0].iov_len = len;
if (skb_checksum_complete(skb)) {
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
rqstp->rq_xprt_ctxt = skb;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 9e884383134f..f92e37eb413c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -337,10 +337,9 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
- if ((RDMA_TRANSPORT_IWARP ==
- rdma_node_get_transport(xprt->sc_cm_id->
- device->node_type))
- && sge_count > 1)
+ if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
+ RDMA_TRANSPORT_IWARP) &&
+ sge_count > 1)
return 1;
else
return min_t(int, sge_count, xprt->sc_max_sge);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 0cf5e8c27a10..3fa5751af0ec 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -42,6 +42,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 465aafc2007f..2209aa87d899 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -878,8 +878,8 @@ if (strnicmp(ia->ri_id->device->dma_device->bus->name, "pci", 3) == 0) {
* others indicate a transport condition which has already
* undergone a best-effort.
*/
- if (ep->rep_connected == -ECONNREFUSED
- && ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
+ if (ep->rep_connected == -ECONNREFUSED &&
+ ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
goto retry;
}
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 689fdefe9d04..a7eac00cd363 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -437,11 +437,11 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf)
break;
case ROUTE_ADDITION:
if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr)
- || is_slave(rem_node));
+ assert(!in_own_cluster(c_ptr->addr) ||
+ is_slave(rem_node));
} else {
- assert(in_own_cluster(c_ptr->addr)
- && !is_slave(rem_node));
+ assert(in_own_cluster(c_ptr->addr) &&
+ !is_slave(rem_node));
}
n_ptr = c_ptr->nodes[tipc_node(rem_node)];
if (!n_ptr)
@@ -451,11 +451,11 @@ void tipc_cltr_recv_routing_table(struct sk_buff *buf)
break;
case ROUTE_REMOVAL:
if (!is_slave(tipc_own_addr)) {
- assert(!in_own_cluster(c_ptr->addr)
- || is_slave(rem_node));
+ assert(!in_own_cluster(c_ptr->addr) ||
+ is_slave(rem_node));
} else {
- assert(in_own_cluster(c_ptr->addr)
- && !is_slave(rem_node));
+ assert(in_own_cluster(c_ptr->addr) &&
+ !is_slave(rem_node));
}
n_ptr = c_ptr->nodes[tipc_node(rem_node)];
if (n_ptr)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index dd4c18b9a35b..6f50f6423f63 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -378,8 +378,8 @@ static void link_timeout(struct link *l_ptr)
struct tipc_msg *msg = buf_msg(l_ptr->first_out);
u32 length = msg_size(msg);
- if ((msg_user(msg) == MSG_FRAGMENTER)
- && (msg_type(msg) == FIRST_FRAGMENT)) {
+ if ((msg_user(msg) == MSG_FRAGMENTER) &&
+ (msg_type(msg) == FIRST_FRAGMENT)) {
length = msg_size(msg_get_wrapped(msg));
}
if (length) {
@@ -2788,8 +2788,8 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
/* Is there an incomplete message waiting for this fragment? */
- while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
- || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
+ while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
+ (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
prev = pbuf;
pbuf = pbuf->next;
}
@@ -3325,8 +3325,8 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
(l_ptr->last_out)), l_ptr->out_queue_size);
if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
msg_seqno(buf_msg(l_ptr->first_out)))
- != (l_ptr->out_queue_size - 1))
- || (l_ptr->last_out->next != NULL)) {
+ != (l_ptr->out_queue_size - 1)) ||
+ (l_ptr->last_out->next != NULL)) {
tipc_printf(buf, "\nSend queue inconsistency\n");
tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e6d9abf7440e..1ea64f09cc45 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -177,6 +177,7 @@ static void reject_rx_queue(struct sock *sk)
* @net: network namespace (must be default network)
* @sock: pre-allocated socket structure
* @protocol: protocol indicator (must be 0)
+ * @kern: caused by kernel or by userspace?
*
* This routine creates additional data structures used by the TIPC socket,
* initializes them, and links them together.
@@ -184,7 +185,8 @@ static void reject_rx_queue(struct sock *sk)
* Returns 0 on success, errno otherwise
*/
-static int tipc_create(struct net *net, struct socket *sock, int protocol)
+static int tipc_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
const struct proto_ops *ops;
socket_state state;
@@ -193,7 +195,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol)
/* Validate arguments */
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (unlikely(protocol != 0))
@@ -1134,13 +1136,11 @@ restart:
/* Loop around if more data is required */
- if ((sz_copied < buf_len) /* didn't get all requested data */
- && (!skb_queue_empty(&sk->sk_receive_queue) ||
- (flags & MSG_WAITALL))
- /* ... and more is ready or required */
- && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
- && (!err) /* ... and haven't reached a FIN */
- )
+ if ((sz_copied < buf_len) && /* didn't get all requested data */
+ (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (flags & MSG_WAITALL)) && /* and more is ready or required */
+ (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
+ (!err)) /* and haven't reached a FIN */
goto restart;
exit:
@@ -1528,7 +1528,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_create(sock_net(sock->sk), new_sock, 0);
+ res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
if (!res) {
struct sock *new_sk = new_sock->sk;
struct tipc_sock *new_tsock = tipc_sk(new_sk);
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0747d8a9232f..ac91f0dfa144 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -364,9 +364,9 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
sub->seq.upper = htohl(s->seq.upper, swap);
sub->timeout = htohl(s->timeout, swap);
sub->filter = htohl(s->filter, swap);
- if ((!(sub->filter & TIPC_SUB_PORTS)
- == !(sub->filter & TIPC_SUB_SERVICE))
- || (sub->seq.lower > sub->seq.upper)) {
+ if ((!(sub->filter & TIPC_SUB_PORTS) ==
+ !(sub->filter & TIPC_SUB_SERVICE)) ||
+ (sub->seq.lower > sub->seq.upper)) {
warn("Subscription rejected, illegal request\n");
kfree(sub);
subscr_terminate(subscriber);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3291902f0b88..f25511903115 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -621,7 +621,8 @@ out:
return sk;
}
-static int unix_create(struct net *net, struct socket *sock, int protocol)
+static int unix_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
@@ -1032,8 +1033,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
addr_len = err;
- if (test_bit(SOCK_PASSCRED, &sock->flags)
- && !u->addr && (err = unix_autobind(sock)) != 0)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
+ (err = unix_autobind(sock)) != 0)
goto out;
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
@@ -1258,7 +1259,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
{
struct sock *sk = sock->sk;
struct unix_sock *u;
- struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
+ DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
if (peer) {
@@ -1377,8 +1378,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto out;
}
- if (test_bit(SOCK_PASSCRED, &sock->flags)
- && !u->addr && (err = unix_autobind(sock)) != 0)
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
+ && (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d631a17186bc..d3bfb6ef13ae 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -388,6 +388,8 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&wimax_dev->mutex);
result = wimax_dev_is_ready(wimax_dev);
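+ /* message exchange is still allowed when the device reports -ENOMEDIUM */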
+ if (result == -ENOMEDIUM)
+ result = 0;
if (result < 0)
goto error_not_ready;
result = -ENOSYS;
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 70ef4df863b9..ae752a64d920 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -107,8 +107,8 @@ void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
if (state != wimax_dev->rf_hw) {
wimax_dev->rf_hw = state;
- if (wimax_dev->rf_hw == WIMAX_RF_ON
- && wimax_dev->rf_sw == WIMAX_RF_ON)
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
wimax_state = WIMAX_ST_READY;
else
wimax_state = WIMAX_ST_RADIO_OFF;
@@ -163,8 +163,8 @@ void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
if (state != wimax_dev->rf_sw) {
wimax_dev->rf_sw = state;
- if (wimax_dev->rf_hw == WIMAX_RF_ON
- && wimax_dev->rf_sw == WIMAX_RF_ON)
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
wimax_state = WIMAX_ST_READY;
else
wimax_state = WIMAX_ST_RADIO_OFF;
@@ -305,8 +305,15 @@ int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
mutex_lock(&wimax_dev->mutex);
result = wimax_dev_is_ready(wimax_dev);
- if (result < 0)
+ if (result < 0) {
+ /* While initializing, wimax-tools versions earlier than
+ * 1.4.3 use this call to check if the device is a valid
+ * WiMAX device, so we always allow the query to proceed
+ * and report the radios as all off. */
+ if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
+ result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
goto error_not_ready;
+ }
switch (state) {
case WIMAX_RF_ON:
case WIMAX_RF_OFF:
@@ -355,6 +362,7 @@ int wimax_rfkill_add(struct wimax_dev *wimax_dev)
wimax_dev->rfkill = rfkill;
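+ /* radios start out reported as blocked in software */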
+ rfkill_init_sw_state(rfkill, 1);
result = rfkill_register(wimax_dev->rfkill);
if (result < 0)
goto error_rfkill_register;
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 79fb7d7c640f..c8866412f830 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -60,6 +60,14 @@
#define D_SUBMODULE stack
#include "debug-levels.h"
+static char wimax_debug_params[128];
+module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
+ 0644);
+MODULE_PARM_DESC(debug,
+ "String of space-separated NAME:VALUE pairs, where each NAME "
+ "is a debug submodule and VALUE is the initial debug value "
+ "to set.");
+
/*
* Authoritative source for the RE_STATE_CHANGE attribute policy
*
@@ -562,6 +570,9 @@ int __init wimax_subsys_init(void)
int result, cnt;
d_fnstart(4, NULL, "()\n");
+ d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
+ "wimax.debug");
+
snprintf(wimax_gnl_family.name, sizeof(wimax_gnl_family.name),
"WiMAX");
result = genl_register_family(&wimax_gnl_family);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 614bdcec1c80..90e93a5701aa 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -74,12 +74,6 @@ config CFG80211_REG_DEBUG
If unsure, say N.
-config CFG80211_DEFAULT_PS_VALUE
- int
- default 1 if CFG80211_DEFAULT_PS
- default 0
- depends on CFG80211
-
config CFG80211_DEFAULT_PS
bool "enable powersave by default"
depends on CFG80211
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 07252967be9c..fe6f402a22af 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -14,6 +14,7 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/sched.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
#include "nl80211.h"
@@ -230,7 +231,7 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev;
int err = 0;
- if (!rdev->wiphy.netnsok)
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
return -EOPNOTSUPP;
list_for_each_entry(wdev, &rdev->netdev_list, list) {
@@ -367,7 +368,9 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
rdev->wiphy.dev.class = &ieee80211_class;
rdev->wiphy.dev.platform_data = rdev;
- rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE;
+#ifdef CONFIG_CFG80211_DEFAULT_PS
+ rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+#endif
wiphy_net_set(&rdev->wiphy, &init_net);
@@ -482,7 +485,7 @@ int wiphy_register(struct wiphy *wiphy)
if (IS_ERR(rdev->wiphy.debugfsdir))
rdev->wiphy.debugfsdir = NULL;
- if (wiphy->custom_regulatory) {
+ if (wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
struct regulatory_request request;
request.wiphy_idx = get_wiphy_idx(wiphy);
@@ -546,7 +549,7 @@ void wiphy_unregister(struct wiphy *wiphy)
* First remove the hardware from everywhere, this makes
* it impossible to find from userspace.
*/
- cfg80211_debugfs_rdev_del(rdev);
+ debugfs_remove_recursive(rdev->wiphy.debugfsdir);
list_del(&rdev->list);
/*
@@ -569,7 +572,6 @@ void wiphy_unregister(struct wiphy *wiphy)
cfg80211_rdev_list_generation++;
device_del(&rdev->wiphy.dev);
- debugfs_remove(rdev->wiphy.debugfsdir);
mutex_unlock(&cfg80211_mutex);
@@ -681,7 +683,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->wext.default_key = -1;
wdev->wext.default_mgmt_key = -1;
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
- wdev->wext.ps = wdev->wiphy->ps_default;
+ if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
+ wdev->wext.ps = true;
+ else
+ wdev->wext.ps = false;
wdev->wext.ps_timeout = 100;
if (rdev->ops->set_power_mgmt)
if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
@@ -693,6 +698,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
#endif
if (!dev->ethtool_ops)
dev->ethtool_ops = &cfg80211_ethtool_ops;
+
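+ /* normal (3-address) station and IBSS interfaces cannot be bridged */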
+ if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
+ dev->priv_flags |= IFF_DONT_BRIDGE;
break;
case NETDEV_GOING_DOWN:
switch (wdev->iftype) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 2a33d8bc886b..a9db9e6255bb 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -72,17 +72,6 @@ struct cfg80211_registered_device {
/* current channel */
struct ieee80211_channel *channel;
-#ifdef CONFIG_CFG80211_DEBUGFS
- /* Debugfs entries */
- struct wiphy_debugfsdentries {
- struct dentry *rts_threshold;
- struct dentry *fragmentation_threshold;
- struct dentry *short_retry_limit;
- struct dentry *long_retry_limit;
- struct dentry *ht40allow_map;
- } debugfs;
-#endif
-
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -284,6 +273,8 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct cfg80211_ibss_params *params,
struct cfg80211_cached_keys *connkeys);
void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
+int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext);
int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
@@ -358,6 +349,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_conn_work(struct work_struct *work);
+void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
/* internal helpers */
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 13d93d84f902..2e4895615037 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -104,11 +104,7 @@ static const struct file_operations ht40allow_map_ops = {
};
#define DEBUGFS_ADD(name) \
- rdev->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
- &rdev->wiphy, &name## _ops);
-#define DEBUGFS_DEL(name) \
- debugfs_remove(rdev->debugfs.name); \
- rdev->debugfs.name = NULL;
+ debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
@@ -120,12 +116,3 @@ void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
DEBUGFS_ADD(long_retry_limit);
DEBUGFS_ADD(ht40allow_map);
}
-
-void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev)
-{
- DEBUGFS_DEL(rts_threshold);
- DEBUGFS_DEL(fragmentation_threshold);
- DEBUGFS_DEL(short_retry_limit);
- DEBUGFS_DEL(long_retry_limit);
- DEBUGFS_DEL(ht40allow_map);
-}
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
index 6419b6d6ce3e..74fdd3811427 100644
--- a/net/wireless/debugfs.h
+++ b/net/wireless/debugfs.h
@@ -3,12 +3,9 @@
#ifdef CONFIG_CFG80211_DEBUGFS
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev);
-void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev);
#else
static inline
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {}
-static inline
-void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) {}
#endif
#endif /* __CFG80211_DEBUGFS_H */
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 39b6d92e2828..34dfc93fa713 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -169,8 +169,8 @@ void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
wdev_unlock(wdev);
}
-static int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool nowext)
+int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 1f87b4e7f4f7..1001db4912f7 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -94,6 +94,13 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
}
WARN_ON(!bss);
+ } else if (wdev->conn) {
+ cfg80211_sme_failed_assoc(wdev);
+ /*
+ * do not call connect_result() now because the
+ * sme will schedule work that does it later.
+ */
+ goto out;
}
if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
@@ -236,21 +243,12 @@ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
}
EXPORT_SYMBOL(cfg80211_send_disassoc);
-void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
+static void __cfg80211_auth_remove(struct wireless_dev *wdev, const u8 *addr)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct wiphy *wiphy = wdev->wiphy;
- struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
int i;
bool done = false;
- wdev_lock(wdev);
-
- nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
- if (wdev->sme_state == CFG80211_SME_CONNECTING)
- __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- false, NULL);
+ ASSERT_WDEV_LOCK(wdev);
for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
if (wdev->authtry_bsses[i] &&
@@ -265,6 +263,29 @@ void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
}
WARN_ON(!done);
+}
+
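+/*
+ * An in-progress authentication was canceled; forget the pending auth
+ * attempt towards @addr. The caller must hold the wdev lock.
+ */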
+void __cfg80211_auth_canceled(struct net_device *dev, const u8 *addr)
+{
+ __cfg80211_auth_remove(dev->ieee80211_ptr, addr);
+}
+EXPORT_SYMBOL(__cfg80211_auth_canceled);
+
+void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ wdev_lock(wdev);
+
+ nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
+ if (wdev->sme_state == CFG80211_SME_CONNECTING)
+ __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+
+ __cfg80211_auth_remove(wdev, addr);
wdev_unlock(wdev);
}
@@ -439,12 +460,23 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct cfg80211_assoc_request req;
struct cfg80211_internal_bss *bss;
int i, err, slot = -1;
+ bool was_connected = false;
ASSERT_WDEV_LOCK(wdev);
memset(&req, 0, sizeof(req));
- if (wdev->current_bss)
+ if (wdev->current_bss && prev_bssid &&
+ memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) {
+ /*
+ * Trying to reassociate: allow this to proceed and let the old
+ * association be dropped when the new one completes.
+ */
+ if (wdev->sme_state == CFG80211_SME_CONNECTED) {
+ was_connected = true;
+ wdev->sme_state = CFG80211_SME_CONNECTING;
+ }
+ } else if (wdev->current_bss)
return -EALREADY;
req.ie = ie;
@@ -454,8 +486,11 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
req.prev_bssid = prev_bssid;
req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
- if (!req.bss)
+ if (!req.bss) {
+ if (was_connected)
+ wdev->sme_state = CFG80211_SME_CONNECTED;
return -ENOENT;
+ }
bss = bss_from_pub(req.bss);
@@ -473,6 +508,8 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
out:
+ if (err && was_connected)
+ wdev->sme_state = CFG80211_SME_CONNECTED;
/* still a reference in wdev->auth_bsses[slot] */
cfg80211_put_bss(req.bss);
return err;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f48394126bf9..149539ade15e 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -138,6 +138,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
+ [NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
};
/* policy for the attributes */
@@ -151,6 +152,26 @@ nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
[NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
};
+/* ifidx get helper */
+static int nl80211_get_ifidx(struct netlink_callback *cb)
+{
+ int res;
+
+ res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+ nl80211_fam.attrbuf, nl80211_fam.maxattr,
+ nl80211_policy);
+ if (res)
+ return res;
+
+ if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
+ return -EINVAL;
+
+ res = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
+ if (!res)
+ return -EINVAL;
+ return res;
+}
+
/* IE validation */
static bool is_valid_ie_attr(const struct nlattr *attr)
{
@@ -540,7 +561,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(deauth, DEAUTHENTICATE);
CMD(disassoc, DISASSOCIATE);
CMD(join_ibss, JOIN_IBSS);
- if (dev->wiphy.netnsok) {
+ if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
}
@@ -947,6 +968,32 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
return 0;
}
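+/*
+ * Check whether 4-address (WDS) frames may be enabled for this interface
+ * type: the wiphy must advertise WIPHY_FLAG_4ADDR_AP (for AP_VLAN) or
+ * WIPHY_FLAG_4ADDR_STATION. Turning it off is refused with -EBUSY while
+ * the netdev is enslaved to a bridge.
+ */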
+static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u8 use_4addr,
+ enum nl80211_iftype iftype)
+{
+ if (!use_4addr) {
+ if (netdev && netdev->br_port)
+ return -EBUSY;
+ return 0;
+ }
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)
+ return 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
@@ -987,6 +1034,16 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
change = true;
}
+ if (info->attrs[NL80211_ATTR_4ADDR]) {
+ params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
+ change = true;
+ err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype);
+ if (err)
+ goto unlock;
+ } else {
+ params.use_4addr = -1;
+ }
+
if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
if (ntype != NL80211_IFTYPE_MONITOR) {
err = -EINVAL;
@@ -1006,6 +1063,9 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
else
err = 0;
+ if (!err && params.use_4addr != -1)
+ dev->ieee80211_ptr->use_4addr = params.use_4addr;
+
unlock:
dev_put(dev);
cfg80211_unlock_rdev(rdev);
@@ -1053,6 +1113,13 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
}
+ if (info->attrs[NL80211_ATTR_4ADDR]) {
+ params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
+ err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type);
+ if (err)
+ goto unlock;
+ }
+
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
@@ -1682,20 +1749,10 @@ static int nl80211_dump_station(struct sk_buff *skb,
int sta_idx = cb->args[1];
int err;
- if (!ifidx) {
- err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
- nl80211_fam.attrbuf, nl80211_fam.maxattr,
- nl80211_policy);
- if (err)
- return err;
-
- if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
- return -EINVAL;
-
- ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
- if (!ifidx)
- return -EINVAL;
- }
+ if (!ifidx)
+ ifidx = nl80211_get_ifidx(cb);
+ if (ifidx < 0)
+ return ifidx;
rtnl_lock();
@@ -1800,7 +1857,7 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
}
/*
- * Get vlan interface making sure it is on the right wiphy.
+ * Get the vlan interface, making sure it is running and on the right wiphy.
*/
static int get_vlan(struct genl_info *info,
struct cfg80211_registered_device *rdev,
@@ -1818,6 +1875,8 @@ static int get_vlan(struct genl_info *info,
return -EINVAL;
if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
return -EINVAL;
+ if (!netif_running(*vlan))
+ return -ENETDOWN;
}
return 0;
}
@@ -2105,9 +2164,9 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
pinfo->frame_qlen);
- if (pinfo->filled & MPATH_INFO_DSN)
- NLA_PUT_U32(msg, NL80211_MPATH_INFO_DSN,
- pinfo->dsn);
+ if (pinfo->filled & MPATH_INFO_SN)
+ NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
+ pinfo->sn);
if (pinfo->filled & MPATH_INFO_METRIC)
NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
pinfo->metric);
@@ -2145,20 +2204,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
int path_idx = cb->args[1];
int err;
- if (!ifidx) {
- err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
- nl80211_fam.attrbuf, nl80211_fam.maxattr,
- nl80211_policy);
- if (err)
- return err;
-
- if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
- return -EINVAL;
-
- ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
- if (!ifidx)
- return -EINVAL;
- }
+ if (!ifidx)
+ ifidx = nl80211_get_ifidx(cb);
+ if (ifidx < 0)
+ return ifidx;
rtnl_lock();
@@ -2605,6 +2654,8 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
cur_params.dot11MeshHWMPpreqMinInterval);
NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
cur_params.dot11MeshHWMPnetDiameterTraversalTime);
+ NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
+ cur_params.dot11MeshHWMPRootMode);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
err = genlmsg_reply(msg, info);
@@ -2715,6 +2766,10 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
dot11MeshHWMPnetDiameterTraversalTime,
mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshHWMPRootMode, mask,
+ NL80211_MESHCONF_HWMP_ROOTMODE,
+ nla_get_u8);
/* Apply changes */
err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask);
@@ -2988,7 +3043,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- request->n_channels = n_channels;
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
@@ -2999,32 +3053,53 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->ie = (void *)(request->channels + n_channels);
}
+ i = 0;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
- request->n_channels = n_channels;
- i = 0;
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) {
- request->channels[i] = ieee80211_get_channel(wiphy, nla_get_u32(attr));
- if (!request->channels[i]) {
+ struct ieee80211_channel *chan;
+
+ chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
+
+ if (!chan) {
err = -EINVAL;
goto out_free;
}
+
+ /* ignore disabled channels */
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ request->channels[i] = chan;
i++;
}
} else {
/* all channels */
- i = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
if (!wiphy->bands[band])
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
- request->channels[i] = &wiphy->bands[band]->channels[j];
+ struct ieee80211_channel *chan;
+
+ chan = &wiphy->bands[band]->channels[j];
+
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ request->channels[i] = chan;
i++;
}
}
}
+ if (!i) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ request->n_channels = i;
+
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
@@ -3161,21 +3236,11 @@ static int nl80211_dump_scan(struct sk_buff *skb,
int start = cb->args[1], idx = 0;
int err;
- if (!ifidx) {
- err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
- nl80211_fam.attrbuf, nl80211_fam.maxattr,
- nl80211_policy);
- if (err)
- return err;
-
- if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX])
- return -EINVAL;
-
- ifidx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]);
- if (!ifidx)
- return -EINVAL;
- cb->args[0] = ifidx;
- }
+ if (!ifidx)
+ ifidx = nl80211_get_ifidx(cb);
+ if (ifidx < 0)
+ return ifidx;
+ cb->args[0] = ifidx;
dev = dev_get_by_index(sock_net(skb->sk), ifidx);
if (!dev)
@@ -3218,6 +3283,106 @@ static int nl80211_dump_scan(struct sk_buff *skb,
return err;
}
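+/* Fill one NL80211_CMD_NEW_SURVEY_RESULTS message for a single channel's survey data */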
+static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
+ int flags, struct net_device *dev,
+ struct survey_info *survey)
+{
+ void *hdr;
+ struct nlattr *infoattr;
+
+ /* Survey without a channel doesn't make sense */
+ if (!survey->channel)
+ return -EINVAL;
+
+ hdr = nl80211hdr_put(msg, pid, seq, flags,
+ NL80211_CMD_NEW_SURVEY_RESULTS);
+ if (!hdr)
+ return -ENOMEM;
+
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+
+ infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
+ if (!infoattr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
+ survey->channel->center_freq);
+ if (survey->filled & SURVEY_INFO_NOISE_DBM)
+ NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
+ survey->noise);
+
+ nla_nest_end(msg, infoattr);
+
+ return genlmsg_end(msg, hdr);
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
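+/* NL80211_CMD_GET_SURVEY dump handler: walk the driver's per-channel survey records */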
+static int nl80211_dump_survey(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct survey_info survey;
+ struct cfg80211_registered_device *dev;
+ struct net_device *netdev;
+ int ifidx = cb->args[0];
+ int survey_idx = cb->args[1];
+ int res;
+
+ if (!ifidx)
+ ifidx = nl80211_get_ifidx(cb);
+ if (ifidx < 0)
+ return ifidx;
+ cb->args[0] = ifidx;
+
+ rtnl_lock();
+
+ netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
+ if (!netdev) {
+ res = -ENODEV;
+ goto out_rtnl;
+ }
+
+ dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
+ if (IS_ERR(dev)) {
+ res = PTR_ERR(dev);
+ goto out_rtnl;
+ }
+
+ if (!dev->ops->dump_survey) {
+ res = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ while (1) {
+ res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
+ &survey);
+ if (res == -ENOENT)
+ break;
+ if (res)
+ goto out_err;
+
+ if (nl80211_send_survey(skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ netdev,
+ &survey) < 0)
+ goto out;
+ survey_idx++;
+ }
+
+ out:
+ cb->args[1] = survey_idx;
+ res = skb->len;
+ out_err:
+ cfg80211_unlock_rdev(dev);
+ out_rtnl:
+ rtnl_unlock();
+
+ return res;
+}
+
static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
{
return auth_type <= NL80211_AUTHTYPE_MAX;
@@ -4295,6 +4460,11 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = NL80211_CMD_GET_SURVEY,
+ .policy = nl80211_policy,
+ .dumpit = nl80211_dump_survey,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
.name = "mlme",
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f256dfffbf46..1f33017737fd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1008,7 +1008,7 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
if (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
request_wiphy && request_wiphy == wiphy &&
- request_wiphy->strict_regulatory) {
+ request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
/*
* This guarantees the driver's requested regulatory domain
* will always be used as a base for further regulatory
@@ -1051,13 +1051,13 @@ static bool ignore_reg_update(struct wiphy *wiphy,
if (!last_request)
return true;
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
- wiphy->custom_regulatory)
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
return true;
/*
* wiphy->regd will be set once the device has its own
* desired regulatory domain set
*/
- if (wiphy->strict_regulatory && !wiphy->regd &&
+ if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
!is_world_regdom(last_request->alpha2))
return true;
return false;
@@ -1093,7 +1093,7 @@ static void handle_reg_beacon(struct wiphy *wiphy,
chan->beacon_found = true;
- if (wiphy->disable_beacon_hints)
+ if (wiphy->flags & WIPHY_FLAG_DISABLE_BEACON_HINTS)
return;
chan_before.center_freq = chan->center_freq;
@@ -1164,7 +1164,7 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
return true;
if (last_request &&
last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- wiphy->custom_regulatory)
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
return true;
return false;
}
@@ -1591,7 +1591,8 @@ static void reg_process_hint(struct regulatory_request *reg_request)
r = __regulatory_hint(wiphy, reg_request);
/* This is required so that the orig_* parameters are saved */
- if (r == -EALREADY && wiphy && wiphy->strict_regulatory)
+ if (r == -EALREADY && wiphy &&
+ wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
wiphy_update_regulatory(wiphy, reg_request->initiator);
out:
mutex_unlock(&reg_mutex);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2e8c515f3c5c..96df34c3c6ee 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -217,7 +217,7 @@ static bool is_mesh(struct cfg80211_bss *a,
a->len_information_elements);
if (!ie)
return false;
- if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
+ if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
return false;
/*
@@ -225,7 +225,8 @@ static bool is_mesh(struct cfg80211_bss *a,
* comparing since that may differ between stations taking
* part in the same mesh.
*/
- return memcmp(ie + 2, meshcfg, IEEE80211_MESH_CONFIG_LEN - 2) == 0;
+ return memcmp(ie + 2, meshcfg,
+ sizeof(struct ieee80211_meshconf_ie) - 2) == 0;
}
static int cmp_bss(struct cfg80211_bss *a,
@@ -399,7 +400,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
res->pub.information_elements,
res->pub.len_information_elements);
if (!meshid || !meshcfg ||
- meshcfg[1] != IEEE80211_MESH_CONFIG_LEN) {
+ meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
/* bogus mesh */
kref_put(&res->ref, bss_release);
return NULL;
@@ -650,9 +651,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
i = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
+
if (!wiphy->bands[band])
continue;
+
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
+ /* ignore disabled channels */
+ if (wiphy->bands[band]->channels[j].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
/* If we have a wireless request structure and the
* wireless request specifies frequencies, then search
@@ -859,7 +866,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
break;
case WLAN_EID_MESH_CONFIG:
ismesh = true;
- if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
+ if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
break;
buf = kmalloc(50, GFP_ATOMIC);
if (!buf)
@@ -867,35 +874,40 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
cfg = ie + 2;
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "Mesh network (version %d)", cfg[0]);
+ sprintf(buf, "Mesh Network Path Selection Protocol ID: "
+ "0x%02X", cfg[0]);
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point(info, current_ev,
+ end_buf,
+ &iwe, buf);
+ sprintf(buf, "Path Selection Metric ID: 0x%02X",
+ cfg[1]);
+ iwe.u.data.length = strlen(buf);
+ current_ev = iwe_stream_add_point(info, current_ev,
+ end_buf,
+ &iwe, buf);
+ sprintf(buf, "Congestion Control Mode ID: 0x%02X",
+ cfg[2]);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf,
&iwe, buf);
- sprintf(buf, "Path Selection Protocol ID: "
- "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
- cfg[4]);
+ sprintf(buf, "Synchronization ID: 0x%02X", cfg[3]);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf,
&iwe, buf);
- sprintf(buf, "Path Selection Metric ID: "
- "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
- cfg[8]);
+ sprintf(buf, "Authentication ID: 0x%02X", cfg[4]);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf,
&iwe, buf);
- sprintf(buf, "Congestion Control Mode ID: "
- "0x%02X%02X%02X%02X", cfg[9], cfg[10],
- cfg[11], cfg[12]);
+ sprintf(buf, "Formation Info: 0x%02X", cfg[5]);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf,
&iwe, buf);
- sprintf(buf, "Channel Precedence: "
- "0x%02X%02X%02X%02X", cfg[13], cfg[14],
- cfg[15], cfg[16]);
+ sprintf(buf, "Capabilities: 0x%02X", cfg[6]);
iwe.u.data.length = strlen(buf);
current_ev = iwe_stream_add_point(info, current_ev,
end_buf,
@@ -925,8 +937,8 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
ie += ie[1] + 2;
}
- if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
- || ismesh) {
+ if (bss->pub.capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS) ||
+ ismesh) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = SIOCGIWMODE;
if (ismesh)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d3624152f7f7..0115d07d2c1a 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -26,6 +26,7 @@ struct cfg80211_conn {
CFG80211_CONN_AUTHENTICATING,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
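+ /* association failed: deauthenticate and report the connection failure */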
+ CFG80211_CONN_DEAUTH_ASSOC_FAIL,
} state;
u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
u8 *ie;
@@ -148,6 +149,12 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
NULL, 0,
WLAN_REASON_DEAUTH_LEAVING);
return err;
+ case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
+ __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+ NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING);
+ /* return an error so that we call __cfg80211_connect_result() */
+ return -EINVAL;
default:
return 0;
}
@@ -158,6 +165,7 @@ void cfg80211_conn_work(struct work_struct *work)
struct cfg80211_registered_device *rdev =
container_of(work, struct cfg80211_registered_device, conn_work);
struct wireless_dev *wdev;
+ u8 bssid_buf[ETH_ALEN], *bssid = NULL;
rtnl_lock();
cfg80211_lock_rdev(rdev);
@@ -173,10 +181,13 @@ void cfg80211_conn_work(struct work_struct *work)
wdev_unlock(wdev);
continue;
}
+ if (wdev->conn->params.bssid) {
+ memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
+ bssid = bssid_buf;
+ }
if (cfg80211_conn_do_work(wdev))
__cfg80211_connect_result(
- wdev->netdev,
- wdev->conn->params.bssid,
+ wdev->netdev, bssid,
NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
false, NULL);
@@ -337,6 +348,15 @@ bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev)
return true;
}
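+/* Association failed: have conn_work send a deauth and report the connect failure */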
+void cfg80211_sme_failed_assoc(struct wireless_dev *wdev)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL;
+ schedule_work(&rdev->conn_work);
+}
+
void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3fc2df86278f..59361fdcb5d0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -320,7 +320,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
break;
case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
if (unlikely(iftype != NL80211_IFTYPE_WDS &&
- iftype != NL80211_IFTYPE_MESH_POINT))
+ iftype != NL80211_IFTYPE_MESH_POINT &&
+ iftype != NL80211_IFTYPE_AP_VLAN &&
+ iftype != NL80211_IFTYPE_STATION))
return -1;
if (iftype == NL80211_IFTYPE_MESH_POINT) {
struct ieee80211s_hdr *meshdr =
@@ -656,7 +658,14 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
!(rdev->wiphy.interface_modes & (1 << ntype)))
return -EOPNOTSUPP;
+ /* if it's part of a bridge, reject changing type to station/ibss */
+ if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC ||
+ ntype == NL80211_IFTYPE_STATION))
+ return -EBUSY;
+
if (ntype != otype) {
+ dev->ieee80211_ptr->use_4addr = false;
+
switch (otype) {
case NL80211_IFTYPE_ADHOC:
cfg80211_leave_ibss(rdev, dev, false);
@@ -680,5 +689,34 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
+ if (!err && params && params->use_4addr != -1)
+ dev->ieee80211_ptr->use_4addr = params->use_4addr;
+
+ if (!err) {
+ dev->priv_flags &= ~IFF_DONT_BRIDGE;
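+ /* re-evaluate whether the new interface type may be bridged */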
+ switch (ntype) {
+ case NL80211_IFTYPE_STATION:
+ if (dev->ieee80211_ptr->use_4addr)
+ break;
+ /* fall through */
+ case NL80211_IFTYPE_ADHOC:
+ dev->priv_flags |= IFF_DONT_BRIDGE;
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ /* bridging OK */
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ /* monitor can't bridge anyway */
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case __NL80211_IFTYPE_AFTER_LAST:
+ /* not happening */
+ break;
+ }
+ }
+
return err;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 561a45cf2a6a..29091ac9f989 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -437,6 +437,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err, i;
+ bool rejoin = false;
if (!wdev->wext.keys) {
wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
@@ -466,8 +467,24 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (remove) {
err = 0;
- if (wdev->current_bss)
+ if (wdev->current_bss) {
+ /*
+ * If we are removing the current TX key, we will need to
+ * join a new IBSS with the privacy bit cleared.
+ */
+ if (idx == wdev->wext.default_key &&
+ wdev->iftype == NL80211_IFTYPE_ADHOC) {
+ __cfg80211_leave_ibss(rdev, wdev->netdev, true);
+ rejoin = true;
+ }
err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
+ }
+ /*
+ * Applications using wireless extensions expect to be
+ * able to delete keys that don't exist, so allow that.
+ */
+ if (err == -ENOENT)
+ err = 0;
if (!err) {
if (!addr) {
wdev->wext.keys->params[idx].key_len = 0;
@@ -478,12 +495,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
else if (idx == wdev->wext.default_mgmt_key)
wdev->wext.default_mgmt_key = -1;
}
- /*
- * Applications using wireless extensions expect to be
- * able to delete keys that don't exist, so allow that.
- */
- if (err == -ENOENT)
- return 0;
+
+ if (!err && rejoin)
+ err = cfg80211_ibss_wext_join(rdev, wdev);
return err;
}
@@ -511,11 +525,25 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
params->cipher == WLAN_CIPHER_SUITE_WEP104) &&
(tx_key || (!addr && wdev->wext.default_key == -1))) {
- if (wdev->current_bss)
+ if (wdev->current_bss) {
+ /*
+ * If we are getting a new TX key when we did not
+ * have one before, we need to join a new IBSS with
+ * the privacy bit set.
+ */
+ if (wdev->iftype == NL80211_IFTYPE_ADHOC &&
+ wdev->wext.default_key == -1) {
+ __cfg80211_leave_ibss(rdev, wdev->netdev, true);
+ rejoin = true;
+ }
err = rdev->ops->set_default_key(&rdev->wiphy,
dev, idx);
- if (!err)
+ }
+ if (!err) {
wdev->wext.default_key = idx;
+ if (rejoin)
+ err = cfg80211_ibss_wext_join(rdev, wdev);
+ }
return err;
}
@@ -539,10 +567,13 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
{
int err;
+ /* devlist mutex needed for possible IBSS re-join */
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_set_encryption(rdev, dev, addr, remove,
tx_key, idx, params);
wdev_unlock(dev->ieee80211_ptr);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
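
The wrapper above fixes the lock order for the WEXT path: rdev->devlist_mtx is taken before the per-wdev lock so that a WEP key change can leave and rejoin an IBSS, which walks the device list. A minimal sketch of the same nesting discipline, with hypothetical names:

#include <linux/mutex.h>

struct example_iface {
	struct mutex list_mtx;	/* outer: protects the device list */
	struct mutex wdev_mtx;	/* inner: protects one interface   */
};

/*
 * Sketch only: always take the list-wide mutex before the
 * per-interface mutex, so an operation that must rejoin an IBSS
 * (and therefore needs the list) never has to acquire the outer
 * lock while holding the inner one.
 */
static void example_key_op(struct example_iface *ei,
			   void (*op)(struct example_iface *))
{
	mutex_lock(&ei->list_mtx);
	mutex_lock(&ei->wdev_mtx);
	op(ei);
	mutex_unlock(&ei->wdev_mtx);
	mutex_unlock(&ei->list_mtx);
}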
@@ -904,8 +935,6 @@ static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
{
- wdev->wext.connect.crypto.wpa_versions = 0;
-
if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA |
IW_AUTH_WPA_VERSION_WPA2|
IW_AUTH_WPA_VERSION_DISABLED))
@@ -933,8 +962,6 @@ static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
{
- wdev->wext.connect.crypto.cipher_group = 0;
-
if (cipher & IW_AUTH_CIPHER_WEP40)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_WEP40;
@@ -950,6 +977,8 @@ static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
else if (cipher & IW_AUTH_CIPHER_AES_CMAC)
wdev->wext.connect.crypto.cipher_group =
WLAN_CIPHER_SUITE_AES_CMAC;
+ else if (cipher & IW_AUTH_CIPHER_NONE)
+ wdev->wext.connect.crypto.cipher_group = 0;
else
return -EINVAL;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index a4e5ddc8d4f5..58dfb954974a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -911,8 +911,9 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
*/
static int wext_permission_check(unsigned int cmd)
{
- if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT)
- && !capable(CAP_NET_ADMIN))
+ if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE ||
+ cmd == SIOCGIWENCODEEXT) &&
+ !capable(CAP_NET_ADMIN))
return -EPERM;
return 0;
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e19d811788a5..e3219e4cd044 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -415,6 +415,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int rc = -ENOPROTOOPT;
+ lock_kernel();
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
@@ -429,6 +430,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
x25_sk(sk)->qbitincl = !!opt;
rc = 0;
out:
+ unlock_kernel();
return rc;
}
@@ -438,6 +440,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int val, len, rc = -ENOPROTOOPT;
+ lock_kernel();
if (level != SOL_X25 || optname != X25_QBITINCL)
goto out;
@@ -458,6 +461,7 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
val = x25_sk(sk)->qbitincl;
rc = copy_to_user(optval, &val, len) ? -EFAULT : 0;
out:
+ unlock_kernel();
return rc;
}
@@ -466,12 +470,14 @@ static int x25_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
+ lock_kernel();
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
rc = 0;
}
+ unlock_kernel();
return rc;
}
@@ -501,13 +507,14 @@ out:
return sk;
}
-static int x25_create(struct net *net, struct socket *sock, int protocol)
+static int x25_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
{
struct sock *sk;
struct x25_sock *x25;
int rc = -ESOCKTNOSUPPORT;
- if (net != &init_net)
+ if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol)
@@ -597,6 +604,7 @@ static int x25_release(struct socket *sock)
struct sock *sk = sock->sk;
struct x25_sock *x25;
+ lock_kernel();
if (!sk)
goto out;
@@ -627,6 +635,7 @@ static int x25_release(struct socket *sock)
sock_orphan(sk);
out:
+ unlock_kernel();
return 0;
}
@@ -634,18 +643,23 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
+ int rc = 0;
+ lock_kernel();
if (!sock_flag(sk, SOCK_ZAPPED) ||
addr_len != sizeof(struct sockaddr_x25) ||
- addr->sx25_family != AF_X25)
- return -EINVAL;
+ addr->sx25_family != AF_X25) {
+ rc = -EINVAL;
+ goto out;
+ }
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
-
- return 0;
+out:
+ unlock_kernel();
+ return rc;
}
static int x25_wait_for_connection_establishment(struct sock *sk)
@@ -686,6 +700,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
struct x25_route *rt;
int rc = 0;
+ lock_kernel();
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
@@ -763,6 +778,7 @@ out_put_route:
x25_route_put(rt);
out:
release_sock(sk);
+ unlock_kernel();
return rc;
}
@@ -802,6 +818,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int rc = -EINVAL;
+ lock_kernel();
if (!sk || sk->sk_state != TCP_LISTEN)
goto out;
@@ -829,6 +846,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
out2:
release_sock(sk);
out:
+ unlock_kernel();
return rc;
}
@@ -838,10 +856,14 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)uaddr;
struct sock *sk = sock->sk;
struct x25_sock *x25 = x25_sk(sk);
+ int rc = 0;
+ lock_kernel();
if (peer) {
- if (sk->sk_state != TCP_ESTABLISHED)
- return -ENOTCONN;
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ rc = -ENOTCONN;
+ goto out;
+ }
sx25->sx25_addr = x25->dest_addr;
} else
sx25->sx25_addr = x25->source_addr;
@@ -849,7 +871,21 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
sx25->sx25_family = AF_X25;
*uaddr_len = sizeof(*sx25);
- return 0;
+out:
+ unlock_kernel();
+ return rc;
+}
+
+static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ int rc;
+
+ lock_kernel();
+ rc = datagram_poll(file, sock, wait);
+ unlock_kernel();
+
+ return rc;
}
int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
@@ -1002,6 +1038,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
size_t size;
int qbit = 0, rc = -EINVAL;
+ lock_kernel();
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_OOB|MSG_EOR|MSG_CMSG_COMPAT))
goto out;
@@ -1166,6 +1203,7 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
release_sock(sk);
rc = len;
out:
+ unlock_kernel();
return rc;
out_kfree_skb:
kfree_skb(skb);
@@ -1186,6 +1224,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned char *asmptr;
int rc = -ENOTCONN;
+ lock_kernel();
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
@@ -1259,6 +1298,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
out_free_dgram:
skb_free_datagram(sk, skb);
out:
+ unlock_kernel();
return rc;
}
@@ -1270,6 +1310,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
int rc;
+ lock_kernel();
switch (cmd) {
case TIOCOUTQ: {
int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
@@ -1430,6 +1471,17 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
break;
}
+ case SIOCX25SCAUSEDIAG: {
+ struct x25_causediag causediag;
+ rc = -EFAULT;
+ if (copy_from_user(&causediag, argp, sizeof(causediag)))
+ break;
+ x25->causediag = causediag;
+ rc = 0;
+ break;
+ }
+
case SIOCX25SCUDMATCHLEN: {
struct x25_subaddr sub_addr;
rc = -EINVAL;
@@ -1472,6 +1524,7 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
rc = -ENOIOCTLCMD;
break;
}
+ unlock_kernel();
return rc;
}
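
The new SIOCX25SCAUSEDIAG ioctl stores the cause and diagnostic bytes that x25_write_internal() copies into outgoing CLEAR REQUEST frames (see the x25_subr.c hunk further down). A hedged userspace sketch, assuming <linux/x25.h> carries the new request code once this patch is applied; 's' must be an AF_X25 socket:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/x25.h>

/*
 * Sketch only: ask the kernel to use the given cause/diagnostic in a
 * later CLEAR REQUEST. SIOCX25SCAUSEDIAG is the ioctl added by this
 * patch; struct x25_causediag comes from <linux/x25.h>.
 */
static int set_clear_cause(int s, unsigned char cause, unsigned char diag)
{
	struct x25_causediag cd;

	memset(&cd, 0, sizeof(cd));
	cd.cause = cause;
	cd.diagnostic = diag;
	return ioctl(s, SIOCX25SCAUSEDIAG, &cd);
}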
@@ -1542,15 +1595,19 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
break;
case SIOCGSTAMP:
rc = -EINVAL;
+ lock_kernel();
if (sk)
rc = compat_sock_get_timestamp(sk,
(struct timeval __user*)argp);
+ unlock_kernel();
break;
case SIOCGSTAMPNS:
rc = -EINVAL;
+ lock_kernel();
if (sk)
rc = compat_sock_get_timestampns(sk,
(struct timespec __user*)argp);
+ unlock_kernel();
break;
case SIOCGIFADDR:
case SIOCSIFADDR:
@@ -1569,16 +1626,22 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
+ lock_kernel();
rc = x25_route_ioctl(cmd, argp);
+ unlock_kernel();
break;
case SIOCX25GSUBSCRIP:
+ lock_kernel();
rc = compat_x25_subscr_ioctl(cmd, argp);
+ unlock_kernel();
break;
case SIOCX25SSUBSCRIP:
rc = -EPERM;
if (!capable(CAP_NET_ADMIN))
break;
+ lock_kernel();
rc = compat_x25_subscr_ioctl(cmd, argp);
+ unlock_kernel();
break;
case SIOCX25GFACILITIES:
case SIOCX25SFACILITIES:
@@ -1587,6 +1650,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
case SIOCX25GCALLUSERDATA:
case SIOCX25SCALLUSERDATA:
case SIOCX25GCAUSEDIAG:
+ case SIOCX25SCAUSEDIAG:
case SIOCX25SCUDMATCHLEN:
case SIOCX25CALLACCPTAPPRV:
case SIOCX25SENDCALLACCPT:
@@ -1600,7 +1664,7 @@ static int compat_x25_ioctl(struct socket *sock, unsigned int cmd,
}
#endif
-static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
+static const struct proto_ops x25_proto_ops = {
.family = AF_X25,
.owner = THIS_MODULE,
.release = x25_release,
@@ -1609,7 +1673,7 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
.socketpair = sock_no_socketpair,
.accept = x25_accept,
.getname = x25_getname,
- .poll = datagram_poll,
+ .poll = x25_datagram_poll,
.ioctl = x25_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_x25_ioctl,
@@ -1624,8 +1688,6 @@ static const struct proto_ops SOCKOPS_WRAPPED(x25_proto_ops) = {
.sendpage = sock_no_sendpage,
};
-SOCKOPS_WRAP(x25_proto, AF_X25);
-
static struct packet_type x25_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_X25),
.func = x25_lapb_receive_frame,
@@ -1659,20 +1721,31 @@ static int __init x25_init(void)
if (rc != 0)
goto out;
- sock_register(&x25_family_ops);
+ rc = sock_register(&x25_family_ops);
+ if (rc != 0)
+ goto out_proto;
dev_add_pack(&x25_packet_type);
- register_netdevice_notifier(&x25_dev_notifier);
+ rc = register_netdevice_notifier(&x25_dev_notifier);
+ if (rc != 0)
+ goto out_sock;
printk(KERN_INFO "X.25 for Linux Version 0.2\n");
-#ifdef CONFIG_SYSCTL
x25_register_sysctl();
-#endif
- x25_proc_init();
+ rc = x25_proc_init();
+ if (rc != 0)
+ goto out_dev;
out:
return rc;
+out_dev:
+ unregister_netdevice_notifier(&x25_dev_notifier);
+out_sock:
+ sock_unregister(AF_X25);
+out_proto:
+ proto_unregister(&x25_proto);
+ goto out;
}
module_init(x25_init);
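
x25_init() above now checks every registration step and unwinds the ones that already succeeded, in reverse order, instead of silently ignoring failures from sock_register() and register_netdevice_notifier(). A minimal sketch of the same goto-unwind idiom; the step_*/undo_* names are placeholders, not kernel functions:

int step_a(void);
int step_b(void);
int step_c(void);
void undo_a(void);
void undo_b(void);

/*
 * Placeholder init: each failing step jumps to a label that undoes
 * only the steps that already completed, mirroring the out_dev /
 * out_sock / out_proto chain in x25_init().
 */
static int example_init(void)
{
	int rc;

	rc = step_a();		/* think proto_register()              */
	if (rc)
		goto out;
	rc = step_b();		/* think sock_register()               */
	if (rc)
		goto out_a;
	rc = step_c();		/* think register_netdevice_notifier() */
	if (rc)
		goto out_b;
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
out:
	return rc;
}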
@@ -1682,9 +1755,7 @@ static void __exit x25_exit(void)
x25_link_free();
x25_route_free();
-#ifdef CONFIG_SYSCTL
x25_unregister_sysctl();
-#endif
unregister_netdevice_notifier(&x25_dev_notifier);
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 66961ea28c91..b95fae9ab393 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -136,8 +136,10 @@ struct net_device *x25_dev_get(char *devname)
#if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
&& dev->type != ARPHRD_ETHER
#endif
- )))
+ ))) {
dev_put(dev);
+ dev = NULL;
+ }
return dev;
}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 511a5986af3e..352b32d216fc 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -225,6 +225,12 @@ void x25_write_internal(struct sock *sk, int frametype)
break;
case X25_CLEAR_REQUEST:
+ dptr = skb_put(skb, 3);
+ *dptr++ = frametype;
+ *dptr++ = x25->causediag.cause;
+ *dptr++ = x25->causediag.diagnostic;
+ break;
+
case X25_RESET_REQUEST:
dptr = skb_put(skb, 3);
*dptr++ = frametype;
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index b39341072aa6..743c0134a6a9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -200,6 +200,40 @@ static struct xfrm_algo_desc aalg_list[] = {
}
},
{
+ .name = "hmac(sha384)",
+
+ .uinfo = {
+ .auth = {
+ .icv_truncbits = 192,
+ .icv_fullbits = 384,
+ }
+ },
+
+ .desc = {
+ .sadb_alg_id = SADB_X_AALG_SHA2_384HMAC,
+ .sadb_alg_ivlen = 0,
+ .sadb_alg_minbits = 384,
+ .sadb_alg_maxbits = 384
+ }
+},
+{
+ .name = "hmac(sha512)",
+
+ .uinfo = {
+ .auth = {
+ .icv_truncbits = 256,
+ .icv_fullbits = 512,
+ }
+ },
+
+ .desc = {
+ .sadb_alg_id = SADB_X_AALG_SHA2_512HMAC,
+ .sadb_alg_ivlen = 0,
+ .sadb_alg_minbits = 512,
+ .sadb_alg_maxbits = 512
+ }
+},
+{
.name = "hmac(rmd160)",
.compat = "rmd160",
@@ -365,6 +399,7 @@ static struct xfrm_algo_desc ealg_list[] = {
},
{
.name = "cbc(camellia)",
+ .compat = "camellia",
.uinfo = {
.encr = {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f2f7c638083e..d847f1a52b44 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -21,6 +21,9 @@
#include <linux/cache.h>
#include <linux/audit.h>
#include <asm/uaccess.h>
+#include <linux/ktime.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include "xfrm_hash.h"
@@ -352,7 +355,7 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
- del_timer_sync(&x->timer);
+ tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
kfree(x->aalg);
kfree(x->ealg);
@@ -398,9 +401,10 @@ static inline unsigned long make_jiffies(long secs)
return secs*HZ;
}
-static void xfrm_timer_handler(unsigned long data)
+static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
- struct xfrm_state *x = (struct xfrm_state*)data;
+ struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
+ struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
struct net *net = xs_net(x);
unsigned long now = get_seconds();
long next = LONG_MAX;
@@ -451,8 +455,9 @@ static void xfrm_timer_handler(unsigned long data)
if (warn)
km_state_expired(x, 0, 0);
resched:
- if (next != LONG_MAX)
- mod_timer(&x->timer, jiffies + make_jiffies(next));
+ if (next != LONG_MAX) {
+ tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
+ }
goto out;
@@ -474,6 +479,7 @@ expired:
out:
spin_unlock(&x->lock);
+ return HRTIMER_NORESTART;
}
static void xfrm_replay_timer_handler(unsigned long data);
@@ -492,7 +498,7 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- setup_timer(&x->timer, xfrm_timer_handler, (unsigned long)x);
+ tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
@@ -843,8 +849,7 @@ found:
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
- x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ;
- add_timer(&x->timer);
+ tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
} else {
@@ -921,7 +926,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
- mod_timer(&x->timer, jiffies + HZ);
+ tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
@@ -1019,8 +1024,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->props.reqid = reqid;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
- x->timer.expires = jiffies + net->xfrm.sysctl_acq_expires*HZ;
- add_timer(&x->timer);
+ tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
h = xfrm_src_hash(net, daddr, saddr, family);
@@ -1110,7 +1114,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
x->props.saddr = orig->props.saddr;
if (orig->aalg) {
- x->aalg = xfrm_algo_clone(orig->aalg);
+ x->aalg = xfrm_algo_auth_clone(orig->aalg);
if (!x->aalg)
goto error;
}
@@ -1300,7 +1304,7 @@ out:
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
- mod_timer(&x1->timer, jiffies + HZ);
+ tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
@@ -1325,7 +1329,7 @@ int xfrm_state_check_expire(struct xfrm_state *x)
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
- mod_timer(&x->timer, jiffies);
+ tasklet_hrtimer_start(&x->mtimer, ktime_set(0, 0), HRTIMER_MODE_REL);
return -EINVAL;
}
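
The xfrm_state.c hunks above replace the jiffies-based state timer with a tasklet_hrtimer, so expiry can be programmed with second/nanosecond resolution while the handler still runs from softirq context. A minimal sketch of that API as used here (tasklet_hrtimer was available in kernels of this era), with a hypothetical owning structure:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct example_state {
	struct tasklet_hrtimer mtimer;
};

/* Handler runs from the tasklet; recover the owner via container_of(). */
static enum hrtimer_restart example_timer(struct hrtimer *me)
{
	struct tasklet_hrtimer *thr =
		container_of(me, struct tasklet_hrtimer, timer);
	struct example_state *x =
		container_of(thr, struct example_state, mtimer);

	/* per-state expiry work would run here, in softirq context */
	(void)x;
	return HRTIMER_NORESTART;
}

static void example_arm(struct example_state *x)
{
	tasklet_hrtimer_init(&x->mtimer, example_timer,
			     CLOCK_REALTIME, HRTIMER_MODE_ABS);
	/* one-shot, one second from now */
	tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
}

static void example_disarm(struct example_state *x)
{
	tasklet_hrtimer_cancel(&x->mtimer);
}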
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b95a2d64eb59..1ada6186933c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -62,6 +62,22 @@ static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
return 0;
}
+static int verify_auth_trunc(struct nlattr **attrs)
+{
+ struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
+ struct xfrm_algo_auth *algp;
+
+ if (!rt)
+ return 0;
+
+ algp = nla_data(rt);
+ if (nla_len(rt) < xfrm_alg_auth_len(algp))
+ return -EINVAL;
+
+ algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
+ return 0;
+}
+
static int verify_aead(struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
@@ -128,7 +144,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
switch (p->id.proto) {
case IPPROTO_AH:
- if (!attrs[XFRMA_ALG_AUTH] ||
+ if ((!attrs[XFRMA_ALG_AUTH] &&
+ !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ALG_COMP])
@@ -139,10 +156,12 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
if (attrs[XFRMA_ALG_COMP])
goto out;
if (!attrs[XFRMA_ALG_AUTH] &&
+ !attrs[XFRMA_ALG_AUTH_TRUNC] &&
!attrs[XFRMA_ALG_CRYPT] &&
!attrs[XFRMA_ALG_AEAD])
goto out;
if ((attrs[XFRMA_ALG_AUTH] ||
+ attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT]) &&
attrs[XFRMA_ALG_AEAD])
goto out;
@@ -152,6 +171,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
if (!attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_AUTH] ||
+ attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_CRYPT])
goto out;
break;
@@ -161,6 +181,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
case IPPROTO_ROUTING:
if (attrs[XFRMA_ALG_COMP] ||
attrs[XFRMA_ALG_AUTH] ||
+ attrs[XFRMA_ALG_AUTH_TRUNC] ||
attrs[XFRMA_ALG_AEAD] ||
attrs[XFRMA_ALG_CRYPT] ||
attrs[XFRMA_ENCAP] ||
@@ -176,6 +197,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
if ((err = verify_aead(attrs)))
goto out;
+ if ((err = verify_auth_trunc(attrs)))
+ goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
goto out;
if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
@@ -229,6 +252,66 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
return 0;
}
+static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
+ struct nlattr *rta)
+{
+ struct xfrm_algo *ualg;
+ struct xfrm_algo_auth *p;
+ struct xfrm_algo_desc *algo;
+
+ if (!rta)
+ return 0;
+
+ ualg = nla_data(rta);
+
+ algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
+ if (!algo)
+ return -ENOSYS;
+ *props = algo->desc.sadb_alg_id;
+
+ p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ strcpy(p->alg_name, algo->name);
+ p->alg_key_len = ualg->alg_key_len;
+ p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
+ memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
+
+ *algpp = p;
+ return 0;
+}
+
+static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
+ struct nlattr *rta)
+{
+ struct xfrm_algo_auth *p, *ualg;
+ struct xfrm_algo_desc *algo;
+
+ if (!rta)
+ return 0;
+
+ ualg = nla_data(rta);
+
+ algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
+ if (!algo)
+ return -ENOSYS;
+ if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+ return -EINVAL;
+ *props = algo->desc.sadb_alg_id;
+
+ p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ strcpy(p->alg_name, algo->name);
+ if (!p->alg_trunc_len)
+ p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
+
+ *algpp = p;
+ return 0;
+}
+
static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
struct nlattr *rta)
{
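
attach_auth_trunc() above accepts the new XFRMA_ALG_AUTH_TRUNC attribute, whose struct xfrm_algo_auth adds an explicit alg_trunc_len so userspace can request, for example, the RFC 4868 truncation lengths enabled by the xfrm_algo.c hunk (192 bits for hmac(sha384), 256 bits for hmac(sha512)). A hedged userspace sketch of building that payload, assuming the updated <linux/xfrm.h> UAPI header from this series; netlink framing is omitted and the helper name is illustrative:

#include <stdlib.h>
#include <string.h>
#include <linux/xfrm.h>

/*
 * Sketch only: build the payload userspace would place in an
 * XFRMA_ALG_AUTH_TRUNC attribute. Lengths are in bits; the kernel
 * rejects alg_trunc_len larger than the algorithm's full ICV and
 * falls back to the default truncation when it is zero.
 */
static struct xfrm_algo_auth *build_auth_trunc(const char *name, const void *key,
					       unsigned int key_bits,
					       unsigned int trunc_bits)
{
	size_t keylen = (key_bits + 7) / 8;
	struct xfrm_algo_auth *a = calloc(1, sizeof(*a) + keylen);

	if (!a)
		return NULL;
	strncpy(a->alg_name, name, sizeof(a->alg_name) - 1);
	a->alg_key_len = key_bits;
	a->alg_trunc_len = trunc_bits;	/* e.g. 256 for "hmac(sha512)" (RFC 4868) */
	memcpy(a->alg_key, key, keylen);
	return a;
}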
@@ -332,10 +415,14 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if ((err = attach_aead(&x->aead, &x->props.ealgo,
attrs[XFRMA_ALG_AEAD])))
goto error;
- if ((err = attach_one_algo(&x->aalg, &x->props.aalgo,
- xfrm_aalg_get_byname,
- attrs[XFRMA_ALG_AUTH])))
+ if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
+ attrs[XFRMA_ALG_AUTH_TRUNC])))
goto error;
+ if (!x->props.aalgo) {
+ if ((err = attach_auth(&x->aalg, &x->props.aalgo,
+ attrs[XFRMA_ALG_AUTH])))
+ goto error;
+ }
if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
xfrm_ealg_get_byname,
attrs[XFRMA_ALG_CRYPT])))
@@ -548,6 +635,24 @@ static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
return 0;
}
+static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
+{
+ struct xfrm_algo *algo;
+ struct nlattr *nla;
+
+ nla = nla_reserve(skb, XFRMA_ALG_AUTH,
+ sizeof(*algo) + (auth->alg_key_len + 7) / 8);
+ if (!nla)
+ return -EMSGSIZE;
+
+ algo = nla_data(nla);
+ strcpy(algo->alg_name, auth->alg_name);
+ memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
+ algo->alg_key_len = auth->alg_key_len;
+
+ return 0;
+}
+
/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
@@ -563,8 +668,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (x->aead)
NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
- if (x->aalg)
- NLA_PUT(skb, XFRMA_ALG_AUTH, xfrm_alg_len(x->aalg), x->aalg);
+ if (x->aalg) {
+ if (copy_to_user_auth(x->aalg, skb))
+ goto nla_put_failure;
+
+ NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
+ xfrm_alg_auth_len(x->aalg), x->aalg);
+ }
if (x->ealg)
NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
if (x->calg)
@@ -2117,8 +2227,11 @@ static inline size_t xfrm_sa_len(struct xfrm_state *x)
size_t l = 0;
if (x->aead)
l += nla_total_size(aead_len(x->aead));
- if (x->aalg)
- l += nla_total_size(xfrm_alg_len(x->aalg));
+ if (x->aalg) {
+ l += nla_total_size(sizeof(struct xfrm_algo) +
+ (x->aalg->alg_key_len + 7) / 8);
+ l += nla_total_size(xfrm_alg_auth_len(x->aalg));
+ }
if (x->ealg)
l += nla_total_size(xfrm_alg_len(x->ealg));
if (x->calg)
@@ -2608,22 +2721,24 @@ static int __net_init xfrm_user_net_init(struct net *net)
xfrm_netlink_rcv, NULL, THIS_MODULE);
if (nlsk == NULL)
return -ENOMEM;
+ net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
}
-static void __net_exit xfrm_user_net_exit(struct net *net)
+static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
- struct sock *nlsk = net->xfrm.nlsk;
-
- rcu_assign_pointer(net->xfrm.nlsk, NULL);
- synchronize_rcu();
- netlink_kernel_release(nlsk);
+ struct net *net;
+ list_for_each_entry(net, net_exit_list, exit_list)
+ rcu_assign_pointer(net->xfrm.nlsk, NULL);
+ synchronize_net();
+ list_for_each_entry(net, net_exit_list, exit_list)
+ netlink_kernel_release(net->xfrm.nlsk_stash);
}
static struct pernet_operations xfrm_user_net_ops = {
- .init = xfrm_user_net_init,
- .exit = xfrm_user_net_exit,
+ .init = xfrm_user_net_init,
+ .exit_batch = xfrm_user_net_exit,
};
static int __init xfrm_user_init(void)