author	Michio Honda <micchie@sfc.wide.ad.jp>	2011-04-26 19:32:51 +0900
committer	David S. Miller <davem@davemloft.net>	2011-06-02 02:04:53 -0700
commit	9f7d653b67aed2d92540fbb0a8adaf32fcf352ae (patch)
tree	1e128f1465135dd70f202b8720c32cbfd536cb68 /net/sctp
parent	3ced2dddf10f26f0aaff96f3345a3d876cea62f8 (diff)
sctp: Add Auto-ASCONF support (core).
SCTP can reconfigure the IP addresses of an existing association by sending ASCONF chunks, as described in RFC 5061; for example, a newly configured IP address can be brought into use in an established association. This patch implements automatic ASCONF operation in the SCTP stack, driven by address add/delete events on the host, which is referred to as auto_asconf.

Signed-off-by: Michio Honda <micchie@sfc.wide.ad.jp>
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Acked-by: Wei Yongjun <yjwei@cn.fujitsu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
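For illustration, here is a minimal user-space sketch of how an application might opt into this behaviour. It assumes the companion patches in this series that expose the per-socket SCTP_AUTO_ASCONF socket option and the net.sctp.auto_asconf sysctl (neither is added by this core patch), and the port number is an arbitrary example; auto ASCONF only takes effect for endpoints bound to the wildcard address, as checked by sctp_is_ep_boundall() in the patch below.

/* Hedged sketch: SCTP_AUTO_ASCONF comes from a follow-up patch in this
 * series and may not be present in <netinet/sctp.h> on older systems.
 */
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in any = {
		.sin_family      = AF_INET,
		.sin_port        = htons(5000),	/* arbitrary example port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int on = 1;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0)
		return 1;

	/* Auto-ASCONF only applies to endpoints bound to the wildcard
	 * address (see sctp_is_ep_boundall()). */
	if (bind(fd, (struct sockaddr *)&any, sizeof(any)) < 0)
		return 1;

	/* Assumed companion socket option: with it enabled, address
	 * add/delete events on the host make the stack send ASCONF
	 * chunks on established associations automatically. */
	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on));

	listen(fd, 5);
	/* ... accept associations; no application action is needed when
	 * the host later gains or loses an IP address ... */
	close(fd);
	return 0;
}

Alternatively, the behaviour could be made the default for new sockets via the assumed net.sctp.auto_asconf sysctl, which the sctp_default_auto_asconf flag introduced in this patch is expected to back.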
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/bind_addr.c	 15
-rw-r--r--	net/sctp/ipv6.c	  2
-rw-r--r--	net/sctp/protocol.c	147
-rw-r--r--	net/sctp/socket.c	 46
4 files changed, 206 insertions(+), 4 deletions(-)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 83e3011c19c..17d157325b6 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -534,6 +534,21 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
return 0;
}
+int sctp_is_ep_boundall(struct sock *sk)
+{
+ struct sctp_bind_addr *bp;
+ struct sctp_sockaddr_entry *addr;
+
+ bp = &sctp_sk(sk)->ep->base.bind_addr;
+ if (sctp_list_single_entry(&bp->address_list)) {
+ addr = list_entry(bp->address_list.next,
+ struct sctp_sockaddr_entry, list);
+ if (sctp_is_any(sk, &addr->a))
+ return 1;
+ }
+ return 0;
+}
+
/********************************************************************
* 3rd Level Abstractions
********************************************************************/
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0bb0d7cb9f1..aabaee41dd3 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -112,6 +112,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
spin_unlock_bh(&sctp_local_addr_lock);
}
break;
@@ -122,6 +123,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
if (addr->a.sa.sa_family == AF_INET6 &&
ipv6_addr_equal(&addr->a.v6.sin6_addr,
&ifa->addr)) {
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e..013c6136c54 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -623,6 +623,142 @@ static void sctp_v4_ecn_capable(struct sock *sk)
INET_ECN_xmit(sk);
}
+void sctp_addr_wq_timeout_handler(unsigned long arg)
+{
+ struct sctp_sockaddr_entry *addrw, *temp;
+ struct sctp_sock *sp;
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+
+ list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
+ " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
+ addrw);
+
+ /* Now we send an ASCONF for each association */
+ /* Note: we currently don't handle link-local IPv6 addresses */
+ if (addrw->a.sa.sa_family == AF_INET6) {
+ struct in6_addr *in6;
+
+ if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+ IPV6_ADDR_LINKLOCAL)
+ goto free_next;
+
+ in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+ if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
+ addrw->state == SCTP_ADDR_NEW) {
+ unsigned long timeo_val;
+
+ SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n",
+ SCTP_ADDRESS_TICK_DELAY);
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&sctp_addr_wq_timer, timeo_val);
+ break;
+ }
+ }
+
+ list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
+ struct sock *sk;
+
+ sk = sctp_opt2sk(sp);
+ /* ignore bound-specific endpoints */
+ if (!sctp_is_ep_boundall(sk))
+ continue;
+ sctp_bh_lock_sock(sk);
+ if (sctp_asconf_mgmt(sp, addrw) < 0)
+ SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
+ sctp_bh_unlock_sock(sk);
+ }
+free_next:
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(void)
+{
+ struct sctp_sockaddr_entry *addrw;
+ struct sctp_sockaddr_entry *temp;
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+ del_timer(&sctp_addr_wq_timer);
+ list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
+/* lookup the entry for the same address in the addr_waitq
+ * sctp_addr_wq MUST be locked
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
+{
+ struct sctp_sockaddr_entry *addrw;
+
+ list_for_each_entry(addrw, &sctp_addr_waitq, list) {
+ if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+ continue;
+ if (addrw->a.sa.sa_family == AF_INET) {
+ if (addrw->a.v4.sin_addr.s_addr ==
+ addr->a.v4.sin_addr.s_addr)
+ return addrw;
+ } else if (addrw->a.sa.sa_family == AF_INET6) {
+ if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+ &addr->a.v6.sin6_addr))
+ return addrw;
+ }
+ }
+ return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
+{
+ struct sctp_sockaddr_entry *addrw;
+ unsigned long timeo_val;
+
+ /* First, we check whether an opposite event already exists in the queue.
+ * If we find such an event, it is removed.
+ * This may look wasteful, but the DHCP client often attaches the new
+ * address only after a couple of additions and deletions of that address.
+ */
+
+ spin_lock_bh(&sctp_addr_wq_lock);
+ /* Offsets existing events in addr_wq */
+ addrw = sctp_addr_wq_lookup(addr);
+ if (addrw) {
+ if (addrw->state != cmd) {
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
+ " in wq %p\n", addrw->state, &addrw->a,
+ &sctp_addr_waitq);
+ list_del(&addrw->list);
+ kfree(addrw);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+ return;
+ }
+
+ /* OK, we have to add the new address to the wait queue */
+ addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ if (addrw == NULL) {
+ spin_unlock_bh(&sctp_addr_wq_lock);
+ return;
+ }
+ addrw->state = cmd;
+ list_add_tail(&addrw->list, &sctp_addr_waitq);
+ SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
+ " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
+
+ if (!timer_pending(&sctp_addr_wq_timer)) {
+ timeo_val = jiffies;
+ timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+ mod_timer(&sctp_addr_wq_timer, timeo_val);
+ }
+ spin_unlock_bh(&sctp_addr_wq_lock);
+}
+
/* Event handler for inet address addition/deletion events.
* The sctp_local_addr_list needs to be protected by a spin lock since
* multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -650,6 +786,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
addr->valid = 1;
spin_lock_bh(&sctp_local_addr_lock);
list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
spin_unlock_bh(&sctp_local_addr_lock);
}
break;
@@ -660,6 +797,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
if (addr->a.sa.sa_family == AF_INET &&
addr->a.v4.sin_addr.s_addr ==
ifa->ifa_local) {
+ sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
found = 1;
addr->valid = 0;
list_del_rcu(&addr->list);
@@ -1242,6 +1380,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Disable ADDIP by default. */
sctp_addip_enable = 0;
sctp_addip_noauth = 0;
+ sctp_default_auto_asconf = 0;
/* Enable PR-SCTP by default. */
sctp_prsctp_enable = 1;
@@ -1266,6 +1405,13 @@ SCTP_STATIC __init int sctp_init(void)
spin_lock_init(&sctp_local_addr_lock);
sctp_get_local_addr_list();
+ /* Initialize the address event list */
+ INIT_LIST_HEAD(&sctp_addr_waitq);
+ INIT_LIST_HEAD(&sctp_auto_asconf_splist);
+ spin_lock_init(&sctp_addr_wq_lock);
+ sctp_addr_wq_timer.expires = 0;
+ setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
+
status = sctp_v4_protosw_init();
if (status)
@@ -1337,6 +1483,7 @@ SCTP_STATIC __exit void sctp_exit(void)
/* Unregister with inet6/inet layers. */
sctp_v6_del_protocol();
sctp_v4_del_protocol();
+ sctp_free_addr_wq();
/* Free the control endpoint. */
inet_ctl_sock_destroy(sctp_ctl_sock);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e..7eb1f1a736f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -811,6 +811,28 @@ out:
return retval;
}
+/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
+int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
+{
+ struct sock *sk = sctp_opt2sk(sp);
+ union sctp_addr *addr;
+ struct sctp_af *af;
+
+ /* It is safe to write port space in caller. */
+ addr = &addrw->a;
+ addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
+ af = sctp_get_af_specific(addr->sa.sa_family);
+ if (!af)
+ return -EINVAL;
+ if (sctp_verify_addr(sk, addr, af->sockaddr_len))
+ return -EINVAL;
+
+ if (addrw->state == SCTP_ADDR_NEW)
+ return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
+ else
+ return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
+}
+
/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
*
* API 8.1
@@ -3763,6 +3785,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
local_bh_disable();
percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ if (sctp_default_auto_asconf) {
+ list_add_tail(&sp->auto_asconf_list,
+ &sctp_auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ } else
+ sp->do_auto_asconf = 0;
local_bh_enable();
return 0;
@@ -3771,13 +3799,17 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
/* Cleanup any SCTP per socket resources. */
SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
{
- struct sctp_endpoint *ep;
+ struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
/* Release our hold on the endpoint. */
- ep = sctp_sk(sk)->ep;
- sctp_endpoint_free(ep);
+ sp = sctp_sk(sk);
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
+ }
+ sctp_endpoint_free(sp->ep);
local_bh_disable();
percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -6512,6 +6544,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
struct sctp_bind_hashbucket *head;
+ struct list_head tmplist;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
@@ -6519,7 +6552,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
newsk->sk_sndbuf = oldsk->sk_sndbuf;
newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
/* Brute force copy old sctp opt. */
- inet_sk_copy_descendant(newsk, oldsk);
+ if (oldsp->do_auto_asconf) {
+ memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+ inet_sk_copy_descendant(newsk, oldsk);
+ memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+ } else
+ inet_sk_copy_descendant(newsk, oldsk);
/* Restore the ep value that was overwritten with the above structure
* copy.