Diffstat (limited to 'net/sctp/socket.c')
 net/sctp/socket.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 57 insertions(+), 12 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 772fbfb4bfda..b9952425c79a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,23 +107,42 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
                              struct sctp_association *, sctp_socket_type_t);
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
+extern struct kmem_cache *sctp_bucket_cachep;
+extern int sysctl_sctp_mem[3];
+extern int sysctl_sctp_rmem[3];
+extern int sysctl_sctp_wmem[3];
+
+int sctp_memory_pressure;
+atomic_t sctp_memory_allocated;
+atomic_t sctp_sockets_allocated;
+
+static void sctp_enter_memory_pressure(void)
+{
+        sctp_memory_pressure = 1;
+}
+
+
 /* Get the sndbuf space available at the time on the association. */
 static inline int sctp_wspace(struct sctp_association *asoc)
 {
-        struct sock *sk = asoc->base.sk;
-        int amt = 0;
+        int amt;
-        if (asoc->ep->sndbuf_policy) {
-                /* make sure that no association uses more than sk_sndbuf */
-                amt = sk->sk_sndbuf - asoc->sndbuf_used;
+        if (asoc->ep->sndbuf_policy)
+                amt = asoc->sndbuf_used;
+        else
+                amt = atomic_read(&asoc->base.sk->sk_wmem_alloc);
+
+        if (amt >= asoc->base.sk->sk_sndbuf) {
+                if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+                        amt = 0;
+                else {
+                        amt = sk_stream_wspace(asoc->base.sk);
+                        if (amt < 0)
+                                amt = 0;
+                }
         } else {
-                /* do socket level accounting */
-                amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+                amt = asoc->base.sk->sk_sndbuf - amt;
         }
-
-        if (amt < 0)
-                amt = 0;
-
         return amt;
 }
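
The rewritten sctp_wspace() above changes what gets measured: with a per-association
sndbuf policy it looks at asoc->sndbuf_used, otherwise at the socket-wide
sk_wmem_alloc. Only when that figure has already reached sk_sndbuf does it fall back
to sk_stream_wspace() (essentially sk_sndbuf - sk_wmem_queued in kernels of this era),
and it reports no space at all if the application pinned the buffer size via
SO_SNDBUF (SOCK_SNDBUF_LOCK). The stand-alone sketch below restates that logic; the
structs are simplified stand-ins for the kernel's struct sctp_association /
struct sock, not the kernel definitions.

    /* User-space model of the new sctp_wspace() logic. */
    #include <stdio.h>

    struct model_sock {
            int sk_sndbuf;        /* configured send-buffer limit */
            int sk_wmem_alloc;    /* bytes currently allocated for transmit */
            int sk_wmem_queued;   /* bytes charged via sk_charge_skb() */
            int sndbuf_locked;    /* stands in for SOCK_SNDBUF_LOCK in sk_userlocks */
    };

    struct model_assoc {
            struct model_sock *sk;
            int sndbuf_policy;    /* per-association accounting enabled? */
            int sndbuf_used;      /* bytes used by this association */
    };

    /* sk_stream_wspace() of this era is roughly sk_sndbuf - sk_wmem_queued. */
    static int model_stream_wspace(const struct model_sock *sk)
    {
            return sk->sk_sndbuf - sk->sk_wmem_queued;
    }

    static int model_wspace(const struct model_assoc *asoc)
    {
            int amt;

            if (asoc->sndbuf_policy)
                    amt = asoc->sndbuf_used;
            else
                    amt = asoc->sk->sk_wmem_alloc;

            if (amt >= asoc->sk->sk_sndbuf) {
                    if (asoc->sk->sndbuf_locked)
                            amt = 0;        /* user pinned sk_sndbuf: no room */
                    else {
                            amt = model_stream_wspace(asoc->sk);
                            if (amt < 0)
                                    amt = 0;
                    }
            } else {
                    amt = asoc->sk->sk_sndbuf - amt;  /* room left under sk_sndbuf */
            }
            return amt;
    }

    int main(void)
    {
            struct model_sock sk = { .sk_sndbuf = 65536, .sk_wmem_alloc = 70000,
                                     .sk_wmem_queued = 60000, .sndbuf_locked = 0 };
            struct model_assoc asoc = { .sk = &sk, .sndbuf_policy = 0 };

            /* Over sk_sndbuf but SO_SNDBUF not locked: fall back to stream wspace. */
            printf("wspace = %d\n", model_wspace(&asoc));
            return 0;
    }
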
@@ -155,6 +174,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
                                 sizeof(struct sctp_chunk);
         atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+        sk_charge_skb(sk, chunk->skb);
 }
 /* Verify that this is a valid address. */
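
sk_charge_skb() is a core-stack helper of this era. Judging from the explicit undo in
sctp_wfree() further down (sk_wmem_queued -= skb->truesize; sk_forward_alloc +=
skb->truesize), its effect is to charge skb->truesize to sk_wmem_queued and take the
same amount out of sk_forward_alloc. A minimal self-contained sketch of that
charge/uncharge pairing, with a plain struct standing in for struct sock:

    #include <assert.h>
    #include <stdio.h>

    struct model_sock {
            int sk_wmem_queued;   /* bytes of queued write data */
            int sk_forward_alloc; /* pre-allocated quota still available */
    };

    /* What sk_charge_skb() does when sctp_set_owner_w() queues a chunk's skb. */
    static void model_charge_skb(struct model_sock *sk, int truesize)
    {
            sk->sk_wmem_queued += truesize;
            sk->sk_forward_alloc -= truesize;
    }

    /* What the new lines in sctp_wfree() undo when the skb destructor runs. */
    static void model_uncharge_skb(struct model_sock *sk, int truesize)
    {
            sk->sk_wmem_queued -= truesize;
            sk->sk_forward_alloc += truesize;
    }

    int main(void)
    {
            struct model_sock sk = { .sk_wmem_queued = 0, .sk_forward_alloc = 4096 };

            model_charge_skb(&sk, 512);     /* chunk skb queued for transmit */
            model_uncharge_skb(&sk, 512);   /* skb freed, destructor runs */

            assert(sk.sk_wmem_queued == 0 && sk.sk_forward_alloc == 4096);
            printf("write-side accounting balanced\n");
            return 0;
    }
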
@@ -3293,6 +3313,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
         sp->hmac = NULL;
         SCTP_DBG_OBJCNT_INC(sock);
+        atomic_inc(&sctp_sockets_allocated);
         return 0;
 }
@@ -3306,7 +3327,7 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk)
         /* Release our hold on the endpoint. */
         ep = sctp_sk(sk)->ep;
         sctp_endpoint_free(ep);
-
+        atomic_dec(&sctp_sockets_allocated);
         return 0;
 }
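
sctp_sockets_allocated is a straightforward per-protocol count of live sockets:
incremented once in sctp_init_sock() and decremented once in sctp_destroy_sock(). A
tiny user-space analogue of the same pattern, using C11 atomics in place of the
kernel's atomic_t / atomic_inc / atomic_dec:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Analogue of sctp_sockets_allocated: bumped on init, dropped on destroy. */
    static atomic_int sockets_allocated;

    static void model_init_sock(void)    { atomic_fetch_add(&sockets_allocated, 1); }
    static void model_destroy_sock(void) { atomic_fetch_sub(&sockets_allocated, 1); }

    int main(void)
    {
            model_init_sock();
            model_init_sock();
            model_destroy_sock();
            printf("live sockets: %d\n", atomic_load(&sockets_allocated));
            return 0;
    }
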
@@ -5720,6 +5741,12 @@ static void sctp_wfree(struct sk_buff *skb)
         atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+        /*
+         * This undoes what is done via sk_charge_skb
+         */
+        sk->sk_wmem_queued -= skb->truesize;
+        sk->sk_forward_alloc += skb->truesize;
+
         sock_wfree(skb);
         __sctp_write_space(asoc);
@@ -5737,6 +5764,11 @@ void sctp_sock_rfree(struct sk_buff *skb)
         struct sctp_ulpevent *event = sctp_skb2event(skb);
         atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
+
+        /*
+         * Mimic the behavior of sk_stream_rfree
+         */
+        sk->sk_forward_alloc += event->rmem_len;
 }
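
On the receive side the comment points at sk_stream_rfree(): whatever was charged to
the socket when the ulpevent's skb was attached to it (that happens outside this
file) is handed back to sk_forward_alloc when the skb is destroyed, except that SCTP
accounts event->rmem_len rather than skb->truesize. A stand-in model of that
receive-side pairing; the charge helper below is illustrative, not a kernel function:

    #include <assert.h>
    #include <stdio.h>

    struct model_sock {
            int sk_rmem_alloc;    /* bytes of queued receive data */
            int sk_forward_alloc; /* pre-allocated quota still available */
    };

    /* Illustrative: where the event is queued, rmem_len is charged to the socket. */
    static void model_charge_event(struct model_sock *sk, int rmem_len)
    {
            sk->sk_rmem_alloc += rmem_len;
            sk->sk_forward_alloc -= rmem_len;
    }

    /* Mirrors sctp_sock_rfree() above: the atomic_sub() plus the new line. */
    static void model_sock_rfree(struct model_sock *sk, int rmem_len)
    {
            sk->sk_rmem_alloc -= rmem_len;
            sk->sk_forward_alloc += rmem_len;
    }

    int main(void)
    {
            struct model_sock sk = { .sk_rmem_alloc = 0, .sk_forward_alloc = 8192 };

            model_charge_event(&sk, 1500);  /* notification or data event queued */
            model_sock_rfree(&sk, 1500);    /* application consumed it, skb freed */

            assert(sk.sk_rmem_alloc == 0 && sk.sk_forward_alloc == 8192);
            printf("receive-side accounting balanced\n");
            return 0;
    }
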
@@ -6126,6 +6158,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         sctp_release_sock(newsk);
 }
+
 /* This proto struct describes the ULP interface for SCTP. */
 struct proto sctp_prot = {
         .name        = "SCTP",
@@ -6148,6 +6181,12 @@ struct proto sctp_prot = {
         .unhash      = sctp_unhash,
         .get_port    = sctp_get_port,
         .obj_size    = sizeof(struct sctp_sock),
+        .sysctl_mem  = sysctl_sctp_mem,
+        .sysctl_rmem = sysctl_sctp_rmem,
+        .sysctl_wmem = sysctl_sctp_wmem,
+        .memory_pressure = &sctp_memory_pressure,
+        .enter_memory_pressure = sctp_enter_memory_pressure,
+        .memory_allocated = &sctp_memory_allocated,
 };
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -6172,5 +6211,11 @@ struct proto sctpv6_prot = {
         .unhash      = sctp_unhash,
         .get_port    = sctp_get_port,
         .obj_size    = sizeof(struct sctp6_sock),
+        .sysctl_mem  = sysctl_sctp_mem,
+        .sysctl_rmem = sysctl_sctp_rmem,
+        .sysctl_wmem = sysctl_sctp_wmem,
+        .memory_pressure = &sctp_memory_pressure,
+        .enter_memory_pressure = sctp_enter_memory_pressure,
+        .memory_allocated = &sctp_memory_allocated,
 };
 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
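
The new struct proto fields are what hook SCTP into the core stack's generic memory
accounting: sysctl_sctp_{mem,rmem,wmem} supply the limits, sctp_memory_allocated
tracks the protocol-wide total, and when the core finds that total past the pressure
threshold it calls the protocol's enter_memory_pressure() hook, which here just sets
sctp_memory_pressure. The sketch below models the three-level sysctl_mem check
(min / pressure / max) in the spirit of the stream-memory helpers of this era; the
names and page-sized numbers are illustrative, not lifted from the kernel.

    #include <stdbool.h>
    #include <stdio.h>

    static int  model_sysctl_mem[3] = { 1024, 1536, 2048 }; /* min, pressure, max */
    static long model_memory_allocated;                     /* protocol-wide total */
    static int  model_memory_pressure;

    static void model_enter_memory_pressure(void)
    {
            model_memory_pressure = 1;      /* mirrors sctp_enter_memory_pressure() */
    }

    /* Try to account `pages` more pages to the protocol: refuse above the hard
     * limit, flag pressure above the middle threshold, relax below the minimum. */
    static bool model_mem_schedule(int pages)
    {
            model_memory_allocated += pages;

            if (model_memory_allocated > model_sysctl_mem[2]) {
                    model_memory_allocated -= pages;        /* over the hard limit */
                    model_enter_memory_pressure();
                    return false;
            }
            if (model_memory_allocated > model_sysctl_mem[1])
                    model_enter_memory_pressure();          /* soft pressure */
            else if (model_memory_allocated < model_sysctl_mem[0])
                    model_memory_pressure = 0;              /* plenty of room again */
            return true;
    }

    int main(void)
    {
            printf("schedule 1200: %s, pressure=%d\n",
                   model_mem_schedule(1200) ? "ok" : "refused", model_memory_pressure);
            printf("schedule  500: %s, pressure=%d\n",
                   model_mem_schedule(500) ? "ok" : "refused", model_memory_pressure);
            printf("schedule 1000: %s, pressure=%d\n",
                   model_mem_schedule(1000) ? "ok" : "refused", model_memory_pressure);
            return 0;
    }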