Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c          | 17
-rw-r--r--  net/ceph/osd_client.c              | 33
-rw-r--r--  net/core/skbuff.c                  | 30
-rw-r--r--  net/ipv4/ip_forward.c              |  3
-rw-r--r--  net/ipv4/ping.c                    |  1
-rw-r--r--  net/ipv4/tcp_output.c              | 64
-rw-r--r--  net/mac80211/ieee80211_i.h         |  3
-rw-r--r--  net/mac80211/rx.c                  |  5
-rw-r--r--  net/mac80211/wep.c                 |  6
-rw-r--r--  net/netlink/af_netlink.c           |  6
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c  | 23
-rw-r--r--  net/xfrm/xfrm_policy.c             | 12
12 files changed, 142 insertions(+), 61 deletions(-)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1a4f32c09ad5..f076a8ede00b 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -650,6 +650,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
struct net_device *in;
if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
+ int frag_max_size;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ frag_max_size = IPCB(skb)->frag_max_size;
+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
+ }
+
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
@@ -709,8 +716,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
nf_bridge->mask |= BRNF_PKT_TYPE;
}
- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
- return NF_DROP;
+ if (pf == NFPROTO_IPV4) {
+ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
+
+ if (br_parse_ip_options(skb))
+ return NF_DROP;
+
+ IPCB(skb)->frag_max_size = frag_max;
+ }
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
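For context on these two hunks: br_parse_ip_options() re-initializes the skb's IP control block as part of validating the header, which wipes IPCB(skb)->frag_max_size, so the value has to be parked in the bridge control block and written back after the call. A minimal sketch of the clobbering, paraphrased from the 3.x-era br_netfilter.c (illustrative, not part of this patch):

	static int br_parse_ip_options(struct sk_buff *skb)
	{
		/* ... IP header sanity checks ... */
		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); /* wipes frag_max_size */
		/* ... option parsing ... */
		return 0;
	}

This is why the forward-finish path saves frag_max_size before the options check runs and the restore happens immediately after it.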
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b0cf1f287aed..e242f96550bd 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1263,8 +1263,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
if (list_empty(&req->r_osd_item))
req->r_osd = NULL;
}
-
- list_del_init(&req->r_req_lru_item); /* can be on notarget */
ceph_osdc_put_request(req);
}
@@ -1974,20 +1972,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
err = __map_request(osdc, req,
force_resend || force_resend_writes);
dout("__map_request returned %d\n", err);
- if (err == 0)
- continue; /* no change and no osd was specified */
if (err < 0)
continue; /* hrm! */
- if (req->r_osd == NULL) {
- dout("tid %llu maps to no valid osd\n", req->r_tid);
- needmap++; /* request a newer map */
- continue;
- }
+ if (req->r_osd == NULL || err > 0) {
+ if (req->r_osd == NULL) {
+ dout("lingering %p tid %llu maps to no osd\n",
+ req, req->r_tid);
+ /*
+ * A homeless lingering request makes
+ * no sense, as its job is to keep
+ * a particular OSD connection open.
+ * Request a newer map and kick the
+ * request, knowing that it won't be
+ * resent until we actually get a map
+ * that can tell us where to send it.
+ */
+ needmap++;
+ }
- dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- __register_request(osdc, req);
- __unregister_linger_request(osdc, req);
+ dout("kicking lingering %p tid %llu osd%d\n", req,
+ req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+ __register_request(osdc, req);
+ __unregister_linger_request(osdc, req);
+ }
}
reset_changed_osds(osdc);
mutex_unlock(&osdc->request_mutex);
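The restructured loop leans on the return convention of __map_request(), inferred from the surrounding code: negative means error, 0 means the mapping is unchanged, positive means it changed. A condensed view of the new control flow (illustrative only, comment block elided):

	err = __map_request(osdc, req, force_resend || force_resend_writes);
	if (err < 0)
		continue;				/* hrm! */
	if (req->r_osd == NULL || err > 0) {
		if (req->r_osd == NULL)
			needmap++;			/* homeless: ask for a newer map */
		__register_request(osdc, req);		/* kick it in either case */
		__unregister_linger_request(osdc, req);
	}

The net effect is that a lingering request is now re-kicked both when its mapping changes and when it currently maps to no OSD at all, instead of being skipped by the old err == 0 early-continue.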
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e4e3b13cc729..eea9e99e3a0a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -279,13 +279,14 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);
/**
- * build_skb - build a network buffer
+ * __build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of fragment, or 0 if head was kmalloced
+ * @frag_size: size of data, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator.
+ * @frag_size is 0, otherwise data should come from the page allocator
+ * or vmalloc().
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -296,7 +297,7 @@ EXPORT_SYMBOL(__alloc_skb);
* before giving packet to stack.
* RX rings only contains data buffers, not full skbs.
*/
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
@@ -310,7 +311,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
- skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -327,6 +327,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
return skb;
}
+
+/* build_skb() is a wrapper over __build_skb() that specifically
+ * takes care of skb->head_frag and skb->pfmemalloc.
+ * This means that if @frag_size is not zero, then @data must be backed
+ * by a page fragment, not kmalloc() or vmalloc().
+ */
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
+{
+ struct sk_buff *skb = __build_skb(data, frag_size);
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+ if (virt_to_head_page(data)->pfmemalloc)
+ skb->pfmemalloc = 1;
+ }
+ return skb;
+}
EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
@@ -354,7 +371,8 @@ refill:
gfp_t gfp = gfp_mask;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN |
+ __GFP_NOMEMALLOC;
nc->frag.page = alloc_pages(gfp, order);
if (likely(nc->frag.page))
break;
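A usage sketch of the resulting split, assuming the 3.x-era allocators (hypothetical call sites, not from this patch): page-fragment-backed heads go through build_skb(), which sets head_frag and propagates pfmemalloc, while kmalloc()/vmalloc()-backed heads must use __build_skb() so head_frag stays 0 and the stack never attempts put_page() on memory that has no backing page fragment.

	unsigned int sz = SKB_DATA_ALIGN(len) +
			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *frag = netdev_alloc_frag(sz);		/* page-fragment head */
	struct sk_buff *skb = build_skb(frag, sz);	/* head_frag = 1 */

	void *vdata = vmalloc(size);			/* no page fragment */
	struct sk_buff *vskb = __build_skb(vdata, size);	/* head_frag = 0 */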
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 787b3c294ce6..d5410b57da19 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -81,6 +81,9 @@ int ip_forward(struct sk_buff *skb)
if (skb->pkt_type != PACKET_HOST)
goto drop;
+ if (unlikely(skb->sk))
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
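Background for the new check: skb->sk is only meant to be set on locally generated packets, where the destructor keeps the owning socket's write-buffer accounting balanced; a packet on the forward path should never carry such a reference. Paraphrased from memory from include/net/sock.h (illustrative):

	static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
	{
		skb_orphan(skb);
		skb->sk = sk;
		skb->destructor = sock_wfree;	/* uncharges sk on free */
		atomic_add(skb->truesize, &sk->sk_wmem_alloc);
	}

Forwarding such an skb would let that destructor fire in a context the owner never accounted for, so ip_forward() now drops these frames outright.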
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a5c49d657ab1..64f4edb2dbf9 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
if (sk_hashed(sk)) {
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
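The one-line fix matters because hlist_nulls_del() poisons the node's pprev pointer rather than clearing it, so sk_hashed(sk) keeps returning true and a second ping_unhash() on the same socket would write through the poison value. Re-initializing the node makes any repeated unhash a no-op. The helper, paraphrased from include/net/sock.h (illustrative):

	static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
	{
		node->pprev = NULL;	/* sk_hashed() is false from here on */
	}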
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 32dcb4e05b6b..dc9f925b0cd5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2717,39 +2717,65 @@ begin_fwd:
}
}
-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow exceeding memory limits for FIN packets to expedite
+ * connection tear-down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay the FIN
+ * or even be forced to close the flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+ int amt, status;
+
+ if (size <= sk->sk_forward_alloc)
+ return;
+ amt = sk_mem_pages(size);
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
*/
void tcp_send_fin(struct sock *sk)
{
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int mss_now;
- /* Optimization, tack on the FIN if we have a queue of
- * unsent frames. But be careful about outgoing SACKS
- * and IP options.
+ /* Optimization, tack on the FIN if we have one skb in write queue and
+ * this skb was not yet sent, or we are under memory pressure.
+ * Note: in the latter case, FIN packet will be sent after a timeout,
+ * as TCP stack thinks it has already been transmitted.
*/
- mss_now = tcp_current_mss(sk);
-
- if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
- TCP_SKB_CB(skb)->end_seq++;
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+ TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
+ if (!tcp_send_head(sk)) {
+ /* This means tskb was already sent.
+ * Pretend we included the FIN on previous transmit.
+ * We need to set tp->snd_nxt to the value it would have
+ * if FIN had been sent. This is because retransmit path
+ * does not change tp->snd_nxt.
+ */
+ tp->snd_nxt++;
+ return;
+ }
} else {
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
- if (skb)
- break;
- yield();
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ if (tskb)
+ goto coalesce;
+ return;
}
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
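Two details of the tcp_send_fin() rewrite are worth spelling out. First, sk_forced_wmem_schedule() rounds the charge up to whole memory quanta and accounts it unconditionally, skipping the limit checks a normal sk_wmem_schedule() would apply; the helpers behind it, paraphrased from include/net/sock.h (illustrative):

	#define SK_MEM_QUANTUM ((int)PAGE_SIZE)

	static inline int sk_mem_pages(int amt)
	{
		return (amt + SK_MEM_QUANTUM - 1) >> PAGE_SHIFT;
	}

Second, when the FIN is folded into an already-transmitted tail skb, only end_seq and write_seq move automatically; the retransmit path never advances snd_nxt, so the function bumps it by hand to cover the FIN's sequence byte.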
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91c1aca65ae9..aed656d18b23 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -204,6 +204,8 @@ enum ieee80211_packet_rx_flags {
* @IEEE80211_RX_CMNTR: received on cooked monitor already
* @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
* to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ * reorder buffer timeout timer, not the normal RX path
*
* These flags are used across handling multiple interfaces
* for a single frame.
@@ -211,6 +213,7 @@ enum ieee80211_packet_rx_flags {
enum ieee80211_rx_flags {
IEEE80211_RX_CMNTR = BIT(0),
IEEE80211_RX_BEACON_REPORTED = BIT(1),
+ IEEE80211_RX_REORDER_TIMER = BIT(2),
};
struct ieee80211_rx_data {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index cfda615c6f78..86864c4e248f 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2000,7 +2000,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
- if (rx->local->napi)
+ if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+ rx->local->napi)
napi_gro_receive(rx->local->napi, skb);
else
netif_receive_skb(skb);
@@ -3070,7 +3071,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
/* This is OK -- must be QoS data frame */
.security_idx = tid,
.seqno_idx = tid,
- .flags = 0,
+ .flags = IEEE80211_RX_REORDER_TIMER,
};
struct tid_ampdu_rx *tid_agg_rx;
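The new flag exists because napi_gro_receive() is only safe from the driver's NAPI poll loop, and ieee80211_release_reorder_timeout() runs from the reorder timer, where local->napi points at a context that is not currently polling. The condensed dispatch rule after both hunks (illustrative):

	if (rx->local->napi && !(rx->flags & IEEE80211_RX_REORDER_TIMER))
		napi_gro_receive(rx->local->napi, skb);	/* driver NAPI poll */
	else
		netif_receive_skb(skb);		/* timer release: GRO unsafe here */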
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 9181fb6d6437..837b7b1bb310 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
- if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
- skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+ if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
return NULL;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -169,6 +168,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
size_t len;
u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
+ if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
+ return -1;
+
iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
if (!iv)
return -1;
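The check moves because ieee80211_wep_add_iv() only ever grows the head of the frame, and it is also used on paths where the hardware appends the ICV itself; demanding ICV tailroom there could trip the WARN_ON for frames that never needed it. The resulting split of responsibilities (illustrative summary, assuming the usual hardware-crypto behaviour):

	/* ieee80211_wep_add_iv():  needs IV headroom only; also used when
	 *                          the hardware generates/appends the ICV.
	 * ieee80211_wep_encrypt(): software crypto, the only path that
	 *                          extends the tail, so it owns the check:
	 */
	if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
		return -1;	/* verified before the IV is inserted */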
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 79c965a51ab2..c0a418766f9c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1599,13 +1599,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
if (data == NULL)
return NULL;
- skb = build_skb(data, size);
+ skb = __build_skb(data, size);
if (skb == NULL)
vfree(data);
- else {
- skb->head_frag = 0;
+ else
skb->destructor = netlink_skb_destructor;
- }
return skb;
}
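This hunk is the consumer of the new __build_skb() above: netlink's large-skb path backs the head with vmalloc() memory, and clearing head_frag after the fact was no longer enough, because with a non-zero frag_size build_skb() now dereferences the page that supposedly backs @data. Illustrative excerpt of the hazard (a vmalloc address has no valid struct page for this lookup):

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (virt_to_head_page(data)->pfmemalloc)	/* bogus for vmalloc() */
			skb->pfmemalloc = 1;
	}

Going through __build_skb() skips the lookup entirely, and netlink_skb_destructor() remains responsible for vfree()ing the head.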
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 1ec19f6f0c2b..eeeba5adee6d 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
{
u32 value_follows;
int err;
+ struct page *scratch;
+
+ scratch = alloc_page(GFP_KERNEL);
+ if (!scratch)
+ return -ENOMEM;
+ xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
/* res->status */
err = gssx_dec_status(xdr, &res->status);
if (err)
- return err;
+ goto out_free;
/* res->context_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
err = gssx_dec_ctx(xdr, res->context_handle);
if (err)
- return err;
+ goto out_free;
} else {
res->context_handle = NULL;
}
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->output_token */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
err = gssx_dec_buffer(xdr, res->output_token);
if (err)
- return err;
+ goto out_free;
} else {
res->output_token = NULL;
}
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->delegated_cred_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
- return err;
+ goto out_free;
if (value_follows) {
/* we do not support upcall servers sending this data. */
- return -EINVAL;
+ err = -EINVAL;
+ goto out_free;
}
/* res->options */
err = gssx_dec_option_array(xdr, &res->options);
+out_free:
+ __free_page(scratch);
return err;
}
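The scratch page feeds xdr_inline_decode(): when a requested object straddles a page boundary in the receive buffer, the XDR layer copies it into the registered scratch buffer and returns a pointer into that copy instead of failing. The pattern, condensed from this function (the length `len` is a hypothetical stand-in for the real per-field sizes):

	__be32 *p;
	struct page *scratch = alloc_page(GFP_KERNEL);

	if (!scratch)
		return -ENOMEM;
	xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);

	p = xdr_inline_decode(xdr, len);	/* may now span page boundaries */
	/* ... every decode helper above funnels errors to out_free ... */
	__free_page(scratch);			/* released on every exit path */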
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 88bf289abdc9..f62fd2c5d836 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2264,11 +2264,9 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
* have the xfrm_state's. We need to wait for KM to
* negotiate new SA's or bail out with error.*/
if (net->xfrm.sysctl_larval_drop) {
- dst_release(dst);
- xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
-
- return ERR_PTR(-EREMOTE);
+ err = -EREMOTE;
+ goto error;
}
err = -EAGAIN;
@@ -2319,7 +2317,8 @@ nopol:
error:
dst_release(dst);
dropdst:
- dst_release(dst_orig);
+ if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
+ dst_release(dst_orig);
xfrm_pols_put(pols, drop_pols);
return ERR_PTR(err);
}
@@ -2333,7 +2332,8 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
struct sock *sk, int flags)
{
struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
- flags | XFRM_LOOKUP_QUEUE);
+ flags | XFRM_LOOKUP_QUEUE |
+ XFRM_LOOKUP_KEEP_DST_REF);
if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
return make_blackhole(net, dst_orig->ops->family, dst_orig);
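XFRM_LOOKUP_KEEP_DST_REF exists because the first hunk reroutes the larval-drop case through the common error path, which releases dst_orig; xfrm_lookup_route() still needs dst_orig alive after an -EREMOTE return in order to build the blackhole route from it. The lookup flags as of this change, paraphrased from include/net/xfrm.h (bit values illustrative):

	#define XFRM_LOOKUP_ICMP		(1 << 0)
	#define XFRM_LOOKUP_QUEUE		(1 << 1)
	#define XFRM_LOOKUP_KEEP_DST_REF	(1 << 2)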