path: root/net
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 18:02:55 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 18:02:55 -0800
commit     b6669737d3db7df79fad07180837c23dbe581db5 (patch)
tree       671a9d13fe5ab00d6a3c7c5c5c466802ca96d38a /net
parent     1cf0209c431fa7790253c532039d53b0773193aa (diff)
parent     dc107402ae06286a9ed33c32daf3f35514a7cb8d (diff)
Merge branch 'for-3.9' of git://linux-nfs.org/~bfields/linux
Pull nfsd changes from J Bruce Fields:
 "Miscellaneous bugfixes, plus:

   - An overhaul of the DRC cache by Jeff Layton.  The main effect is
     just to make it larger.  This decreases the chances of intermittent
     errors especially in the UDP case.  But we'll need to watch for any
     reports of performance regressions.

   - Containerized nfsd: with some limitations, we now support
     per-container nfs-service, thanks to extensive work from Stanislav
     Kinsbursky over the last year."

Some notes about conflicts, since there were *two* non-data semantic
conflicts here:

 - idr_remove_all() had been added by a memory leak fix, but has since
   become deprecated since idr_destroy() does it for us now.

 - xs_local_connect() had been added by this branch to make AF_LOCAL
   connections be synchronous, but in the meantime Trond had changed the
   calling convention in order to avoid a RCU dereference.

There were a couple of more obvious actual source-level conflicts due to
the hlist traversal changes and one just due to code changes next to each
other, but those were trivial.

* 'for-3.9' of git://linux-nfs.org/~bfields/linux: (49 commits)
  SUNRPC: make AF_LOCAL connect synchronous
  nfsd: fix compiler warning about ambiguous types in nfsd_cache_csum
  svcrpc: fix rpc server shutdown races
  svcrpc: make svc_age_temp_xprts enqueue under sv_lock
  lockd: nlmclnt_reclaim(): avoid stack overflow
  nfsd: enable NFSv4 state in containers
  nfsd: disable usermode helper client tracker in container
  nfsd: use proper net while reading "exports" file
  nfsd: containerize NFSd filesystem
  nfsd: fix comments on nfsd_cache_lookup
  SUNRPC: move cache_detail->cache_request callback call to cache_read()
  SUNRPC: remove "cache_request" argument in sunrpc_cache_pipe_upcall() function
  SUNRPC: rework cache upcall logic
  SUNRPC: introduce cache_detail->cache_request callback
  NFS: simplify and clean cache library
  NFS: use SUNRPC cache creation and destruction helper for DNS cache
  nfsd4: free_stid can be static
  nfsd: keep a checksum of the first 256 bytes of request
  sunrpc: trim off trailing checksum before returning decrypted or integrity authenticated buffer
  sunrpc: fix comment in struct xdr_buf definition
  ...
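To make the first of those semantic conflicts concrete, here is a minimal sketch of the idr cleanup pattern before and after the 3.9 idr rework. It is not taken from this merge; the example_idr parameter and the wrapper functions are hypothetical.

#include <linux/idr.h>

/* Hypothetical cleanup path written against the older idr API: the IDs
 * had to be removed explicitly before freeing the idr's internal layers. */
static void example_cleanup_old(struct idr *example_idr)
{
	idr_remove_all(example_idr);	/* drop every remaining ID ... */
	idr_destroy(example_idr);	/* ... then free internal structures */
}

/* As of the 3.9 idr rework, idr_destroy() releases any remaining IDs
 * itself, so the deprecated idr_remove_all() call can simply be dropped
 * when resolving a conflict like the one described above. */
static void example_cleanup_new(struct idr *example_idr)
{
	idr_destroy(example_idr);
}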
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/addr.c                     3
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c   2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c    18
-rw-r--r--  net/sunrpc/cache.c                   51
-rw-r--r--  net/sunrpc/clnt.c                     1
-rw-r--r--  net/sunrpc/rpcb_clnt.c                1
-rw-r--r--  net/sunrpc/svc.c                     13
-rw-r--r--  net/sunrpc/svc_xprt.c                81
-rw-r--r--  net/sunrpc/svcauth_unix.c            16
-rw-r--r--  net/sunrpc/xdr.c                     41
-rw-r--r--  net/sunrpc/xprtrdma/transport.c       1
-rw-r--r--  net/sunrpc/xprtsock.c                35
12 files changed, 162 insertions(+), 101 deletions(-)
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d11418f97f1..a622ad64acd 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -17,7 +17,8 @@
*/
#include <net/ipv6.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 107c4528654..88edec929d7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -574,6 +574,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+ /* Trim off the checksum blob */
+ xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
return GSS_S_COMPLETE;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index ecd1d58bf61..f7d34e7b6f8 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -182,12 +182,6 @@ static void rsi_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-
-
static int rsi_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
@@ -275,7 +269,7 @@ static struct cache_detail rsi_cache_template = {
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
.cache_put = rsi_put,
- .cache_upcall = rsi_upcall,
+ .cache_request = rsi_request,
.cache_parse = rsi_parse,
.match = rsi_match,
.init = rsi_init,
@@ -825,13 +819,17 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
* The server uses base of head iovec as read pointer, while the
* client uses separate pointer. */
static int
-unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
{
int stat = -EINVAL;
u32 integ_len, maj_stat;
struct xdr_netobj mic;
struct xdr_buf integ_buf;
+ /* Did we already verify the signature on the original pass through? */
+ if (rqstp->rq_deferred)
+ return 0;
+
integ_len = svc_getnl(&buf->head[0]);
if (integ_len & 3)
return stat;
@@ -854,6 +852,8 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
goto out;
if (svc_getnl(&buf->head[0]) != seq)
goto out;
+ /* trim off the mic at the end before returning */
+ xdr_buf_trim(buf, mic.len + 4);
stat = 0;
out:
kfree(mic.data);
@@ -1198,7 +1198,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
/* placeholders for length and seq. number: */
svc_putnl(resv, 0);
svc_putnl(resv, 0);
- if (unwrap_integ_data(&rqstp->rq_arg,
+ if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
gc->gc_seq, rsci->mechctx))
goto garbage_args;
break;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 39a4112faf5..25d58e76601 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -196,9 +196,9 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_update);
static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
{
- if (!cd->cache_upcall)
- return -EINVAL;
- return cd->cache_upcall(cd, h);
+ if (cd->cache_upcall)
+ return cd->cache_upcall(cd, h);
+ return sunrpc_cache_pipe_upcall(cd, h);
}
static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
@@ -750,6 +750,18 @@ struct cache_reader {
int offset; /* if non-0, we have a refcnt on next request */
};
+static int cache_request(struct cache_detail *detail,
+ struct cache_request *crq)
+{
+ char *bp = crq->buf;
+ int len = PAGE_SIZE;
+
+ detail->cache_request(detail, crq->item, &bp, &len);
+ if (len < 0)
+ return -EAGAIN;
+ return PAGE_SIZE - len;
+}
+
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
loff_t *ppos, struct cache_detail *cd)
{
@@ -784,6 +796,13 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
rq->readers++;
spin_unlock(&queue_lock);
+ if (rq->len == 0) {
+ err = cache_request(cd, rq);
+ if (err < 0)
+ goto out;
+ rq->len = err;
+ }
+
if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
err = -EAGAIN;
spin_lock(&queue_lock);
@@ -1140,17 +1159,14 @@ static bool cache_listeners_exist(struct cache_detail *detail)
*
* Each request is at most one page long.
*/
-int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
- void (*cache_request)(struct cache_detail *,
- struct cache_head *,
- char **,
- int *))
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
char *buf;
struct cache_request *crq;
- char *bp;
- int len;
+
+ if (!detail->cache_request)
+ return -EINVAL;
if (!cache_listeners_exist(detail)) {
warn_no_listener(detail);
@@ -1167,19 +1183,10 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
return -EAGAIN;
}
- bp = buf; len = PAGE_SIZE;
-
- cache_request(detail, h, &bp, &len);
-
- if (len < 0) {
- kfree(buf);
- kfree(crq);
- return -EAGAIN;
- }
crq->q.reader = 0;
crq->item = cache_get(h);
crq->buf = buf;
- crq->len = PAGE_SIZE - len;
+ crq->len = 0;
crq->readers = 0;
spin_lock(&queue_lock);
list_add_tail(&crq->q.list, &detail->queue);
@@ -1605,7 +1612,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
if (p == NULL)
goto out_nomem;
- if (cd->cache_upcall || cd->cache_parse) {
+ if (cd->cache_request || cd->cache_parse) {
p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
cd->u.procfs.proc_ent,
&cache_file_operations_procfs, cd);
@@ -1614,7 +1621,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
goto out_nomem;
}
if (cd->cache_show) {
- p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
+ p = proc_create_data("content", S_IFREG|S_IRUSR,
cd->u.procfs.proc_ent,
&content_file_operations_procfs, cd);
cd->u.procfs.content_ent = p;
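For cache implementers, the practical effect of this rework is that a cache_detail no longer wraps sunrpc_cache_pipe_upcall() in its own .cache_upcall method; it only supplies a formatting callback via .cache_request, and the upcall content is generated lazily in cache_read() when a daemon reads the channel file. A minimal sketch of a converted template follows; the "auth.example" cache and its callbacks are hypothetical, not part of this patch set.

static void example_request(struct cache_detail *cd, struct cache_head *h,
			    char **bpp, int *blen)
{
	/* format the upcall line for this entry into *bpp, consuming *blen */
}

static int example_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	/* parse the daemon's downcall and update the cache */
	return -EINVAL;
}

static struct cache_detail example_cache_template = {
	.owner		= THIS_MODULE,
	.name		= "auth.example",
	.cache_request	= example_request,	/* replaces the old .cache_upcall wrapper */
	.cache_parse	= example_parse,
};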
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a9f7906c1a6..d7a369e6108 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -33,6 +33,7 @@
#include <linux/rcupdate.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 795a0f4e920..3df764dc330 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -26,6 +26,7 @@
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index dbf12ac5ecb..89a588b4478 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
- /*
- * The set of xprts (contained in the sv_tempsocks and
- * sv_permsocks lists) is now constant, since it is modified
- * only by accepting new sockets (done by service threads in
- * svc_recv) or aging old ones (done by sv_temptimer), or
- * configuration changes (excluded by whatever locking the
- * caller is using--nfsd_mutex in the case of nfsd). So it's
- * safe to traverse those lists and shut everything down:
- */
svc_close_net(serv, net);
if (serv->sv_shutdown)
@@ -1042,6 +1033,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
/*
* dprintk the given error with the address of the client that caused it.
*/
+#ifdef RPC_DEBUG
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
@@ -1058,6 +1050,9 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
va_end(args);
}
+#else
+static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
+#endif
/*
* Common routine for processing the RPC request.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b8e47fac731..80a6640f329 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -499,7 +499,8 @@ void svc_wake_up(struct svc_serv *serv)
rqstp->rq_xprt = NULL;
*/
wake_up(&rqstp->rq_wait);
- }
+ } else
+ pool->sp_task_pending = 1;
spin_unlock_bh(&pool->sp_lock);
}
}
@@ -634,7 +635,13 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
+ pool->sp_task_pending = 0;
} else {
+ if (pool->sp_task_pending) {
+ pool->sp_task_pending = 0;
+ spin_unlock_bh(&pool->sp_lock);
+ return ERR_PTR(-EAGAIN);
+ }
/* No data pending. Go to sleep */
svc_thread_enqueue(pool, rqstp);
@@ -856,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure)
struct svc_serv *serv = (struct svc_serv *)closure;
struct svc_xprt *xprt;
struct list_head *le, *next;
- LIST_HEAD(to_be_aged);
dprintk("svc_age_temp_xprts\n");
@@ -877,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure)
if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
test_bit(XPT_BUSY, &xprt->xpt_flags))
continue;
- svc_xprt_get(xprt);
- list_move(le, &to_be_aged);
+ list_del_init(le);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
set_bit(XPT_DETACHED, &xprt->xpt_flags);
- }
- spin_unlock_bh(&serv->sv_lock);
-
- while (!list_empty(&to_be_aged)) {
- le = to_be_aged.next;
- /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
- list_del_init(le);
- xprt = list_entry(le, struct svc_xprt, xpt_list);
-
dprintk("queuing xprt %p for closing\n", xprt);
/* a thread will dequeue and close it soon */
svc_xprt_enqueue(xprt);
- svc_xprt_put(xprt);
}
+ spin_unlock_bh(&serv->sv_lock);
mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
@@ -959,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
{
struct svc_xprt *xprt;
+ int ret = 0;
spin_lock(&serv->sv_lock);
list_for_each_entry(xprt, xprt_list, xpt_list) {
if (xprt->xpt_net != net)
continue;
+ ret++;
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- set_bit(XPT_BUSY, &xprt->xpt_flags);
+ svc_xprt_enqueue(xprt);
}
spin_unlock(&serv->sv_lock);
+ return ret;
}
-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
{
struct svc_pool *pool;
struct svc_xprt *xprt;
@@ -988,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
if (xprt->xpt_net != net)
continue;
list_del_init(&xprt->xpt_ready);
+ spin_unlock_bh(&pool->sp_lock);
+ return xprt;
}
spin_unlock_bh(&pool->sp_lock);
}
+ return NULL;
}
-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
- LIST_HEAD(victims);
- spin_lock(&serv->sv_lock);
- list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
- if (xprt->xpt_net != net)
- continue;
- list_move(&xprt->xpt_list, &victims);
- }
- spin_unlock(&serv->sv_lock);
-
- list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+ while ((xprt = svc_dequeue_net(serv, net))) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_delete_xprt(xprt);
+ }
}
+/*
+ * Server threads may still be running (especially in the case where the
+ * service is still running in other network namespaces).
+ *
+ * So we shut down sockets the same way we would on a running server, by
+ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
+ * the close. In the case there are no such other threads,
+ * svc_clean_up_xprts() does a simple version of a server's main event
+ * loop, and in the case where there are other threads, we may need to
+ * wait a little while and then check again to see if they're done.
+ */
void svc_close_net(struct svc_serv *serv, struct net *net)
{
- svc_close_list(serv, &serv->sv_tempsocks, net);
- svc_close_list(serv, &serv->sv_permsocks, net);
+ int delay = 0;
- svc_clear_pools(serv, net);
- /*
- * At this point the sp_sockets lists will stay empty, since
- * svc_xprt_enqueue will not add new entries without taking the
- * sp_lock and checking XPT_BUSY.
- */
- svc_clear_list(serv, &serv->sv_tempsocks, net);
- svc_clear_list(serv, &serv->sv_permsocks, net);
+ while (svc_close_list(serv, &serv->sv_permsocks, net) +
+ svc_close_list(serv, &serv->sv_tempsocks, net)) {
+
+ svc_clean_up_xprts(serv, net);
+ msleep(delay++);
+ }
}
/*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index a1852e19ed0..c3f9e1ef7f5 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -6,6 +6,7 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
@@ -17,7 +18,6 @@
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH
-#include <linux/sunrpc/clnt.h>
#include "netns.h"
@@ -157,11 +157,6 @@ static void ip_map_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
-}
-
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
@@ -475,11 +470,6 @@ static void unix_gid_request(struct cache_detail *cd,
(*bpp)[-1] = '\n';
}
-static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
-}
-
static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
static int unix_gid_parse(struct cache_detail *cd,
@@ -586,7 +576,7 @@ static struct cache_detail unix_gid_cache_template = {
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
.cache_put = unix_gid_put,
- .cache_upcall = unix_gid_upcall,
+ .cache_request = unix_gid_request,
.cache_parse = unix_gid_parse,
.cache_show = unix_gid_show,
.match = unix_gid_match,
@@ -885,7 +875,7 @@ static struct cache_detail ip_map_cache_template = {
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
.cache_put = ip_map_put,
- .cache_upcall = ip_map_upcall,
+ .cache_request = ip_map_request,
.cache_parse = ip_map_parse,
.cache_show = ip_map_show,
.match = ip_map_match,
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 56055632f15..75edcfad6e2 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -879,6 +879,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+ size_t cur;
+ unsigned int trim = len;
+
+ if (buf->tail[0].iov_len) {
+ cur = min_t(size_t, buf->tail[0].iov_len, trim);
+ buf->tail[0].iov_len -= cur;
+ trim -= cur;
+ if (!trim)
+ goto fix_len;
+ }
+
+ if (buf->page_len) {
+ cur = min_t(unsigned int, buf->page_len, trim);
+ buf->page_len -= cur;
+ trim -= cur;
+ if (!trim)
+ goto fix_len;
+ }
+
+ if (buf->head[0].iov_len) {
+ cur = min_t(size_t, buf->head[0].iov_len, trim);
+ buf->head[0].iov_len -= cur;
+ trim -= cur;
+ }
+fix_len:
+ buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
unsigned int this_len;
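As a brief usage note on the new helper: a caller that has already verified a trailing integrity checksum can use xdr_buf_trim() to hide both the checksum and its 4-byte XDR length word from the rest of the RPC layer, as the GSS changes earlier in this diff do. A minimal sketch, in which the wrapper function and mic_len parameter are hypothetical:

/* Drop an already-verified trailing MIC of mic_len bytes, plus the 4-byte
 * XDR length word that accompanies it, from the end of the decoded buffer. */
static void example_drop_verified_mic(struct xdr_buf *buf, unsigned int mic_len)
{
	/* shrinks the tail, pages and head as needed, and fixes up buf->len */
	xdr_buf_trim(buf, mic_len + 4);
}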
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index d0074289708..794312f22b9 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -51,6 +51,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
+#include <linux/sunrpc/addr.h>
#include "xprt_rdma.h"
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 37cbda63f45..c1d8476b769 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -33,6 +33,7 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
@@ -1867,13 +1868,9 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
* @xprt: RPC transport to connect
* @transport: socket transport to connect
* @create_sock: function to create a socket of the correct type
- *
- * Invoked by a work queue tasklet.
*/
-static void xs_local_setup_socket(struct work_struct *work)
+static int xs_local_setup_socket(struct sock_xprt *transport)
{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
struct socket *sock;
int status = -EIO;
@@ -1918,6 +1915,30 @@ out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
current->flags &= ~PF_FSTRANS;
+ return status;
+}
+
+static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ int ret;
+
+ if (RPC_IS_ASYNC(task)) {
+ /*
+ * We want the AF_LOCAL connect to be resolved in the
+ * filesystem namespace of the process making the rpc
+ * call. Thus we connect synchronously.
+ *
+ * If we want to support asynchronous AF_LOCAL calls,
+ * we'll need to figure out how to pass a namespace to
+ * connect.
+ */
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
+ ret = xs_local_setup_socket(transport);
+ if (ret && !RPC_IS_SOFTCONN(task))
+ msleep_interruptible(15000);
}
#ifdef CONFIG_SUNRPC_SWAP
@@ -2455,7 +2476,7 @@ static struct rpc_xprt_ops xs_local_ops = {
.alloc_slot = xprt_alloc_slot,
.rpcbind = xs_local_rpcbind,
.set_port = xs_local_set_port,
- .connect = xs_connect,
+ .connect = xs_local_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
.send_request = xs_local_send_request,
@@ -2628,8 +2649,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
goto out_err;
}
xprt_set_bound(xprt);
- INIT_DELAYED_WORK(&transport->connect_worker,
- xs_local_setup_socket);
xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
break;
default: