From cec56c8ff5e28f58ff13041dca7853738ae577a1 Mon Sep 17 00:00:00 2001
From: Tom Tucker
Date: Wed, 15 Feb 2012 11:30:00 -0600
Subject: svcrdma: Cleanup sparse warnings in the svcrdma module

The svcrdma transport was un-marshalling requests in-place. This
resulted in sparse warnings due to __beXX data containing both NBO and
HBO data.

The code has been restructured to do byte-swapping as the header is
parsed instead of when the header is validated immediately after
receipt.

Also moved extern declarations for the workqueue and memory pools to
the private header file.

Signed-off-by: Tom Tucker
Signed-off-by: J. Bruce Fields
---
 net/sunrpc/xprtrdma/svc_rdma_marshal.c | 66 +++++++++-------------------------
 1 file changed, 16 insertions(+), 50 deletions(-)
(limited to 'net/sunrpc/xprtrdma/svc_rdma_marshal.c')

diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 9530ef2d40d..8d2edddf48c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
 	struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
 
 	while (ch->rc_discrim != xdr_zero) {
-		u64 ch_offset;
-
 		if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
 		    (unsigned long)vaend) {
 			dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
 			return NULL;
 		}
-
-		ch->rc_discrim = ntohl(ch->rc_discrim);
-		ch->rc_position = ntohl(ch->rc_position);
-		ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
-		ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
-		va = (u32 *)&ch->rc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
 		ch++;
 	}
 	return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
 	*byte_count = 0;
 	*ch_count = 0;
 	for (; ch->rc_discrim != 0; ch++) {
-		*byte_count = *byte_count + ch->rc_target.rs_length;
+		*byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
 		*ch_count = *ch_count + 1;
 	}
 }
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
  */
 static u32 *decode_write_list(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
+
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
 
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
-
 	/*
 	 * rs_length is the 2nd 4B field in wc_target and taking its
 	 * address skips the list terminator
 	 */
-	return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
+	return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
 }
 
 static u32 *decode_reply_array(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
 
@@ -164,28 +142,15 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
-
-	return (u32 *)&ary->wc_array[ch_no];
+	return (u32 *)&ary->wc_array[nchunks];
 }
 
 int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
 
 void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
 				     int chunk_no,
-				     u32 rs_handle, u64 rs_offset,
+				     __be32 rs_handle,
+				     __be64 rs_offset,
 				     u32 write_len)
 {
 	struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
-	seg->rs_handle = htonl(rs_handle);
+	seg->rs_handle = rs_handle;
+	seg->rs_offset = rs_offset;
 	seg->rs_length = htonl(write_len);
-	xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
}
 
 void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
--
cgit v1.2.3
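
As background for the conversion above, here is a minimal user-space sketch
(not kernel code) of the pattern the commit message describes: byte-swapping
a header in place leaves the same field holding network-byte-order data
before the swap and host-byte-order data after it, which is exactly what
sparse's __bitwise checking of __beXX types flags, whereas converting into
host-order values at the point of use leaves the wire fields consistently
typed. The struct and function names (wire_chunk, unmarshal_in_place,
chunk_byte_count) are hypothetical, plain uint32_t stands in for the
kernel's __be32, and only <arpa/inet.h> is assumed for ntohl()/htonl().

#include <arpa/inet.h>		/* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>

/* Big-endian fields as they arrive off the wire (stand-in for __be32). */
struct wire_chunk {
	uint32_t handle;	/* network byte order */
	uint32_t length;	/* network byte order */
};

/*
 * Sparse-unfriendly style (what the patch removes): the header is
 * byte-swapped in place, so the same fields hold NBO data before the
 * call and HBO data afterwards.
 */
static void unmarshal_in_place(struct wire_chunk *ch)
{
	ch->handle = ntohl(ch->handle);
	ch->length = ntohl(ch->length);
}

/*
 * Sparse-friendly style (what the patch moves to): the wire fields stay
 * in network byte order and are converted into host-order values only
 * where they are actually consumed.
 */
static uint32_t chunk_byte_count(const struct wire_chunk *ch, int nchunks)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < nchunks; i++)
		total += ntohl(ch[i].length);	/* convert on use */
	return total;
}

int main(void)
{
	struct wire_chunk chunks[2] = {
		{ htonl(1), htonl(512) },
		{ htonl(2), htonl(1024) },
	};

	printf("total length = %u\n", (unsigned)chunk_byte_count(chunks, 2));
	(void)unmarshal_in_place;	/* shown for contrast only */
	return 0;
}

In the kernel itself the same effect is achieved by leaving rs_handle,
rs_length and friends declared __beXX and calling ntohl() only where a
host-order value is needed, as the hunks above do.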