From 725f2865d4df31ac0768b13ae763beadc4bb8ce9 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:46 -0400 Subject: gss_krb5: Introduce encryption type framework Make the client and server code consistent regarding the extra buffer space made available for the auth code when wrapping data. Add some comments/documentation about the available buffer space in the xdr_buf head and tail when gss_wrap is called. Add a compile-time check to make sure we are not exceeding the available buffer space. Add a central function to shift head data. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 2 +- net/sunrpc/auth_gss/gss_krb5_crypto.c | 38 +++++++++++++++++++++++++++++++++++ net/sunrpc/auth_gss/gss_krb5_wrap.c | 6 ++---- 3 files changed, 41 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index c389ccf6437..75602ece58e 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -61,7 +61,7 @@ static const struct rpc_credops gss_nullops; # define RPCDBG_FACILITY RPCDBG_AUTH #endif -#define GSS_CRED_SLACK 1024 +#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2) /* length of a krb5 verifier (48), plus data added before arguments when * using integrity (two 4-byte integers): */ #define GSS_VERF_SLACK 100 diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e9b63617668..746b3e139ae 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -325,3 +325,41 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); } + +/* + * This function makes the assumption that it was ultimately called + * from gss_wrap(). + * + * The client auth_gss code moves any existing tail data into a + * separate page before calling gss_wrap. + * The server svcauth_gss code ensures that both the head and the + * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap. + * + * Even with that guarantee, this function may be called more than + * once in the processing of gss_wrap(). The best we can do is + * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the + * largest expected shift will fit within RPC_MAX_AUTH_SIZE. + * At run-time we can verify that a single invocation of this + * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE. + */ + +int +xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen) +{ + u8 *p; + + if (shiftlen == 0) + return 0; + + BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE); + BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE); + + p = buf->head[0].iov_base + base; + + memmove(p + shiftlen, p, buf->head[0].iov_len - base); + + buf->head[0].iov_len += shiftlen; + buf->len += shiftlen; + + return 0; +} diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index a6e905637e0..496281fabb9 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -155,11 +155,9 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, ptr = buf->head[0].iov_base + offset; /* shift data to make room for header. */ + xdr_extend_head(buf, offset, headlen); + /* XXX Would be cleverer to encrypt while copying. */ - /* XXX bounds checking, slack, etc. 
*/ - memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); - buf->head[0].iov_len += headlen; - buf->len += headlen; BUG_ON((buf->len - offset - headlen) % blocksize); g_make_token_header(&kctx->mech_used, -- cgit v1.2.3 From 7561042fb7870be0b4ee57efddce68bda8968abf Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:47 -0400 Subject: gss_krb5: Added and improved code comments Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 12 +++++++++--- net/sunrpc/auth_gss/gss_mech_switch.c | 14 ++++++++++++++ net/sunrpc/auth_gss/svcauth_gss.c | 15 +++++++++++++++ 3 files changed, 38 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 75602ece58e..d64a58b8ed3 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1316,15 +1316,21 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, inpages = snd_buf->pages + first; snd_buf->pages = rqstp->rq_enc_pages; snd_buf->page_base -= first << PAGE_CACHE_SHIFT; - /* Give the tail its own page, in case we need extra space in the - * head when wrapping: */ + /* + * Give the tail its own page, in case we need extra space in the + * head when wrapping: + * + * call_allocate() allocates twice the slack space required + * by the authentication flavor to rq_callsize. + * For GSS, slack is GSS_CRED_SLACK. + */ if (snd_buf->page_len || snd_buf->tail[0].iov_len) { tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); snd_buf->tail[0].iov_base = tmp; } maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); - /* RPC_SLACK_SPACE should prevent this ever happening: */ + /* slack space should prevent this ever happening: */ BUG_ON(snd_buf->len > snd_buf->buflen); status = -EIO; /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 76e4c6f4ac3..28a84ef41d1 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -285,6 +285,20 @@ gss_verify_mic(struct gss_ctx *context_handle, mic_token); } +/* + * This function is called from both the client and server code. + * Each makes guarantees about how much "slack" space is available + * for the underlying function in "buf"'s head and tail while + * performing the wrap. + * + * The client and server code allocate RPC_MAX_AUTH_SIZE extra + * space in both the head and tail which is available for use by + * the wrap function. + * + * Underlying functions should verify they do not use more than + * RPC_MAX_AUTH_SIZE of extra space in either the head or tail + * when performing the wrap. 
+ */ u32 gss_wrap(struct gss_ctx *ctx_id, int offset, diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index b81e790ef9f..1d9ac4ac818 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1315,6 +1315,14 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) inpages = resbuf->pages; /* XXX: Would be better to write some xdr helper functions for * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ + + /* + * If there is currently tail data, make sure there is + * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in + * the page, and move the current tail data such that + * there is RPC_MAX_AUTH_SIZE slack space available in + * both the head and tail. + */ if (resbuf->tail[0].iov_base) { BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base + PAGE_SIZE); @@ -1327,6 +1335,13 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp) resbuf->tail[0].iov_len); resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; } + /* + * If there is no current tail data, make sure there is + * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the + * allotted page, and set up tail information such that there + * is RPC_MAX_AUTH_SIZE slack space available in both the + * head and tail. + */ if (resbuf->tail[0].iov_base == NULL) { if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) return -ENOMEM; -- cgit v1.2.3 From 54ec3d462f3c2a3fe48a7bd592160bee31360087 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 17 Mar 2010 13:02:48 -0400 Subject: gss_krb5: Don't expect blocksize to always be 8 when calculating padding Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_wrap.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 496281fabb9..5d6c3b12ea7 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -12,10 +12,7 @@ static inline int gss_krb5_padding(int blocksize, int length) { - /* Most of the code is block-size independent but currently we - * use only 8: */ - BUG_ON(blocksize != 8); - return 8 - (length & 7); + return blocksize - (length % blocksize); } static inline void -- cgit v1.2.3 From 1ac3719a2214c545c7e19d34e272a148ca9a24f1 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:49 -0400 Subject: gss_krb5: split up functions in preparation of adding new enctypes Add encryption type to the krb5 context structure and use it to switch to the correct functions depending on the encryption type. 
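The shape of this change is the same in the seal, unseal and wrap paths: the existing DES-only routine becomes a static *_v1 helper, and the exported entry point dispatches on the enctype recorded in the context. A condensed sketch of the get_mic case, abridged from the gss_krb5_seal.c hunk below (comments added here; later patches in the series add further cases to the switch):

u32
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
		     struct xdr_netobj *token)
{
	struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;

	switch (ctx->enctype) {
	default:
		BUG();				/* unknown enctype in context */
	case ENCTYPE_DES_CBC_RAW:		/* only DES is wired up so far */
		return gss_get_mic_v1(ctx, text, token);
	}
}
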
Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 1 + net/sunrpc/auth_gss/gss_krb5_seal.c | 20 +++++++++++++++--- net/sunrpc/auth_gss/gss_krb5_unseal.c | 21 ++++++++++++++++--- net/sunrpc/auth_gss/gss_krb5_wrap.c | 38 +++++++++++++++++++++++++++++------ 4 files changed, 68 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 2deb0ed72ff..0cd940e897e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -139,6 +139,7 @@ gss_import_sec_context_kerberos(const void *p, p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) goto out_err_free_ctx; + ctx->enctype = ENCTYPE_DES_CBC_RAW; /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 88fe6e75ed7..71c2014e7eb 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -70,11 +70,10 @@ DEFINE_SPINLOCK(krb5_seq_lock); -u32 -gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, +static u32 +gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; char cksumdata[16]; struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; unsigned char *ptr, *msg_start; @@ -120,3 +119,18 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } + +u32 +gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, + struct xdr_netobj *token) +{ + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; + + switch (ctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + return gss_get_mic_v1(ctx, text, token); + } +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index ce6c247edad..069d4b59807 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -70,11 +70,10 @@ /* read_token is a mic token, and message_buffer is the data that the mic was * supposedly taken over. */ -u32 -gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, +static u32 +gss_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, struct xdr_netobj *read_token) { - struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; int signalg; int sealalg; char cksumdata[16]; @@ -135,3 +134,19 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, return GSS_S_COMPLETE; } + +u32 +gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, + struct xdr_buf *message_buffer, + struct xdr_netobj *read_token) +{ + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; + + switch (ctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + return gss_verify_mic_v1(ctx, message_buffer, read_token); + } +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 5d6c3b12ea7..b45b59b17ae 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -124,11 +124,10 @@ make_confounder(char *p, u32 conflen) /* XXX factor out common code with seal/unseal. 
*/ -u32 -gss_wrap_kerberos(struct gss_ctx *ctx, int offset, +static u32 +gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf, struct page **pages) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; char cksumdata[16]; struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; int blocksize = 0, plainlen; @@ -203,10 +202,9 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } -u32 -gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) +static u32 +gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) { - struct krb5_ctx *kctx = ctx->internal_ctx_id; int signalg; int sealalg; char cksumdata[16]; @@ -294,3 +292,31 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) return GSS_S_COMPLETE; } + +u32 +gss_wrap_kerberos(struct gss_ctx *gctx, int offset, + struct xdr_buf *buf, struct page **pages) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + + switch (kctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + return gss_wrap_kerberos_v1(kctx, offset, buf, pages); + } +} + +u32 +gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) +{ + struct krb5_ctx *kctx = gctx->internal_ctx_id; + + switch (kctx->enctype) { + default: + BUG(); + case ENCTYPE_DES_CBC_RAW: + return gss_unwrap_kerberos_v1(kctx, offset, buf); + } +} + -- cgit v1.2.3 From a8cc1cb7d7a12b0e2855832d10cfbfaffebfad6c Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:50 -0400 Subject: gss_krb5: prepare for new context format Prepare for new context format by splitting out the old "v1" context processing function Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 63 +++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 0cd940e897e..afe09108e1b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -123,53 +123,47 @@ out_err: } static int -gss_import_sec_context_kerberos(const void *p, - size_t len, - struct gss_ctx *ctx_id) +gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) { - const void *end = (const void *)((const char *)p + len); - struct krb5_ctx *ctx; int tmp; - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { - p = ERR_PTR(-ENOMEM); - goto out_err; - } - p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; + + /* Old format supports only DES! 
Any other enctype uses new format */ ctx->enctype = ENCTYPE_DES_CBC_RAW; + /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore * completely (like the next twenty bytes): */ if (unlikely(p + 20 > end || p + 20 < p)) - goto out_err_free_ctx; + goto out_err; p += 20; p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; if (tmp != SGN_ALG_DES_MAC_MD5) { p = ERR_PTR(-ENOSYS); - goto out_err_free_ctx; + goto out_err; } p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; if (tmp != SEAL_ALG_DES) { p = ERR_PTR(-ENOSYS); - goto out_err_free_ctx; + goto out_err; } p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) - goto out_err_free_ctx; + goto out_err; p = get_key(p, end, &ctx->enc); if (IS_ERR(p)) goto out_err_free_mech; @@ -181,9 +175,6 @@ gss_import_sec_context_kerberos(const void *p, goto out_err_free_key2; } - ctx_id->internal_ctx_id = ctx; - - dprintk("RPC: Successfully imported new context.\n"); return 0; out_err_free_key2: @@ -192,12 +183,36 @@ out_err_free_key1: crypto_free_blkcipher(ctx->enc); out_err_free_mech: kfree(ctx->mech_used.data); -out_err_free_ctx: - kfree(ctx); out_err: return PTR_ERR(p); } +static int +gss_import_sec_context_kerberos(const void *p, size_t len, + struct gss_ctx *ctx_id) +{ + const void *end = (const void *)((const char *)p + len); + struct krb5_ctx *ctx; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (ctx == NULL) + return -ENOMEM; + + if (len == 85) + ret = gss_import_v1_context(p, end, ctx); + else + ret = -EINVAL; + + if (ret == 0) + ctx_id->internal_ctx_id = ctx; + else + kfree(ctx); + + dprintk("RPC: %s: returning %d\n", __func__, ret); + return ret; +} + static void gss_delete_sec_context_kerberos(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; -- cgit v1.2.3 From 81d4a4333a1dfd6070f046265d928bb4c79aff88 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:51 -0400 Subject: gss_krb5: introduce encryption type framework Add enctype framework and change functions to use the generic values from it rather than the values hard-coded for des. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 18 +++---- net/sunrpc/auth_gss/gss_krb5_mech.c | 90 ++++++++++++++++++++++++++++------- net/sunrpc/auth_gss/gss_krb5_seal.c | 49 +++++++++++-------- net/sunrpc/auth_gss/gss_krb5_unseal.c | 15 +++--- net/sunrpc/auth_gss/gss_krb5_wrap.c | 79 ++++++++++++++++++++++-------- 5 files changed, 182 insertions(+), 69 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 746b3e139ae..ccd5236953f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -1,7 +1,7 @@ /* * linux/net/sunrpc/gss_krb5_crypto.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. 
* * Andy Adamson @@ -58,13 +58,13 @@ krb5_encrypt( { u32 ret = -EINVAL; struct scatterlist sg[1]; - u8 local_iv[16] = {0}; + u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; - if (crypto_blkcipher_ivsize(tfm) > 16) { + if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; @@ -92,13 +92,13 @@ krb5_decrypt( { u32 ret = -EINVAL; struct scatterlist sg[1]; - u8 local_iv[16] = {0}; + u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; if (length % crypto_blkcipher_blocksize(tfm) != 0) goto out; - if (crypto_blkcipher_ivsize(tfm) > 16) { + if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", crypto_blkcipher_ivsize(tfm)); goto out; @@ -157,7 +157,7 @@ out: } struct encryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ + u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; struct blkcipher_desc desc; int pos; struct xdr_buf *outbuf; @@ -198,7 +198,7 @@ encryptor(struct scatterlist *sg, void *data) desc->fraglen += sg->length; desc->pos += sg->length; - fraglen = thislen & 7; /* XXX hardcoded blocksize */ + fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); thislen -= fraglen; if (thislen == 0) @@ -256,7 +256,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, } struct decryptor_desc { - u8 iv[8]; /* XXX hard-coded blocksize */ + u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; struct blkcipher_desc desc; struct scatterlist frags[4]; int fragno; @@ -278,7 +278,7 @@ decryptor(struct scatterlist *sg, void *data) desc->fragno++; desc->fraglen += sg->length; - fraglen = thislen & 7; /* XXX hardcoded blocksize */ + fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); thislen -= fraglen; if (thislen == 0) diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index afe09108e1b..a66eb706aeb 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -1,7 +1,7 @@ /* * linux/net/sunrpc/gss_krb5_mech.c * - * Copyright (c) 2001 The Regents of the University of Michigan. + * Copyright (c) 2001-2008 The Regents of the University of Michigan. * All rights reserved. 
* * Andy Adamson @@ -48,6 +48,50 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { + /* + * DES (All DES enctypes are mapped to the same gss functionality) + */ + { + .etype = ENCTYPE_DES_CBC_RAW, + .ctype = CKSUMTYPE_RSA_MD5, + .name = "des-cbc-crc", + .encrypt_name = "cbc(des)", + .cksum_name = "md5", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .signalg = SGN_ALG_DES_MAC_MD5, + .sealalg = SEAL_ALG_DES, + .keybytes = 7, + .keylength = 8, + .blocksize = 8, + .cksumlength = 8, + }, +}; + +static const int num_supported_enctypes = + ARRAY_SIZE(supported_gss_krb5_enctypes); + +static int +supported_gss_krb5_enctype(int etype) +{ + int i; + for (i = 0; i < num_supported_enctypes; i++) + if (supported_gss_krb5_enctypes[i].etype == etype) + return 1; + return 0; +} + +static const struct gss_krb5_enctype * +get_gss_krb5_enctype(int etype) +{ + int i; + for (i = 0; i < num_supported_enctypes; i++) + if (supported_gss_krb5_enctypes[i].etype == etype) + return &supported_gss_krb5_enctypes[i]; + return NULL; +} + static const void * simple_get_bytes(const void *p, const void *end, void *res, int len) { @@ -78,35 +122,45 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) } static inline const void * -get_key(const void *p, const void *end, struct crypto_blkcipher **res) +get_key(const void *p, const void *end, + struct krb5_ctx *ctx, struct crypto_blkcipher **res) { struct xdr_netobj key; int alg; - char *alg_name; p = simple_get_bytes(p, end, &alg, sizeof(alg)); if (IS_ERR(p)) goto out_err; + + switch (alg) { + case ENCTYPE_DES_CBC_CRC: + case ENCTYPE_DES_CBC_MD4: + case ENCTYPE_DES_CBC_MD5: + /* Map all these key types to ENCTYPE_DES_CBC_RAW */ + alg = ENCTYPE_DES_CBC_RAW; + break; + } + + if (!supported_gss_krb5_enctype(alg)) { + printk(KERN_WARNING "gss_kerberos_mech: unsupported " + "encryption key algorithm %d\n", alg); + goto out_err; + } p = simple_get_netobj(p, end, &key); if (IS_ERR(p)) goto out_err; - switch (alg) { - case ENCTYPE_DES_CBC_RAW: - alg_name = "cbc(des)"; - break; - default: - printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); - goto out_err_free_key; - } - *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); + *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); if (IS_ERR(*res)) { - printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); + printk(KERN_WARNING "gss_kerberos_mech: unable to initialize " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); *res = NULL; goto out_err_free_key; } if (crypto_blkcipher_setkey(*res, key.data, key.len)) { - printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); + printk(KERN_WARNING "gss_kerberos_mech: error setting key for " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); goto out_err_free_tfm; } @@ -134,6 +188,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) /* Old format supports only DES! 
Any other enctype uses new format */ ctx->enctype = ENCTYPE_DES_CBC_RAW; + ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); + if (ctx->gk5e == NULL) + goto out_err; + /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore @@ -164,10 +222,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) goto out_err; - p = get_key(p, end, &ctx->enc); + p = get_key(p, end, ctx, &ctx->enc); if (IS_ERR(p)) goto out_err_free_mech; - p = get_key(p, end, &ctx->seq); + p = get_key(p, end, ctx, &ctx->seq); if (IS_ERR(p)) goto out_err_free_key1; if (p != end) { diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 71c2014e7eb..46c6f44e5c3 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -3,7 +3,7 @@ * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5seal.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson @@ -70,36 +70,47 @@ DEFINE_SPINLOCK(krb5_seq_lock); +static char * +setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) +{ + __be16 *ptr, *krb5_hdr; + int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; + + token->len = g_token_size(&ctx->mech_used, body_size); + + ptr = (__be16 *)token->data; + g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); + + /* ptr now at start of header described in rfc 1964, section 1.2.1: */ + krb5_hdr = ptr; + *ptr++ = KG_TOK_MIC_MSG; + *ptr++ = cpu_to_le16(ctx->gk5e->signalg); + *ptr++ = SEAL_ALG_NONE; + *ptr++ = 0xffff; + + return (char *)krb5_hdr; +} + static u32 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) { - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; - unsigned char *ptr, *msg_start; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; + void *ptr; s32 now; u32 seq_send; - dprintk("RPC: gss_krb5_seal\n"); + dprintk("RPC: %s\n", __func__); BUG_ON(ctx == NULL); now = get_seconds(); - token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8); - - ptr = token->data; - g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr); - - /* ptr now at header described in rfc 1964, section 1.2.1: */ - ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff); - ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff); - - msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8; - - *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); - memset(ptr + 4, 0xff, 4); + ptr = setup_token(ctx, token); - if (make_checksum("md5", ptr, 8, text, 0, &md5cksum)) + if (make_checksum((char *)ctx->gk5e->cksum_name, ptr, 8, + text, 0, &md5cksum)) return GSS_S_FAILURE; if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 069d4b59807..10ee641a39d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -3,7 +3,7 @@ * * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/k5unseal.c * - * Copyright (c) 2000 The Regents of the University of Michigan. + * Copyright (c) 2000-2008 The Regents of the University of Michigan. * All rights reserved. 
* * Andy Adamson @@ -76,8 +76,9 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, { int signalg; int sealalg; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; s32 now; int direction; u32 seqnum; @@ -97,7 +98,7 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, /* XXX sanity-check bodysize?? */ signalg = ptr[2] + (ptr[3] << 8); - if (signalg != SGN_ALG_DES_MAC_MD5) + if (signalg != ctx->gk5e->signalg) return GSS_S_DEFECTIVE_TOKEN; sealalg = ptr[4] + (ptr[5] << 8); @@ -107,13 +108,15 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; - if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum)) + if (make_checksum((char *)ctx->gk5e->cksum_name, ptr, 8, + message_buffer, 0, &md5cksum)) return GSS_S_FAILURE; if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16)) return GSS_S_FAILURE; - if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) + if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, + ctx->gk5e->cksumlength)) return GSS_S_BAD_SIG; /* it got through unscathed. Make sure the context is unexpired */ diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index b45b59b17ae..7188891bcc3 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -1,3 +1,33 @@ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. 
+ */ + #include #include #include @@ -128,8 +158,9 @@ static u32 gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf, struct page **pages) { - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; int blocksize = 0, plainlen; unsigned char *ptr, *msg_start; s32 now; @@ -137,7 +168,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct page **tmp_pages; u32 seq_send; - dprintk("RPC: gss_wrap_kerberos\n"); + dprintk("RPC: %s\n", __func__); now = get_seconds(); @@ -146,8 +177,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, BUG_ON((buf->len - offset) % blocksize); plainlen = blocksize + buf->len - offset; - headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - - (buf->len - offset); + headlen = g_token_size(&kctx->mech_used, + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) - + (buf->len - offset); ptr = buf->head[0].iov_base + offset; /* shift data to make room for header. */ @@ -157,25 +189,26 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, BUG_ON((buf->len - offset - headlen) % blocksize); g_make_token_header(&kctx->mech_used, - GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); + GSS_KRB5_TOK_HDR_LEN + + kctx->gk5e->cksumlength + plainlen, &ptr); /* ptr now at header described in rfc 1964, section 1.2.1: */ ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); - msg_start = ptr + 24; + msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength; - *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); + *(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg); memset(ptr + 4, 0xff, 4); - *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); + *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); make_confounder(msg_start, blocksize); /* XXXJBF: UGH!: */ tmp_pages = buf->pages; buf->pages = pages; - if (make_checksum("md5", ptr, 8, buf, + if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf, offset + headlen - blocksize, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; @@ -207,8 +240,9 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) { int signalg; int sealalg; - char cksumdata[16]; - struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), + .data = cksumdata}; s32 now; int direction; s32 seqnum; @@ -217,6 +251,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) void *data_start, *orig_start; int data_len; int blocksize; + int crypt_offset; dprintk("RPC: gss_unwrap_kerberos\n"); @@ -234,22 +269,27 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) /* get the sign and seal algorithms */ signalg = ptr[2] + (ptr[3] << 8); - if (signalg != SGN_ALG_DES_MAC_MD5) + if (signalg != kctx->gk5e->signalg) return GSS_S_DEFECTIVE_TOKEN; sealalg = ptr[4] + (ptr[5] << 8); - if (sealalg != SEAL_ALG_DES) + if (sealalg != kctx->gk5e->sealalg) return GSS_S_DEFECTIVE_TOKEN; if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; - if (gss_decrypt_xdr_buf(kctx->enc, buf, - ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) + /* + * Data starts after token header and checksum. 
ptr points + * to the beginning of the token header + */ + crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - + (unsigned char *)buf->head[0].iov_base; + if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) return GSS_S_DEFECTIVE_TOKEN; - if (make_checksum("md5", ptr, 8, buf, - ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) + if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf, + crypt_offset, &md5cksum)) return GSS_S_FAILURE; if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, @@ -280,7 +320,8 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) * better to copy and encrypt at the same time. */ blocksize = crypto_blkcipher_blocksize(kctx->enc); - data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; + data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + + blocksize; orig_start = buf->head[0].iov_base + offset; data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; memmove(orig_start, data_start, data_len); -- cgit v1.2.3 From e1f6c07b1160ef28e8754d12e6c03288dd9d5ca8 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:52 -0400 Subject: gss_krb5: add ability to have a keyed checksum (hmac) Encryption types besides DES may use a keyed checksum (hmac). Modify the make_checksum() function to allow for a key and take care of enctype-specific processing such as truncating the resulting hash. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 54 +++++++++++++++++++++++++++++------ net/sunrpc/auth_gss/gss_krb5_mech.c | 1 + net/sunrpc/auth_gss/gss_krb5_seal.c | 13 +++++---- net/sunrpc/auth_gss/gss_krb5_unseal.c | 13 +++++---- net/sunrpc/auth_gss/gss_krb5_wrap.c | 30 +++++++++++-------- 5 files changed, 80 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ccd5236953f..cae04d7a45a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -123,21 +123,42 @@ checksummer(struct scatterlist *sg, void *data) return crypto_hash_update(desc, sg, sg->length); } -/* checksum the plaintext data and hdrlen bytes of the token header */ -s32 -make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, - int body_offset, struct xdr_netobj *cksum) +/* + * checksum the plaintext data and hdrlen bytes of the token header + * The checksum is performed over the first 8 bytes of the + * gss token header and then over the data body + */ +u32 +make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + struct xdr_netobj *cksumout) { - struct hash_desc desc; /* XXX add to ctx? 
*/ + struct hash_desc desc; struct scatterlist sg[1]; int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + unsigned int checksumlen; + + if (cksumout->len < kctx->gk5e->cksumlength) { + dprintk("%s: checksum buffer length, %u, too small for %s\n", + __func__, cksumout->len, kctx->gk5e->name); + return GSS_S_FAILURE; + } - desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); + desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(desc.tfm)) return GSS_S_FAILURE; - cksum->len = crypto_hash_digestsize(desc.tfm); desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + checksumlen = crypto_hash_digestsize(desc.tfm); + + if (cksumkey != NULL) { + err = crypto_hash_setkey(desc.tfm, cksumkey, + kctx->gk5e->keylength); + if (err) + goto out; + } + err = crypto_hash_init(&desc); if (err) goto out; @@ -149,8 +170,25 @@ make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body, checksummer, &desc); if (err) goto out; - err = crypto_hash_final(&desc, cksum->data); + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + switch (kctx->gk5e->ctype) { + case CKSUMTYPE_RSA_MD5: + err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata, + checksumdata, checksumlen); + if (err) + goto out; + memcpy(cksumout->data, + checksumdata + checksumlen - kctx->gk5e->cksumlength, + kctx->gk5e->cksumlength); + break; + default: + BUG(); + break; + } + cksumout->len = kctx->gk5e->cksumlength; out: crypto_free_hash(desc.tfm); return err ? GSS_S_FAILURE : 0; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index a66eb706aeb..6f93f4752be 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -66,6 +66,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keylength = 8, .blocksize = 8, .cksumlength = 8, + .keyed_cksum = 0, }, }; diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 46c6f44e5c3..cd512719092 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -101,6 +101,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, void *ptr; s32 now; u32 seq_send; + u8 *cksumkey; dprintk("RPC: %s\n", __func__); BUG_ON(ctx == NULL); @@ -109,15 +110,15 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, ptr = setup_token(ctx, token); - if (make_checksum((char *)ctx->gk5e->cksum_name, ptr, 8, - text, 0, &md5cksum)) - return GSS_S_FAILURE; + if (ctx->gk5e->keyed_cksum) + cksumkey = ctx->cksum; + else + cksumkey = NULL; - if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) + if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, &md5cksum)) return GSS_S_FAILURE; - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); + memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); spin_lock(&krb5_seq_lock); seq_send = ctx->seq_send++; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 10ee641a39d..7515bffddf1 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -84,6 +84,7 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, u32 seqnum; unsigned char *ptr = (unsigned char *)read_token->data; int bodysize; + u8 *cksumkey; dprintk("RPC: krb5_read_token\n"); @@ -108,14 +109,16 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) return GSS_S_DEFECTIVE_TOKEN; - if (make_checksum((char *)ctx->gk5e->cksum_name, ptr, 8, - 
message_buffer, 0, &md5cksum)) - return GSS_S_FAILURE; + if (ctx->gk5e->keyed_cksum) + cksumkey = ctx->cksum; + else + cksumkey = NULL; - if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16)) + if (make_checksum(ctx, ptr, 8, message_buffer, 0, + cksumkey, &md5cksum)) return GSS_S_FAILURE; - if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, + if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, ctx->gk5e->cksumlength)) return GSS_S_BAD_SIG; diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 7188891bcc3..2eb3046a84e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -167,6 +167,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int headlen; struct page **tmp_pages; u32 seq_send; + u8 *cksumkey; dprintk("RPC: %s\n", __func__); @@ -205,18 +206,20 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, make_confounder(msg_start, blocksize); + if (kctx->gk5e->keyed_cksum) + cksumkey = kctx->cksum; + else + cksumkey = NULL; + /* XXXJBF: UGH!: */ tmp_pages = buf->pages; buf->pages = pages; - if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf, - offset + headlen - blocksize, &md5cksum)) + if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize, + cksumkey, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; - if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) - return GSS_S_FAILURE; - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); + memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); spin_lock(&krb5_seq_lock); seq_send = kctx->seq_send++; @@ -252,6 +255,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) int data_len; int blocksize; int crypt_offset; + u8 *cksumkey; dprintk("RPC: gss_unwrap_kerberos\n"); @@ -288,15 +292,17 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) return GSS_S_DEFECTIVE_TOKEN; - if (make_checksum((char *)kctx->gk5e->cksum_name, ptr, 8, buf, - crypt_offset, &md5cksum)) - return GSS_S_FAILURE; + if (kctx->gk5e->keyed_cksum) + cksumkey = kctx->cksum; + else + cksumkey = NULL; - if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, - md5cksum.data, md5cksum.len)) + if (make_checksum(kctx, ptr, 8, buf, crypt_offset, + cksumkey, &md5cksum)) return GSS_S_FAILURE; - if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) + if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, + kctx->gk5e->cksumlength)) return GSS_S_BAD_SIG; /* it got through unscathed. Make sure the context is unexpired */ -- cgit v1.2.3 From 4891f2d008e4343eedea39ba1fe74864f1d32be0 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:53 -0400 Subject: gss_krb5: import functionality to derive keys into the kernel Import the code to derive Kerberos keys from a base key into the kernel. This will allow us to change the format of the context information sent down from gssd to include only a single key. 
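As an illustration of how the imported helper is used, the sketch below mirrors the checksum-key derivation performed by the context-import code added later in this series (context_derive_keys_des3() in gss_krb5_mech.c). The wrapper name example_derive_cksum_key and the elided error handling are illustrative only; set_cdata() and the KG_USAGE_*/KEY_USAGE_SEED_* constants come from that later patch:

static int example_derive_cksum_key(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen)
{
	struct xdr_netobj c, keyin, keyout;
	u8 cdata[GSS_KRB5_K5CLENGTH];

	/* usage constant per rfc3961: usage number plus a seed byte */
	c.data = cdata;
	c.len = GSS_KRB5_K5CLENGTH;
	set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM);

	keyin.data = rawkey;		/* base key from the gssd downcall */
	keyin.len = keylen;
	keyout.data = ctx->cksum;	/* derived checksum key lands here */
	keyout.len = keylen;

	return krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c) ? -EINVAL : 0;
}
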
Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/Makefile | 2 +- net/sunrpc/auth_gss/gss_krb5_keys.c | 252 ++++++++++++++++++++++++++++++++++++ net/sunrpc/auth_gss/gss_krb5_mech.c | 1 + 3 files changed, 254 insertions(+), 1 deletion(-) create mode 100644 net/sunrpc/auth_gss/gss_krb5_keys.c (limited to 'net') diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index 4de8bcf26fa..74a231735f6 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile @@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o + gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c new file mode 100644 index 00000000000..832ce901bf6 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -0,0 +1,252 @@ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. 
+ * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +/* + * This is the n-fold function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. + */ + +static void krb5_nfold(u32 inbits, const u8 *in, + u32 outbits, u8 *out) +{ + int a, b, c, lcm; + int byte, i, msbit; + + /* the code below is more readable if I make these bytes + instead of bits */ + + inbits >>= 3; + outbits >>= 3; + + /* first compute lcm(n,k) */ + + a = outbits; + b = inbits; + + while (b != 0) { + c = b; + b = a%b; + a = c; + } + + lcm = outbits*inbits/a; + + /* now do the real work */ + + memset(out, 0, outbits); + byte = 0; + + /* this will end up cycling through k lcm(k,n)/k times, which + is correct */ + for (i = lcm-1; i >= 0; i--) { + /* compute the msbit in k which gets added into this byte */ + msbit = ( + /* first, start with the msbit in the first, + * unrotated byte */ + ((inbits << 3) - 1) + /* then, for each byte, shift to the right + * for each repetition */ + + (((inbits << 3) + 13) * (i/inbits)) + /* last, pick out the correct byte within + * that shifted repetition */ + + ((inbits - (i % inbits)) << 3) + ) % (inbits << 3); + + /* pull out the byte value itself */ + byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)| + (in[((inbits) - (msbit >> 3)) % inbits])) + >> ((msbit & 7) + 1)) & 0xff; + + /* do the addition */ + byte += out[i % outbits]; + out[i % outbits] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + + } + + /* if there's a carry bit left over, add it back in */ + if (byte) { + for (i = outbits - 1; i >= 0; i--) { + /* do the addition */ + byte += out[i]; + out[i] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + } +} + +/* + * This is the DK (derive_key) function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. 
+ */ + +u32 krb5_derive_key(struct gss_krb5_enctype *gk5e, + const struct xdr_netobj *inkey, + struct xdr_netobj *outkey, + const struct xdr_netobj *in_constant) +{ + size_t blocksize, keybytes, keylength, n; + unsigned char *inblockdata, *outblockdata, *rawkey; + struct xdr_netobj inblock, outblock; + struct crypto_blkcipher *cipher; + u32 ret = EINVAL; + + blocksize = gk5e->blocksize; + keybytes = gk5e->keybytes; + keylength = gk5e->keylength; + + if ((inkey->len != keylength) || (outkey->len != keylength)) + goto err_return; + + cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + goto err_return; + if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len)) + goto err_return; + + /* allocate and set up buffers */ + + ret = ENOMEM; + inblockdata = kmalloc(blocksize, GFP_KERNEL); + if (inblockdata == NULL) + goto err_free_cipher; + + outblockdata = kmalloc(blocksize, GFP_KERNEL); + if (outblockdata == NULL) + goto err_free_in; + + rawkey = kmalloc(keybytes, GFP_KERNEL); + if (rawkey == NULL) + goto err_free_out; + + inblock.data = (char *) inblockdata; + inblock.len = blocksize; + + outblock.data = (char *) outblockdata; + outblock.len = blocksize; + + /* initialize the input block */ + + if (in_constant->len == inblock.len) { + memcpy(inblock.data, in_constant->data, inblock.len); + } else { + krb5_nfold(in_constant->len * 8, in_constant->data, + inblock.len * 8, inblock.data); + } + + /* loop encrypting the blocks until enough key bytes are generated */ + + n = 0; + while (n < keybytes) { + (*(gk5e->encrypt))(cipher, NULL, inblock.data, + outblock.data, inblock.len); + + if ((keybytes - n) <= outblock.len) { + memcpy(rawkey + n, outblock.data, (keybytes - n)); + break; + } + + memcpy(rawkey + n, outblock.data, outblock.len); + memcpy(inblock.data, outblock.data, outblock.len); + n += outblock.len; + } + + /* postprocess the key */ + + inblock.data = (char *) rawkey; + inblock.len = keybytes; + + BUG_ON(gk5e->mk_key == NULL); + ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey); + if (ret) { + dprintk("%s: got %d from mk_key function for '%s'\n", + __func__, ret, gk5e->encrypt_name); + goto err_free_raw; + } + + /* clean memory, free resources and exit */ + + ret = 0; + +err_free_raw: + memset(rawkey, 0, keybytes); + kfree(rawkey); +err_free_out: + memset(outblockdata, 0, blocksize); + kfree(outblockdata); +err_free_in: + memset(inblockdata, 0, blocksize); + kfree(inblockdata); +err_free_cipher: + crypto_free_blkcipher(cipher); +err_return: + return ret; +} diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 6f93f4752be..fdf0eb2057a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -60,6 +60,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .cksum_name = "md5", .encrypt = krb5_encrypt, .decrypt = krb5_decrypt, + .mk_key = NULL, .signalg = SGN_ALG_DES_MAC_MD5, .sealalg = SEAL_ALG_DES, .keybytes = 7, -- cgit v1.2.3 From 47d84807762966c3611c38adecec6ea703ddda7a Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:54 -0400 Subject: gss_krb5: handle new context format from gssd For encryption types other than DES, gssd sends down context information in a new format. This new format includes the information needed to support the new Kerberos GSS-API tokens defined in rfc4121. 
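The field order of the new downcall blob can be read off the parsing code this patch adds (gss_import_v2_context() in gss_krb5_mech.c, shown below); condensed here with the IS_ERR() checks and the key-derivation calls elided:

	p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
	p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
	p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
	ctx->gk5e = get_gss_krb5_enctype(ctx->enctype);
	p = simple_get_bytes(p, end, rawkey, ctx->gk5e->keylength);
	/* all session sub-keys (seal, sign, integrity) are then derived
	 * from rawkey in the kernel via krb5_derive_key() */
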
Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_keys.c | 2 +- net/sunrpc/auth_gss/gss_krb5_mech.c | 237 +++++++++++++++++++++++++++++++++++- 2 files changed, 237 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 832ce901bf6..253b4149584 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -147,7 +147,7 @@ static void krb5_nfold(u32 inbits, const u8 *in, * Taken from MIT Kerberos and modified. */ -u32 krb5_derive_key(struct gss_krb5_enctype *gk5e, +u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, const struct xdr_netobj *in_constant) diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index fdf0eb2057a..8b612e73356 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -48,6 +48,8 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static struct gss_api_mech gss_kerberos_mech; /* forward declaration */ + static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { /* * DES (All DES enctypes are mapped to the same gss functionality) @@ -247,6 +249,237 @@ out_err: return PTR_ERR(p); } +struct crypto_blkcipher * +context_v2_alloc_cipher(struct krb5_ctx *ctx, u8 *key) +{ + struct crypto_blkcipher *cp; + + cp = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, + 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(cp)) { + dprintk("gss_kerberos_mech: unable to initialize " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); + return NULL; + } + if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { + dprintk("gss_kerberos_mech: error setting key for " + "crypto algorithm %s\n", ctx->gk5e->encrypt_name); + crypto_free_blkcipher(cp); + return NULL; + } + return cp; +} + +static inline void +set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) +{ + cdata[0] = (usage>>24)&0xff; + cdata[1] = (usage>>16)&0xff; + cdata[2] = (usage>>8)&0xff; + cdata[3] = usage&0xff; + cdata[4] = seed; +} + +static int +context_derive_keys_des3(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) +{ + struct xdr_netobj c, keyin, keyout; + u8 cdata[GSS_KRB5_K5CLENGTH]; + u32 err; + + c.len = GSS_KRB5_K5CLENGTH; + c.data = cdata; + + keyin.data = rawkey; + keyin.len = keylen; + keyout.len = keylen; + + /* seq uses the raw key */ + ctx->seq = context_v2_alloc_cipher(ctx, rawkey); + if (ctx->seq == NULL) + goto out_err; + + ctx->enc = context_v2_alloc_cipher(ctx, rawkey); + if (ctx->enc == NULL) + goto out_free_seq; + + /* derive cksum */ + set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->cksum; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving cksum key\n", + __func__, err); + goto out_free_enc; + } + + return 0; + +out_free_enc: + crypto_free_blkcipher(ctx->enc); +out_free_seq: + crypto_free_blkcipher(ctx->seq); +out_err: + return -EINVAL; +} + +static int +context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) +{ + struct xdr_netobj c, keyin, keyout; + u8 cdata[GSS_KRB5_K5CLENGTH]; + u32 err; + + c.len = GSS_KRB5_K5CLENGTH; + c.data = cdata; + + keyin.data = rawkey; + keyin.len = keylen; + keyout.len = keylen; + + /* initiator seal encryption */ + set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); + keyout.data = ctx->initiator_seal; + err = krb5_derive_key(ctx->gk5e, 
&keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving initiator_seal key\n", + __func__, err); + goto out_err; + } + ctx->initiator_enc = context_v2_alloc_cipher(ctx, ctx->initiator_seal); + if (ctx->initiator_enc == NULL) + goto out_err; + + /* acceptor seal encryption */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); + keyout.data = ctx->acceptor_seal; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving acceptor_seal key\n", + __func__, err); + goto out_free_initiator_enc; + } + ctx->acceptor_enc = context_v2_alloc_cipher(ctx, ctx->acceptor_seal); + if (ctx->acceptor_enc == NULL) + goto out_free_initiator_enc; + + /* initiator sign checksum */ + set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->initiator_sign; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving initiator_sign key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + /* acceptor sign checksum */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM); + keyout.data = ctx->acceptor_sign; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving acceptor_sign key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + /* initiator seal integrity */ + set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY); + keyout.data = ctx->initiator_integ; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving initiator_integ key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + /* acceptor seal integrity */ + set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY); + keyout.data = ctx->acceptor_integ; + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + if (err) { + dprintk("%s: Error %d deriving acceptor_integ key\n", + __func__, err); + goto out_free_acceptor_enc; + } + + return 0; + +out_free_acceptor_enc: + crypto_free_blkcipher(ctx->acceptor_enc); +out_free_initiator_enc: + crypto_free_blkcipher(ctx->initiator_enc); +out_err: + return -EINVAL; +} + +static int +gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) +{ + u8 rawkey[GSS_KRB5_MAX_KEYLEN]; + int keylen; + + p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); + if (IS_ERR(p)) + goto out_err; + ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; + + p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); + if (IS_ERR(p)) + goto out_err; + p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); + if (IS_ERR(p)) + goto out_err; + /* set seq_send for use by "older" enctypes */ + ctx->seq_send = ctx->seq_send64; + if (ctx->seq_send64 != ctx->seq_send) { + dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, + (long unsigned)ctx->seq_send64, ctx->seq_send); + goto out_err; + } + p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); + if (IS_ERR(p)) + goto out_err; + ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); + if (ctx->gk5e == NULL) { + dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", + ctx->enctype); + p = ERR_PTR(-EINVAL); + goto out_err; + } + keylen = ctx->gk5e->keylength; + + p = simple_get_bytes(p, end, rawkey, keylen); + if (IS_ERR(p)) + goto out_err; + + if (p != end) { + p = ERR_PTR(-EINVAL); + goto out_err; + } + + ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, + gss_kerberos_mech.gm_oid.len, GFP_KERNEL); + if 
(unlikely(ctx->mech_used.data == NULL)) { + p = ERR_PTR(-ENOMEM); + goto out_err; + } + ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; + + switch (ctx->enctype) { + case ENCTYPE_DES3_CBC_RAW: + return context_derive_keys_des3(ctx, rawkey, keylen); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return context_derive_keys_new(ctx, rawkey, keylen); + default: + return -EINVAL; + } + +out_err: + return PTR_ERR(p); +} + static int gss_import_sec_context_kerberos(const void *p, size_t len, struct gss_ctx *ctx_id) @@ -262,7 +495,7 @@ gss_import_sec_context_kerberos(const void *p, size_t len, if (len == 85) ret = gss_import_v1_context(p, end, ctx); else - ret = -EINVAL; + ret = gss_import_v2_context(p, end, ctx); if (ret == 0) ctx_id->internal_ctx_id = ctx; @@ -279,6 +512,8 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { crypto_free_blkcipher(kctx->seq); crypto_free_blkcipher(kctx->enc); + crypto_free_blkcipher(kctx->acceptor_enc); + crypto_free_blkcipher(kctx->initiator_enc); kfree(kctx->mech_used.data); kfree(kctx); } -- cgit v1.2.3 From 683ac6656cb05b6e83593770ffc049eee4a4d119 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Apr 2010 14:09:58 -0400 Subject: gss_krb5: Add upcall info indicating supported kerberos enctypes The text based upcall now indicates which Kerberos encryption types are supported by the kernel rpcsecgss code. This is used by gssd to determine which encryption types it should attempt to negotiate when creating a context with a server. The server principal's database and keytab encryption types are what limits what it should negotiate. Therefore, its keytab should be created with only the enctypes listed by this file. Currently we support des-cbc-crc, des-cbc-md4 and des-cbc-md5 Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 8 +++++++- net/sunrpc/auth_gss/gss_krb5_mech.c | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index d64a58b8ed3..6654c8534d3 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -377,11 +377,12 @@ static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg) static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, struct rpc_clnt *clnt, int machine_cred) { + struct gss_api_mech *mech = gss_msg->auth->mech; char *p = gss_msg->databuf; int len = 0; gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ", - gss_msg->auth->mech->gm_name, + mech->gm_name, gss_msg->uid); p += gss_msg->msg.len; if (clnt->cl_principal) { @@ -398,6 +399,11 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg, p += len; gss_msg->msg.len += len; } + if (mech->gm_upcall_enctypes) { + len = sprintf(p, mech->gm_upcall_enctypes); + p += len; + gss_msg->msg.len += len; + } len = sprintf(p, "\n"); gss_msg->msg.len += len; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 8b612e73356..03f1dcddbd2 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -552,6 +552,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, + .gm_upcall_enctypes = "enctypes=3,1,2 ", }; static int __init init_kerberos_module(void) -- cgit v1.2.3 From 958142e97e04d6c266ae093739bbbbd03afcd497 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:55 -0400 Subject: gss_krb5: add support 
for triple-des encryption Add the final pieces to support the triple-des encryption type. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 3 ++ net/sunrpc/auth_gss/gss_krb5_keys.c | 53 +++++++++++++++++++++++++++++++++++ net/sunrpc/auth_gss/gss_krb5_mech.c | 23 +++++++++++++++ net/sunrpc/auth_gss/gss_krb5_seal.c | 1 + net/sunrpc/auth_gss/gss_krb5_unseal.c | 1 + net/sunrpc/auth_gss/gss_krb5_wrap.c | 2 ++ 6 files changed, 83 insertions(+) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index cae04d7a45a..bb76873aa01 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -184,6 +184,9 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, checksumdata + checksumlen - kctx->gk5e->cksumlength, kctx->gk5e->cksumlength); break; + case CKSUMTYPE_HMAC_SHA1_DES3: + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + break; default: BUG(); break; diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 253b4149584..d54668790f0 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -250,3 +250,56 @@ err_free_cipher: err_return: return ret; } + +#define smask(step) ((1<>step)&smask(step))) +#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1) + +static void mit_des_fixup_key_parity(u8 key[8]) +{ + int i; + for (i = 0; i < 8; i++) { + key[i] &= 0xfe; + key[i] |= 1^parity_char(key[i]); + } +} + +/* + * This is the des3 key derivation postprocess function + */ +u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key) +{ + int i; + u32 ret = EINVAL; + + if (key->len != 24) { + dprintk("%s: key->len is %d\n", __func__, key->len); + goto err_out; + } + if (randombits->len != 21) { + dprintk("%s: randombits->len is %d\n", + __func__, randombits->len); + goto err_out; + } + + /* take the seven bytes, move them around into the top 7 bits of the + 8 key bytes, then compute the parity bits. Do this three times. 
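+   (Concretely: the low-order bit of each of the seven copied bytes is
+   gathered into bits 1-7 of the eighth key byte, and
+   mit_des_fixup_key_parity() then rewrites bit 0 of every byte so the
+   byte has odd parity.)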
*/ + + for (i = 0; i < 3; i++) { + memcpy(key->data + i*8, randombits->data + i*7, 7); + key->data[i*8+7] = (((key->data[i*8]&1)<<1) | + ((key->data[i*8+1]&1)<<2) | + ((key->data[i*8+2]&1)<<3) | + ((key->data[i*8+3]&1)<<4) | + ((key->data[i*8+4]&1)<<5) | + ((key->data[i*8+5]&1)<<6) | + ((key->data[i*8+6]&1)<<7)); + + mit_des_fixup_key_parity(key->data + i*8); + } + ret = 0; +err_out: + return ret; +} diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 03f1dcddbd2..7cebdf84326 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -71,6 +71,26 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .cksumlength = 8, .keyed_cksum = 0, }, + /* + * 3DES + */ + { + .etype = ENCTYPE_DES3_CBC_RAW, + .ctype = CKSUMTYPE_HMAC_SHA1_DES3, + .name = "des3-hmac-sha1", + .encrypt_name = "cbc(des3_ede)", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_des3_make_key, + .signalg = SGN_ALG_HMAC_SHA1_DES3_KD, + .sealalg = SEAL_ALG_DES3KD, + .keybytes = 21, + .keylength = 24, + .blocksize = 8, + .cksumlength = 20, + .keyed_cksum = 1, + }, }; static const int num_supported_enctypes = @@ -440,6 +460,9 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); if (IS_ERR(p)) goto out_err; + /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */ + if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) + ctx->enctype = ENCTYPE_DES3_CBC_RAW; ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); if (ctx->gk5e == NULL) { dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index cd512719092..7ede900049a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -142,6 +142,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, default: BUG(); case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: return gss_get_mic_v1(ctx, text, token); } } diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 7515bffddf1..3e15bdb5a9e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -152,6 +152,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, default: BUG(); case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: return gss_verify_mic_v1(ctx, message_buffer, read_token); } } diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 2eb3046a84e..1c8ebd3dbd3 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -350,6 +350,7 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, default: BUG(); case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: return gss_wrap_kerberos_v1(kctx, offset, buf, pages); } } @@ -363,6 +364,7 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) default: BUG(); case ENCTYPE_DES_CBC_RAW: + case ENCTYPE_DES3_CBC_RAW: return gss_unwrap_kerberos_v1(kctx, offset, buf); } } -- cgit v1.2.3 From 4018bf3eec5ff6bf1234a602a4e72518757a7f55 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Apr 2010 14:21:12 -0400 Subject: gss_krb5: Advertise triple-des enctype support in the rpcsec_gss/krb5 upcall Update the upcall info indicating which Kerberos enctypes the kernel supports. 
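A note on the upcall format: the enctype IDs travel as a comma-separated list after "enctypes=" in the text message. The sketch below shows how a consumer such as gssd could pull the numbers back out; it is illustrative only (the function name and the lack of error reporting are not taken from gssd).

#include <stdlib.h>
#include <string.h>

/* Illustrative only: extract the numeric IDs from an upcall line such as
 * "mech=krb5 uid=0 enctypes=16,3,1,2 \n".  Not gssd's actual parser. */
static int parse_upcall_enctypes(const char *msg, int *out, int max)
{
	const char *p = strstr(msg, "enctypes=");
	int n = 0;

	if (p == NULL)
		return 0;
	p += strlen("enctypes=");
	while (n < max) {
		char *end;
		long v = strtol(p, &end, 10);

		if (end == p)
			break;
		out[n++] = (int)v;
		if (*end != ',')
			break;
		p = end + 1;
	}
	return n;	/* number of enctype IDs found */
}

With the string this patch installs, the sketch yields 16, 3, 1 and 2.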
Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 7cebdf84326..ce80f996758 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -575,7 +575,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, - .gm_upcall_enctypes = "enctypes=3,1,2 ", + .gm_upcall_enctypes = "enctypes=16,3,1,2 ", }; static int __init init_kerberos_module(void) -- cgit v1.2.3 From c43abaedaff92a7bcbfe04b593164bb5faba3078 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:58 -0400 Subject: xdr: Add an export for the helper function write_bytes_to_xdr_buf() Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/xdr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 2763fde8849..a1f82a87d34 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -762,6 +762,7 @@ int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, un __write_bytes_to_xdr_buf(&subbuf, obj, len); return 0; } +EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); int xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) -- cgit v1.2.3 From de9c17eb4a912c9028f7b470eb80815144883b26 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:02:59 -0400 Subject: gss_krb5: add support for new token formats in rfc4121 This is a step toward support for AES encryption types which are required to use the new token formats defined in rfc4121. Signed-off-by: Kevin Coffman [SteveD: Fixed a typo in gss_verify_mic_v2()] Signed-off-by: Steve Dickson [Trond: Got rid of the TEST_ROTATE/TEST_EXTRA_COUNT crap] Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 74 +++++++++++++++ net/sunrpc/auth_gss/gss_krb5_seal.c | 69 ++++++++++++++ net/sunrpc/auth_gss/gss_krb5_unseal.c | 61 ++++++++++++ net/sunrpc/auth_gss/gss_krb5_wrap.c | 174 ++++++++++++++++++++++++++++++++++ 4 files changed, 378 insertions(+) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index bb76873aa01..ca52ac28a53 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -197,6 +197,80 @@ out: return err ? GSS_S_FAILURE : 0; } +/* + * checksum the plaintext data and hdrlen bytes of the token header + * Per rfc4121, sec. 4.2.4, the checksum is performed over the data + * body then over the first 16 octets of the MIC token + * Inclusion of the header data in the calculation of the + * checksum is optional. 
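+ * (Hence the header argument may legitimately be NULL, as it is for the
+ * rfc4121 wrap path added later in this series.)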
+ */ +u32 +make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + struct xdr_netobj *cksumout) +{ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + unsigned int checksumlen; + + if (kctx->gk5e->keyed_cksum == 0) { + dprintk("%s: expected keyed hash for %s\n", + __func__, kctx->gk5e->name); + return GSS_S_FAILURE; + } + if (cksumkey == NULL) { + dprintk("%s: no key supplied for %s\n", + __func__, kctx->gk5e->name); + return GSS_S_FAILURE; + } + + desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(desc.tfm)) + return GSS_S_FAILURE; + checksumlen = crypto_hash_digestsize(desc.tfm); + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); + if (err) + goto out; + + err = crypto_hash_init(&desc); + if (err) + goto out; + err = xdr_process_buf(body, body_offset, body->len - body_offset, + checksummer, &desc); + if (err) + goto out; + if (header != NULL) { + sg_init_one(sg, header, hdrlen); + err = crypto_hash_update(&desc, sg, hdrlen); + if (err) + goto out; + } + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + + cksumout->len = kctx->gk5e->cksumlength; + + switch (kctx->gk5e->ctype) { + case CKSUMTYPE_HMAC_SHA1_96_AES128: + case CKSUMTYPE_HMAC_SHA1_96_AES256: + /* note that this truncates the hash */ + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + break; + default: + BUG(); + break; + } +out: + crypto_free_hash(desc.tfm); + return err ? GSS_S_FAILURE : 0; +} + struct encryptor_desc { u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; struct blkcipher_desc desc; diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 7ede900049a..477a546d19b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -91,6 +91,33 @@ setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) return (char *)krb5_hdr; } +static void * +setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) +{ + __be16 *ptr, *krb5_hdr; + u8 *p, flags = 0x00; + + if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) + flags |= 0x01; + if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) + flags |= 0x04; + + /* Per rfc 4121, sec 4.2.6.1, there is no header, + * just start the token */ + krb5_hdr = ptr = (__be16 *)token->data; + + *ptr++ = KG2_TOK_MIC; + p = (u8 *)ptr; + *p++ = flags; + *p++ = 0xff; + ptr = (__be16 *)p; + *ptr++ = 0xffff; + *ptr++ = 0xffff; + + token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; + return krb5_hdr; +} + static u32 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token) @@ -132,6 +159,45 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } +u32 +gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, + struct xdr_netobj *token) +{ + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj cksumobj = { .len = sizeof(cksumdata), + .data = cksumdata}; + void *krb5_hdr; + s32 now; + u64 seq_send; + u8 *cksumkey; + + dprintk("RPC: %s\n", __func__); + + krb5_hdr = setup_token_v2(ctx, token); + + /* Set up the sequence number. 
Now 64-bits in clear + * text and w/o direction indicator */ + spin_lock(&krb5_seq_lock); + seq_send = ctx->seq_send64++; + spin_unlock(&krb5_seq_lock); + *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send); + + if (ctx->initiate) + cksumkey = ctx->initiator_sign; + else + cksumkey = ctx->acceptor_sign; + + if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, + text, 0, cksumkey, &cksumobj)) + return GSS_S_FAILURE; + + memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len); + + now = get_seconds(); + + return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; +} + u32 gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, struct xdr_netobj *token) @@ -144,6 +210,9 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: return gss_get_mic_v1(ctx, text, token); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_get_mic_v2(ctx, text, token); } } diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 3e15bdb5a9e..4ede4cc4391 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -141,6 +141,64 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, return GSS_S_COMPLETE; } +static u32 +gss_verify_mic_v2(struct krb5_ctx *ctx, + struct xdr_buf *message_buffer, struct xdr_netobj *read_token) +{ + char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + struct xdr_netobj cksumobj = {.len = sizeof(cksumdata), + .data = cksumdata}; + s32 now; + u64 seqnum; + u8 *ptr = read_token->data; + u8 *cksumkey; + u8 flags; + int i; + + dprintk("RPC: %s\n", __func__); + + if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_MIC) + return GSS_S_DEFECTIVE_TOKEN; + + flags = ptr[2]; + if ((!ctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || + (ctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) + return GSS_S_BAD_SIG; + + if (flags & KG2_TOKEN_FLAG_SEALED) { + dprintk("%s: token has unexpected sealed flag\n", __func__); + return GSS_S_FAILURE; + } + + for (i = 3; i < 8; i++) + if (ptr[i] != 0xff) + return GSS_S_DEFECTIVE_TOKEN; + + if (ctx->initiate) + cksumkey = ctx->acceptor_sign; + else + cksumkey = ctx->initiator_sign; + + if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0, + cksumkey, &cksumobj)) + return GSS_S_FAILURE; + + if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN, + ctx->gk5e->cksumlength)) + return GSS_S_BAD_SIG; + + /* it got through unscathed. Make sure the context is unexpired */ + now = get_seconds(); + if (now > ctx->endtime) + return GSS_S_CONTEXT_EXPIRED; + + /* do sequencing checks */ + + seqnum = be64_to_cpup((__be64 *)ptr + 8); + + return GSS_S_COMPLETE; +} + u32 gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *message_buffer, @@ -154,6 +212,9 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: return gss_verify_mic_v1(ctx, message_buffer, read_token); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_verify_mic_v2(ctx, message_buffer, read_token); } } diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 1c8ebd3dbd3..4aa46b28298 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -340,6 +340,174 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) return GSS_S_COMPLETE; } +/* + * We cannot currently handle tokens with rotated data. 
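+ * (The rfc4121 RRC field counts how many trailing octets of the token
+ * the sender rotated around to the front before transmission.)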
We need a + * generalized routine to rotate the data in place. It is anticipated + * that we won't encounter rotated data in the general case. + */ +static u32 +rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) +{ + unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); + + if (realrrc == 0) + return 0; + + dprintk("%s: cannot process token with rotated data: " + "rrc %u, realrrc %u\n", __func__, rrc, realrrc); + return 1; +} + +static u32 +gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, struct page **pages) +{ + int blocksize; + u8 *ptr, *plainhdr; + s32 now; + u8 flags = 0x00; + __be16 *be16ptr, ec = 0; + __be64 *be64ptr; + u32 err; + + dprintk("RPC: %s\n", __func__); + + if (kctx->gk5e->encrypt_v2 == NULL) + return GSS_S_FAILURE; + + /* make room for gss token header */ + if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN)) + return GSS_S_FAILURE; + + /* construct gss token header */ + ptr = plainhdr = buf->head[0].iov_base + offset; + *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff); + *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff); + + if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) + flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR; + if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0) + flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY; + /* We always do confidentiality in wrap tokens */ + flags |= KG2_TOKEN_FLAG_SEALED; + + *ptr++ = flags; + *ptr++ = 0xff; + be16ptr = (__be16 *)ptr; + + blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc); + *be16ptr++ = cpu_to_be16(ec); + /* "inner" token header always uses 0 for RRC */ + *be16ptr++ = cpu_to_be16(0); + + be64ptr = (__be64 *)be16ptr; + spin_lock(&krb5_seq_lock); + *be64ptr = cpu_to_be64(kctx->seq_send64++); + spin_unlock(&krb5_seq_lock); + + err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages); + if (err) + return err; + + now = get_seconds(); + return (kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; +} + +static u32 +gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) +{ + s32 now; + u64 seqnum; + u8 *ptr; + u8 flags = 0x00; + u16 ec, rrc; + int err; + u32 headskip, tailskip; + u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN]; + unsigned int movelen; + + + dprintk("RPC: %s\n", __func__); + + if (kctx->gk5e->decrypt_v2 == NULL) + return GSS_S_FAILURE; + + ptr = buf->head[0].iov_base + offset; + + if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP) + return GSS_S_DEFECTIVE_TOKEN; + + flags = ptr[2]; + if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) || + (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR))) + return GSS_S_BAD_SIG; + + if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) { + dprintk("%s: token missing expected sealed flag\n", __func__); + return GSS_S_DEFECTIVE_TOKEN; + } + + if (ptr[3] != 0xff) + return GSS_S_DEFECTIVE_TOKEN; + + ec = be16_to_cpup((__be16 *)(ptr + 4)); + rrc = be16_to_cpup((__be16 *)(ptr + 6)); + + seqnum = be64_to_cpup((__be64 *)(ptr + 8)); + + if (rrc != 0) { + err = rotate_left(kctx, offset, buf, rrc); + if (err) + return GSS_S_FAILURE; + } + + err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, + &headskip, &tailskip); + if (err) + return GSS_S_FAILURE; + + /* + * Retrieve the decrypted gss token header and verify + * it against the original + */ + err = read_bytes_from_xdr_buf(buf, + buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip, + decrypted_hdr, GSS_KRB5_TOK_HDR_LEN); + if (err) { + dprintk("%s: error %u getting decrypted_hdr\n", __func__, err); + return GSS_S_FAILURE; + } + if (memcmp(ptr, decrypted_hdr, 6) + || memcmp(ptr + 8, decrypted_hdr + 8, 8)) { + dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__); + return GSS_S_FAILURE; + } + + /* do sequencing checks */ + + /* it got through unscathed. Make sure the context is unexpired */ + now = get_seconds(); + if (now > kctx->endtime) + return GSS_S_CONTEXT_EXPIRED; + + /* + * Move the head data back to the right position in xdr_buf. + * We ignore any "ec" data since it might be in the head or + * the tail, and we really don't need to deal with it. + * Note that buf->head[0].iov_len may indicate the available + * head buffer space rather than that actually occupied. 
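+ * The min_t() clamp below keeps the subsequent memmove() from copying
+ * more data than is actually present.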
+ */ + movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len); + movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip; + BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen > + buf->head[0].iov_len); + memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen); + buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; + buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip; + + return GSS_S_COMPLETE; +} + u32 gss_wrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf, struct page **pages) @@ -352,6 +520,9 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: return gss_wrap_kerberos_v1(kctx, offset, buf, pages); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_wrap_kerberos_v2(kctx, offset, buf, pages); } } @@ -366,6 +537,9 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: return gss_unwrap_kerberos_v1(kctx, offset, buf); + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + return gss_unwrap_kerberos_v2(kctx, offset, buf); } } -- cgit v1.2.3 From 934a95aa1c9c6ad77838800b79c306e982437605 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:00 -0400 Subject: gss_krb5: add remaining pieces to enable AES encryption support Add the remaining pieces to enable support for Kerberos AES encryption types. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 248 ++++++++++++++++++++++++++++++++++ net/sunrpc/auth_gss/gss_krb5_keys.c | 30 ++++ net/sunrpc/auth_gss/gss_krb5_mech.c | 86 ++++++++++-- net/sunrpc/auth_gss/gss_krb5_wrap.c | 6 +- 4 files changed, 358 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ca52ac28a53..967484a914f 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -478,3 +479,250 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen) return 0; } + +static u32 +gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf, + u32 offset, u8 *iv, struct page **pages, int encrypt) +{ + u32 ret; + struct scatterlist sg[1]; + struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; + u8 data[crypto_blkcipher_blocksize(cipher) * 2]; + struct page **save_pages; + u32 len = buf->len - offset; + + BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2); + + /* + * For encryption, we want to read from the cleartext + * page cache pages, and write the encrypted data to + * the supplied xdr_buf pages. 
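+ * The temporary swap of buf->pages below points the read at those
+ * cleartext pages; restoring it before write_bytes_to_xdr_buf() makes
+ * the encrypted result land back in the xdr_buf's own pages.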
+ */ + save_pages = buf->pages; + if (encrypt) + buf->pages = pages; + + ret = read_bytes_from_xdr_buf(buf, offset, data, len); + buf->pages = save_pages; + if (ret) + goto out; + + sg_init_one(sg, data, len); + + if (encrypt) + ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); + else + ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); + + if (ret) + goto out; + + ret = write_bytes_to_xdr_buf(buf, offset, data, len); + +out: + return ret; +} + +u32 +gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, + struct xdr_buf *buf, int ec, struct page **pages) +{ + u32 err; + struct xdr_netobj hmac; + u8 *cksumkey; + u8 *ecptr; + struct crypto_blkcipher *cipher, *aux_cipher; + int blocksize; + struct page **save_pages; + int nblocks, nbytes; + struct encryptor_desc desc; + u32 cbcbytes; + + if (kctx->initiate) { + cipher = kctx->initiator_enc; + aux_cipher = kctx->initiator_enc_aux; + cksumkey = kctx->initiator_integ; + } else { + cipher = kctx->acceptor_enc; + aux_cipher = kctx->acceptor_enc_aux; + cksumkey = kctx->acceptor_integ; + } + blocksize = crypto_blkcipher_blocksize(cipher); + + /* hide the gss token header and insert the confounder */ + offset += GSS_KRB5_TOK_HDR_LEN; + if (xdr_extend_head(buf, offset, blocksize)) + return GSS_S_FAILURE; + gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize); + offset -= GSS_KRB5_TOK_HDR_LEN; + + if (buf->tail[0].iov_base != NULL) { + ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; + } else { + buf->tail[0].iov_base = buf->head[0].iov_base + + buf->head[0].iov_len; + buf->tail[0].iov_len = 0; + ecptr = buf->tail[0].iov_base; + } + + memset(ecptr, 'X', ec); + buf->tail[0].iov_len += ec; + buf->len += ec; + + /* copy plaintext gss token header after filler (if any) */ + memcpy(ecptr + ec, buf->head[0].iov_base + offset, + GSS_KRB5_TOK_HDR_LEN); + buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; + buf->len += GSS_KRB5_TOK_HDR_LEN; + + /* Do the HMAC */ + hmac.len = GSS_KRB5_MAX_CKSUM_LEN; + hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; + + /* + * When we are called, pages points to the real page cache + * data -- which we can't go and encrypt! buf->pages points + * to scratch pages which we are going to send off to the + * client/server. Swap in the plaintext pages to calculate + * the hmac. + */ + save_pages = buf->pages; + buf->pages = pages; + + err = make_checksum_v2(kctx, NULL, 0, buf, + offset + GSS_KRB5_TOK_HDR_LEN, cksumkey, &hmac); + buf->pages = save_pages; + if (err) + return GSS_S_FAILURE; + + nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN; + nblocks = (nbytes + blocksize - 1) / blocksize; + cbcbytes = 0; + if (nblocks > 2) + cbcbytes = (nblocks - 2) * blocksize; + + memset(desc.iv, 0, sizeof(desc.iv)); + + if (cbcbytes) { + desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; + desc.fragno = 0; + desc.fraglen = 0; + desc.pages = pages; + desc.outbuf = buf; + desc.desc.info = desc.iv; + desc.desc.flags = 0; + desc.desc.tfm = aux_cipher; + + sg_init_table(desc.infrags, 4); + sg_init_table(desc.outfrags, 4); + + err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, + cbcbytes, encryptor, &desc); + if (err) + goto out_err; + } + + /* Make sure IV carries forward from any CBC results. 
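+	 * (After the CBC pass, desc.iv is left holding the last ciphertext
+	 * block, which seeds the final CTS step below.)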
*/ + err = gss_krb5_cts_crypt(cipher, buf, + offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes, + desc.iv, pages, 1); + if (err) { + err = GSS_S_FAILURE; + goto out_err; + } + + /* Now update buf to account for HMAC */ + buf->tail[0].iov_len += kctx->gk5e->cksumlength; + buf->len += kctx->gk5e->cksumlength; + +out_err: + if (err) + err = GSS_S_FAILURE; + return err; +} + +u32 +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, + u32 *headskip, u32 *tailskip) +{ + struct xdr_buf subbuf; + u32 ret = 0; + u8 *cksum_key; + struct crypto_blkcipher *cipher, *aux_cipher; + struct xdr_netobj our_hmac_obj; + u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; + u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; + int nblocks, blocksize, cbcbytes; + struct decryptor_desc desc; + + if (kctx->initiate) { + cipher = kctx->acceptor_enc; + aux_cipher = kctx->acceptor_enc_aux; + cksum_key = kctx->acceptor_integ; + } else { + cipher = kctx->initiator_enc; + aux_cipher = kctx->initiator_enc_aux; + cksum_key = kctx->initiator_integ; + } + blocksize = crypto_blkcipher_blocksize(cipher); + + + /* create a segment skipping the header and leaving out the checksum */ + xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, + (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - + kctx->gk5e->cksumlength)); + + nblocks = (subbuf.len + blocksize - 1) / blocksize; + + cbcbytes = 0; + if (nblocks > 2) + cbcbytes = (nblocks - 2) * blocksize; + + memset(desc.iv, 0, sizeof(desc.iv)); + + if (cbcbytes) { + desc.fragno = 0; + desc.fraglen = 0; + desc.desc.info = desc.iv; + desc.desc.flags = 0; + desc.desc.tfm = aux_cipher; + + sg_init_table(desc.frags, 4); + + ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); + if (ret) + goto out_err; + } + + /* Make sure IV carries forward from any CBC results. 
*/ + ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); + if (ret) + goto out_err; + + + /* Calculate our hmac over the plaintext data */ + our_hmac_obj.len = sizeof(our_hmac); + our_hmac_obj.data = our_hmac; + + ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0, + cksum_key, &our_hmac_obj); + if (ret) + goto out_err; + + /* Get the packet's hmac value */ + ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, + pkt_hmac, kctx->gk5e->cksumlength); + if (ret) + goto out_err; + + if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { + ret = GSS_S_BAD_SIG; + goto out_err; + } + *headskip = crypto_blkcipher_blocksize(cipher); + *tailskip = kctx->gk5e->cksumlength; +out_err: + if (ret && ret != GSS_S_BAD_SIG) + ret = GSS_S_FAILURE; + return ret; +} diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index d54668790f0..33b87f04b30 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -303,3 +303,33 @@ u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e, err_out: return ret; } + +/* + * This is the aes key derivation postprocess function + */ +u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e, + struct xdr_netobj *randombits, + struct xdr_netobj *key) +{ + u32 ret = EINVAL; + + if (key->len != 16 && key->len != 32) { + dprintk("%s: key->len is %d\n", __func__, key->len); + goto err_out; + } + if (randombits->len != 16 && randombits->len != 32) { + dprintk("%s: randombits->len is %d\n", + __func__, randombits->len); + goto err_out; + } + if (randombits->len != key->len) { + dprintk("%s: randombits->len is %d, key->len is %d\n", + __func__, randombits->len, key->len); + goto err_out; + } + memcpy(key->data, randombits->data, key->len); + ret = 0; +err_out: + return ret; +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index ce80f996758..694ad77c86b 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -91,6 +91,50 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .cksumlength = 20, .keyed_cksum = 1, }, + /* + * AES128 + */ + { + .etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96, + .ctype = CKSUMTYPE_HMAC_SHA1_96_AES128, + .name = "aes128-cts", + .encrypt_name = "cts(cbc(aes))", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_aes_make_key, + .encrypt_v2 = gss_krb5_aes_encrypt, + .decrypt_v2 = gss_krb5_aes_decrypt, + .signalg = -1, + .sealalg = -1, + .keybytes = 16, + .keylength = 16, + .blocksize = 16, + .cksumlength = 12, + .keyed_cksum = 1, + }, + /* + * AES256 + */ + { + .etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96, + .ctype = CKSUMTYPE_HMAC_SHA1_96_AES256, + .name = "aes256-cts", + .encrypt_name = "cts(cbc(aes))", + .cksum_name = "hmac(sha1)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = gss_krb5_aes_make_key, + .encrypt_v2 = gss_krb5_aes_encrypt, + .decrypt_v2 = gss_krb5_aes_decrypt, + .signalg = -1, + .sealalg = -1, + .keybytes = 32, + .keylength = 32, + .blocksize = 16, + .cksumlength = 12, + .keyed_cksum = 1, + }, }; static const int num_supported_enctypes = @@ -270,20 +314,19 @@ out_err: } struct crypto_blkcipher * -context_v2_alloc_cipher(struct krb5_ctx *ctx, u8 *key) +context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) { struct crypto_blkcipher *cp; - cp = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, - 0, CRYPTO_ALG_ASYNC); + cp = 
crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(cp)) { dprintk("gss_kerberos_mech: unable to initialize " - "crypto algorithm %s\n", ctx->gk5e->encrypt_name); + "crypto algorithm %s\n", cname); return NULL; } if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { dprintk("gss_kerberos_mech: error setting key for " - "crypto algorithm %s\n", ctx->gk5e->encrypt_name); + "crypto algorithm %s\n", cname); crypto_free_blkcipher(cp); return NULL; } @@ -315,11 +358,13 @@ context_derive_keys_des3(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) keyout.len = keylen; /* seq uses the raw key */ - ctx->seq = context_v2_alloc_cipher(ctx, rawkey); + ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, + rawkey); if (ctx->seq == NULL) goto out_err; - ctx->enc = context_v2_alloc_cipher(ctx, rawkey); + ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, + rawkey); if (ctx->enc == NULL) goto out_free_seq; @@ -366,7 +411,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) __func__, err); goto out_err; } - ctx->initiator_enc = context_v2_alloc_cipher(ctx, ctx->initiator_seal); + ctx->initiator_enc = context_v2_alloc_cipher(ctx, + ctx->gk5e->encrypt_name, + ctx->initiator_seal); if (ctx->initiator_enc == NULL) goto out_err; @@ -379,7 +426,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) __func__, err); goto out_free_initiator_enc; } - ctx->acceptor_enc = context_v2_alloc_cipher(ctx, ctx->acceptor_seal); + ctx->acceptor_enc = context_v2_alloc_cipher(ctx, + ctx->gk5e->encrypt_name, + ctx->acceptor_seal); if (ctx->acceptor_enc == NULL) goto out_free_initiator_enc; @@ -423,6 +472,23 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) goto out_free_acceptor_enc; } + switch (ctx->enctype) { + case ENCTYPE_AES128_CTS_HMAC_SHA1_96: + case ENCTYPE_AES256_CTS_HMAC_SHA1_96: + ctx->initiator_enc_aux = + context_v2_alloc_cipher(ctx, "cbc(aes)", + ctx->initiator_seal); + if (ctx->initiator_enc_aux == NULL) + goto out_free_acceptor_enc; + ctx->acceptor_enc_aux = + context_v2_alloc_cipher(ctx, "cbc(aes)", + ctx->acceptor_seal); + if (ctx->acceptor_enc_aux == NULL) { + crypto_free_blkcipher(ctx->initiator_enc_aux); + goto out_free_acceptor_enc; + } + } + return 0; out_free_acceptor_enc: @@ -537,6 +603,8 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { crypto_free_blkcipher(kctx->enc); crypto_free_blkcipher(kctx->acceptor_enc); crypto_free_blkcipher(kctx->initiator_enc); + crypto_free_blkcipher(kctx->acceptor_enc_aux); + crypto_free_blkcipher(kctx->initiator_enc_aux); kfree(kctx->mech_used.data); kfree(kctx); } diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 4aa46b28298..a1a3585fa76 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -113,8 +113,8 @@ out: return 0; } -static void -make_confounder(char *p, u32 conflen) +void +gss_krb5_make_confounder(char *p, u32 conflen) { static u64 i = 0; u64 *q = (u64 *)p; @@ -204,7 +204,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, memset(ptr + 4, 0xff, 4); *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); - make_confounder(msg_start, blocksize); + gss_krb5_make_confounder(msg_start, blocksize); if (kctx->gk5e->keyed_cksum) cksumkey = kctx->cksum; -- cgit v1.2.3 From bf6d359c508cf83401c942262a9749752598394d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Apr 2010 14:23:06 -0400 Subject: gss_krb5: Advertise AES enctype support in the rpcsec_gss/krb5 
upcall Update upcall info indicating which Kerberos enctypes the kernel supports Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 694ad77c86b..506a2e7d4fa 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -643,7 +643,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, - .gm_upcall_enctypes = "enctypes=16,3,1,2 ", + .gm_upcall_enctypes = "enctypes=18,17,16,3,1,2 ", }; static int __init init_kerberos_module(void) -- cgit v1.2.3 From 8b23707612cffdba694dcd18aa8a018918aa86dc Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:02 -0400 Subject: gssd_krb5: arcfour-hmac support For arcfour-hmac support, the make_checksum function needs a usage field to correctly calculate the checksum differently for MIC and WRAP tokens. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 15 +++++++++++---- net/sunrpc/auth_gss/gss_krb5_seal.c | 13 +++++++++---- net/sunrpc/auth_gss/gss_krb5_unseal.c | 12 ++++++++---- net/sunrpc/auth_gss/gss_krb5_wrap.c | 4 ++-- 4 files changed, 30 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 967484a914f..33ae7023cf3 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -132,7 +132,7 @@ checksummer(struct scatterlist *sg, void *data) u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, struct xdr_buf *body, int body_offset, u8 *cksumkey, - struct xdr_netobj *cksumout) + unsigned int usage, struct xdr_netobj *cksumout) { struct hash_desc desc; struct scatterlist sg[1]; @@ -208,7 +208,7 @@ out: u32 make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, struct xdr_buf *body, int body_offset, u8 *cksumkey, - struct xdr_netobj *cksumout) + unsigned int usage, struct xdr_netobj *cksumout) { struct hash_desc desc; struct scatterlist sg[1]; @@ -537,15 +537,18 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, int nblocks, nbytes; struct encryptor_desc desc; u32 cbcbytes; + unsigned int usage; if (kctx->initiate) { cipher = kctx->initiator_enc; aux_cipher = kctx->initiator_enc_aux; cksumkey = kctx->initiator_integ; + usage = KG_USAGE_INITIATOR_SEAL; } else { cipher = kctx->acceptor_enc; aux_cipher = kctx->acceptor_enc_aux; cksumkey = kctx->acceptor_integ; + usage = KG_USAGE_ACCEPTOR_SEAL; } blocksize = crypto_blkcipher_blocksize(cipher); @@ -590,7 +593,8 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, buf->pages = pages; err = make_checksum_v2(kctx, NULL, 0, buf, - offset + GSS_KRB5_TOK_HDR_LEN, cksumkey, &hmac); + offset + GSS_KRB5_TOK_HDR_LEN, + cksumkey, usage, &hmac); buf->pages = save_pages; if (err) return GSS_S_FAILURE; @@ -654,15 +658,18 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; int nblocks, blocksize, cbcbytes; struct decryptor_desc desc; + unsigned int usage; if (kctx->initiate) { cipher = kctx->acceptor_enc; aux_cipher = kctx->acceptor_enc_aux; cksum_key = kctx->acceptor_integ; + usage = KG_USAGE_ACCEPTOR_SEAL; } else { cipher = kctx->initiator_enc; aux_cipher = kctx->initiator_enc_aux; 
cksum_key = kctx->initiator_integ; + usage = KG_USAGE_INITIATOR_SEAL; } blocksize = crypto_blkcipher_blocksize(cipher); @@ -705,7 +712,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, our_hmac_obj.data = our_hmac; ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0, - cksum_key, &our_hmac_obj); + cksum_key, usage, &our_hmac_obj); if (ret) goto out_err; diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 477a546d19b..e22fed3d9a1 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -142,7 +142,8 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, else cksumkey = NULL; - if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, &md5cksum)) + if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, + KG_USAGE_SIGN, &md5cksum)) return GSS_S_FAILURE; memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); @@ -170,6 +171,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, s32 now; u64 seq_send; u8 *cksumkey; + unsigned int cksum_usage; dprintk("RPC: %s\n", __func__); @@ -182,13 +184,16 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, spin_unlock(&krb5_seq_lock); *((u64 *)(krb5_hdr + 8)) = cpu_to_be64(seq_send); - if (ctx->initiate) + if (ctx->initiate) { cksumkey = ctx->initiator_sign; - else + cksum_usage = KG_USAGE_INITIATOR_SIGN; + } else { cksumkey = ctx->acceptor_sign; + cksum_usage = KG_USAGE_ACCEPTOR_SIGN; + } if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, - text, 0, cksumkey, &cksumobj)) + text, 0, cksumkey, cksum_usage, &cksumobj)) return GSS_S_FAILURE; memcpy(krb5_hdr + GSS_KRB5_TOK_HDR_LEN, cksumobj.data, cksumobj.len); diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 4ede4cc4391..ef91366e3de 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -115,7 +115,7 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, cksumkey = NULL; if (make_checksum(ctx, ptr, 8, message_buffer, 0, - cksumkey, &md5cksum)) + cksumkey, KG_USAGE_SIGN, &md5cksum)) return GSS_S_FAILURE; if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, @@ -154,6 +154,7 @@ gss_verify_mic_v2(struct krb5_ctx *ctx, u8 *cksumkey; u8 flags; int i; + unsigned int cksum_usage; dprintk("RPC: %s\n", __func__); @@ -174,13 +175,16 @@ gss_verify_mic_v2(struct krb5_ctx *ctx, if (ptr[i] != 0xff) return GSS_S_DEFECTIVE_TOKEN; - if (ctx->initiate) + if (ctx->initiate) { cksumkey = ctx->acceptor_sign; - else + cksum_usage = KG_USAGE_ACCEPTOR_SIGN; + } else { cksumkey = ctx->initiator_sign; + cksum_usage = KG_USAGE_INITIATOR_SIGN; + } if (make_checksum_v2(ctx, ptr, GSS_KRB5_TOK_HDR_LEN, message_buffer, 0, - cksumkey, &cksumobj)) + cksumkey, cksum_usage, &cksumobj)) return GSS_S_FAILURE; if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN, diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index a1a3585fa76..097cc27494c 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -215,7 +215,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, tmp_pages = buf->pages; buf->pages = pages; if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize, - cksumkey, &md5cksum)) + cksumkey, KG_USAGE_SEAL, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; @@ -298,7 +298,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) cksumkey = NULL; if (make_checksum(kctx, ptr, 8, buf, crypt_offset, - cksumkey, 
&md5cksum)) + cksumkey, KG_USAGE_SEAL, &md5cksum)) return GSS_S_FAILURE; if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, -- cgit v1.2.3 From fc263a917afad3bda7b823a6edc803a40e7f6015 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:03 -0400 Subject: gss_krb5: Save the raw session key in the context This is needed for deriving arcfour-hmac keys "on the fly" using the sequence number or checksu Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 506a2e7d4fa..893fad71e30 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -344,7 +344,7 @@ set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) } static int -context_derive_keys_des3(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) +context_derive_keys_des3(struct krb5_ctx *ctx) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; @@ -353,18 +353,18 @@ context_derive_keys_des3(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) c.len = GSS_KRB5_K5CLENGTH; c.data = cdata; - keyin.data = rawkey; - keyin.len = keylen; - keyout.len = keylen; + keyin.data = ctx->Ksess; + keyin.len = ctx->gk5e->keylength; + keyout.len = ctx->gk5e->keylength; /* seq uses the raw key */ ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, - rawkey); + ctx->Ksess); if (ctx->seq == NULL) goto out_err; ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, - rawkey); + ctx->Ksess); if (ctx->enc == NULL) goto out_free_seq; @@ -389,7 +389,7 @@ out_err: } static int -context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) +context_derive_keys_new(struct krb5_ctx *ctx) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; @@ -398,9 +398,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, u8 *rawkey, u32 keylen) c.len = GSS_KRB5_K5CLENGTH; c.data = cdata; - keyin.data = rawkey; - keyin.len = keylen; - keyout.len = keylen; + keyin.data = ctx->Ksess; + keyin.len = ctx->gk5e->keylength; + keyout.len = ctx->gk5e->keylength; /* initiator seal encryption */ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); @@ -502,7 +502,6 @@ out_err: static int gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) { - u8 rawkey[GSS_KRB5_MAX_KEYLEN]; int keylen; p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); @@ -538,7 +537,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) } keylen = ctx->gk5e->keylength; - p = simple_get_bytes(p, end, rawkey, keylen); + p = simple_get_bytes(p, end, ctx->Ksess, keylen); if (IS_ERR(p)) goto out_err; @@ -557,10 +556,10 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) switch (ctx->enctype) { case ENCTYPE_DES3_CBC_RAW: - return context_derive_keys_des3(ctx, rawkey, keylen); + return context_derive_keys_des3(ctx); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: - return context_derive_keys_new(ctx, rawkey, keylen); + return context_derive_keys_new(ctx); default: return -EINVAL; } -- cgit v1.2.3 From 1dbd9029f3024d058da1cf6c6658c28aac2e4e1c Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:04 -0400 Subject: gssd_krb5: More arcfour-hmac support For the arcfour-hmac support, the make_seq_num and get_seq_num 
functions need access to the kerberos context structure. This will be used in a later patch. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_seal.c | 5 ++--- net/sunrpc/auth_gss/gss_krb5_seqnum.c | 6 ++++-- net/sunrpc/auth_gss/gss_krb5_unseal.c | 3 ++- net/sunrpc/auth_gss/gss_krb5_wrap.c | 6 +++--- 4 files changed, 11 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index e22fed3d9a1..36fe487d93d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -152,9 +152,8 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, seq_send = ctx->seq_send++; spin_unlock(&krb5_seq_lock); - if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, - seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, - ptr + 8)) + if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, + seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) return GSS_S_FAILURE; return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 6331cd6866e..83b59308497 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -40,7 +40,8 @@ #endif s32 -krb5_make_seq_num(struct crypto_blkcipher *key, +krb5_make_seq_num(struct krb5_ctx *kctx, + struct crypto_blkcipher *key, int direction, u32 seqnum, unsigned char *cksum, unsigned char *buf) @@ -61,13 +62,14 @@ krb5_make_seq_num(struct crypto_blkcipher *key, } s32 -krb5_get_seq_num(struct crypto_blkcipher *key, +krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, unsigned char *buf, int *direction, u32 *seqnum) { s32 code; unsigned char plain[8]; + struct crypto_blkcipher *key = kctx->seq; dprintk("RPC: krb5_get_seq_num:\n"); diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index ef91366e3de..97eb91b8c70 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -131,7 +131,8 @@ gss_verify_mic_v1(struct krb5_ctx *ctx, /* do sequencing checks */ - if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum)) + if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, + &direction, &seqnum)) return GSS_S_FAILURE; if ((ctx->initiate && direction != 0xff) || diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 097cc27494c..a95e7e0ac0e 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -227,7 +227,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, /* XXX would probably be more efficient to compute checksum * and encrypt at the same time: */ - if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, + if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 
0 : 0xff, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; @@ -314,8 +314,8 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) /* do sequencing checks */ - if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, - &direction, &seqnum)) + if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, + ptr + 8, &direction, &seqnum)) return GSS_S_BAD_SIG; if ((kctx->initiate && direction != 0xff) || -- cgit v1.2.3 From 5af46547ec451918f3ba51efe59b317d33adf701 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:05 -0400 Subject: gss_krb5: Use confounder length in wrap code All encryption types use a confounder at the beginning of the wrap token. In all encryption types except arcfour-hmac, the confounder is the same as the blocksize. arcfour-hmac has a blocksize of one, but uses an eight byte confounder. Add an entry to the crypto framework definitions for the confounder length and change the wrap/unwrap code to use the confounder length rather than assuming it is always the blocksize. Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 6 +++--- net/sunrpc/auth_gss/gss_krb5_mech.c | 4 ++++ net/sunrpc/auth_gss/gss_krb5_wrap.c | 12 +++++++----- 3 files changed, 14 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index 33ae7023cf3..ed4106a3daf 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -554,9 +554,9 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, /* hide the gss token header and insert the confounder */ offset += GSS_KRB5_TOK_HDR_LEN; - if (xdr_extend_head(buf, offset, blocksize)) + if (xdr_extend_head(buf, offset, kctx->gk5e->conflen)) return GSS_S_FAILURE; - gss_krb5_make_confounder(buf->head[0].iov_base + offset, blocksize); + gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); offset -= GSS_KRB5_TOK_HDR_LEN; if (buf->tail[0].iov_base != NULL) { @@ -726,7 +726,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, ret = GSS_S_BAD_SIG; goto out_err; } - *headskip = crypto_blkcipher_blocksize(cipher); + *headskip = kctx->gk5e->conflen; *tailskip = kctx->gk5e->cksumlength; out_err: if (ret && ret != GSS_S_BAD_SIG) diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 893fad71e30..ef6b3134904 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -68,6 +68,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keybytes = 7, .keylength = 8, .blocksize = 8, + .conflen = 8, .cksumlength = 8, .keyed_cksum = 0, }, @@ -88,6 +89,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keybytes = 21, .keylength = 24, .blocksize = 8, + .conflen = 8, .cksumlength = 20, .keyed_cksum = 1, }, @@ -110,6 +112,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keybytes = 16, .keylength = 16, .blocksize = 16, + .conflen = 16, .cksumlength = 12, .keyed_cksum = 1, }, @@ -132,6 +135,7 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keybytes = 32, .keylength = 32, .blocksize = 16, + .conflen = 16, .cksumlength = 12, .keyed_cksum = 1, }, diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index a95e7e0ac0e..383db891c83 100644 --- 
a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -168,6 +168,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct page **tmp_pages; u32 seq_send; u8 *cksumkey; + u32 conflen = kctx->gk5e->conflen; dprintk("RPC: %s\n", __func__); @@ -176,7 +177,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, blocksize = crypto_blkcipher_blocksize(kctx->enc); gss_krb5_add_padding(buf, offset, blocksize); BUG_ON((buf->len - offset) % blocksize); - plainlen = blocksize + buf->len - offset; + plainlen = conflen + buf->len - offset; headlen = g_token_size(&kctx->mech_used, GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) - @@ -204,7 +205,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, memset(ptr + 4, 0xff, 4); *(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); - gss_krb5_make_confounder(msg_start, blocksize); + gss_krb5_make_confounder(msg_start, conflen); if (kctx->gk5e->keyed_cksum) cksumkey = kctx->cksum; @@ -214,7 +215,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, /* XXXJBF: UGH!: */ tmp_pages = buf->pages; buf->pages = pages; - if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize, + if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen, cksumkey, KG_USAGE_SEAL, &md5cksum)) return GSS_S_FAILURE; buf->pages = tmp_pages; @@ -231,7 +232,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; - if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, + if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - conflen, pages)) return GSS_S_FAILURE; @@ -254,6 +255,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) void *data_start, *orig_start; int data_len; int blocksize; + u32 conflen = kctx->gk5e->conflen; int crypt_offset; u8 *cksumkey; @@ -327,7 +329,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) blocksize = crypto_blkcipher_blocksize(kctx->enc); data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + - blocksize; + conflen; orig_start = buf->head[0].iov_base + offset; data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; memmove(orig_start, data_start, data_len); -- cgit v1.2.3 From fffdaef2eb4a7333952e55cf97f1fc0fcc35f981 Mon Sep 17 00:00:00 2001 From: Kevin Coffman Date: Wed, 17 Mar 2010 13:03:06 -0400 Subject: gss_krb5: Add support for rc4-hmac encryption Add necessary changes to add kernel support for the rc4-hmac Kerberos encryption type used by Microsoft and described in rfc4757. 
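A note on the sequence-number handling this patch adds: per rfc4757 the RC4 key used for the 8-byte sequence number is derived from the session key and the token checksum in two HMAC-MD5 steps. The sketch below restates that derivation in user space, assuming OpenSSL's one-shot HMAC() helper is available; the function name is illustrative. krb5_rc4_setup_seq_key() in the diff below performs the same two steps with crypto_hash_setkey() and crypto_hash_digest().

#include <openssl/evp.h>
#include <openssl/hmac.h>

/*
 * Sketch only: derive the rfc4757 sequence-number key.
 *   Kseq       = HMAC-MD5(Ksess, 0x00000000)
 *   final Kseq = HMAC-MD5(Kseq, first 8 bytes of the token checksum)
 */
static void rc4_derive_kseq(const unsigned char *ksess, int ksess_len,
			    const unsigned char cksum[8],
			    unsigned char kseq_out[16])
{
	static const unsigned char zero[4];	/* 32-bit zero constant */
	unsigned char kseq[16];
	unsigned int len;

	HMAC(EVP_md5(), ksess, ksess_len, zero, sizeof(zero), kseq, &len);
	HMAC(EVP_md5(), kseq, sizeof(kseq), cksum, 8, kseq_out, &len);
}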
Signed-off-by: Kevin Coffman Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_crypto.c | 255 ++++++++++++++++++++++++++++++++++ net/sunrpc/auth_gss/gss_krb5_mech.c | 96 +++++++++++++ net/sunrpc/auth_gss/gss_krb5_seal.c | 1 + net/sunrpc/auth_gss/gss_krb5_seqnum.c | 77 ++++++++++ net/sunrpc/auth_gss/gss_krb5_unseal.c | 1 + net/sunrpc/auth_gss/gss_krb5_wrap.c | 66 +++++++-- 6 files changed, 483 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ed4106a3daf..75ee993ea05 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -124,6 +124,114 @@ checksummer(struct scatterlist *sg, void *data) return crypto_hash_update(desc, sg, sg->length); } +static int +arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) +{ + unsigned int ms_usage; + + switch (usage) { + case KG_USAGE_SIGN: + ms_usage = 15; + break; + case KG_USAGE_SEAL: + ms_usage = 13; + break; + default: + return EINVAL;; + } + salt[0] = (ms_usage >> 0) & 0xff; + salt[1] = (ms_usage >> 8) & 0xff; + salt[2] = (ms_usage >> 16) & 0xff; + salt[3] = (ms_usage >> 24) & 0xff; + + return 0; +} + +static u32 +make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, + struct xdr_buf *body, int body_offset, u8 *cksumkey, + unsigned int usage, struct xdr_netobj *cksumout) +{ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; + u8 rc4salt[4]; + struct crypto_hash *md5; + struct crypto_hash *hmac_md5; + + if (cksumkey == NULL) + return GSS_S_FAILURE; + + if (cksumout->len < kctx->gk5e->cksumlength) { + dprintk("%s: checksum buffer length, %u, too small for %s\n", + __func__, cksumout->len, kctx->gk5e->name); + return GSS_S_FAILURE; + } + + if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { + dprintk("%s: invalid usage value %u\n", __func__, usage); + return GSS_S_FAILURE; + } + + md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(md5)) + return GSS_S_FAILURE; + + hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac_md5)) { + crypto_free_hash(md5); + return GSS_S_FAILURE; + } + + desc.tfm = md5; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_init(&desc); + if (err) + goto out; + sg_init_one(sg, rc4salt, 4); + err = crypto_hash_update(&desc, sg, 4); + if (err) + goto out; + + sg_init_one(sg, header, hdrlen); + err = crypto_hash_update(&desc, sg, hdrlen); + if (err) + goto out; + err = xdr_process_buf(body, body_offset, body->len - body_offset, + checksummer, &desc); + if (err) + goto out; + err = crypto_hash_final(&desc, checksumdata); + if (err) + goto out; + + desc.tfm = hmac_md5; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + err = crypto_hash_init(&desc); + if (err) + goto out; + err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); + if (err) + goto out; + + sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5)); + err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), + checksumdata); + if (err) + goto out; + + memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); + cksumout->len = kctx->gk5e->cksumlength; +out: + crypto_free_hash(md5); + crypto_free_hash(hmac_md5); + return err ? 
GSS_S_FAILURE : 0; +} + /* * checksum the plaintext data and hdrlen bytes of the token header * The checksum is performed over the first 8 bytes of the @@ -140,6 +248,11 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; unsigned int checksumlen; + if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) + return make_checksum_hmac_md5(kctx, header, hdrlen, + body, body_offset, + cksumkey, usage, cksumout); + if (cksumout->len < kctx->gk5e->cksumlength) { dprintk("%s: checksum buffer length, %u, too small for %s\n", __func__, cksumout->len, kctx->gk5e->name); @@ -733,3 +846,145 @@ out_err: ret = GSS_S_FAILURE; return ret; } + +/* + * Compute Kseq given the initial session key and the checksum. + * Set the key of the given cipher. + */ +int +krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, + unsigned char *cksum) +{ + struct crypto_hash *hmac; + struct hash_desc desc; + struct scatterlist sg[1]; + u8 Kseq[GSS_KRB5_MAX_KEYLEN]; + u32 zeroconstant = 0; + int err; + + dprintk("%s: entered\n", __func__); + + hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld, allocating hash '%s'\n", + __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); + return PTR_ERR(hmac); + } + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err; + + /* Compute intermediate Kseq from session key */ + err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_init_table(sg, 1); + sg_set_buf(sg, &zeroconstant, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kseq); + if (err) + goto out_err; + + /* Compute final Kseq from the checksum and intermediate Kseq */ + err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_set_buf(sg, cksum, 8); + + err = crypto_hash_digest(&desc, sg, 8, Kseq); + if (err) + goto out_err; + + err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); + if (err) + goto out_err; + + err = 0; + +out_err: + crypto_free_hash(hmac); + dprintk("%s: returning %d\n", __func__, err); + return err; +} + +/* + * Compute Kcrypt given the initial session key and the plaintext seqnum. + * Set the key of cipher kctx->enc. 
+ */ +int +krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher, + s32 seqnum) +{ + struct crypto_hash *hmac; + struct hash_desc desc; + struct scatterlist sg[1]; + u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; + u8 zeroconstant[4] = {0}; + u8 seqnumarray[4]; + int err, i; + + dprintk("%s: entered, seqnum %u\n", __func__, seqnum); + + hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld, allocating hash '%s'\n", + __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); + return PTR_ERR(hmac); + } + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err; + + /* Compute intermediate Kcrypt from session key */ + for (i = 0; i < kctx->gk5e->keylength; i++) + Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; + + err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + sg_init_table(sg, 1); + sg_set_buf(sg, zeroconstant, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kcrypt); + if (err) + goto out_err; + + /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ + err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff); + seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff); + seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); + seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); + + sg_set_buf(sg, seqnumarray, 4); + + err = crypto_hash_digest(&desc, sg, 4, Kcrypt); + if (err) + goto out_err; + + err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); + if (err) + goto out_err; + + err = 0; + +out_err: + crypto_free_hash(hmac); + dprintk("%s: returning %d\n", __func__, err); + return err; +} + diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index ef6b3134904..54eda5f0c58 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -72,6 +72,27 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .cksumlength = 8, .keyed_cksum = 0, }, + /* + * RC4-HMAC + */ + { + .etype = ENCTYPE_ARCFOUR_HMAC, + .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR, + .name = "rc4-hmac", + .encrypt_name = "ecb(arc4)", + .cksum_name = "hmac(md5)", + .encrypt = krb5_encrypt, + .decrypt = krb5_decrypt, + .mk_key = NULL, + .signalg = SGN_ALG_HMAC_MD5, + .sealalg = SEAL_ALG_MICROSOFT_RC4, + .keybytes = 16, + .keylength = 16, + .blocksize = 1, + .conflen = 8, + .cksumlength = 8, + .keyed_cksum = 1, + }, /* * 3DES */ @@ -392,6 +413,79 @@ out_err: return -EINVAL; } +/* + * Note that RC4 depends on deriving keys using the sequence + * number or the checksum of a token. Therefore, the final keys + * cannot be calculated until the token is being constructed! 
+ */ +static int +context_derive_keys_rc4(struct krb5_ctx *ctx) +{ + struct crypto_hash *hmac; + char sigkeyconstant[] = "signaturekey"; + int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ + struct hash_desc desc; + struct scatterlist sg[1]; + int err; + + dprintk("RPC: %s: entered\n", __func__); + /* + * derive cksum (aka Ksign) key + */ + hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmac)) { + dprintk("%s: error %ld allocating hash '%s'\n", + __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); + err = PTR_ERR(hmac); + goto out_err; + } + + err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); + if (err) + goto out_err_free_hmac; + + sg_init_table(sg, 1); + sg_set_buf(sg, sigkeyconstant, slen); + + desc.tfm = hmac; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (err) + goto out_err_free_hmac; + + err = crypto_hash_digest(&desc, sg, slen, ctx->cksum); + if (err) + goto out_err_free_hmac; + /* + * allocate hash, and blkciphers for data and seqnum encryption + */ + ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->enc)) { + err = PTR_ERR(ctx->enc); + goto out_err_free_hmac; + } + + ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(ctx->seq)) { + crypto_free_blkcipher(ctx->enc); + err = PTR_ERR(ctx->seq); + goto out_err_free_hmac; + } + + dprintk("RPC: %s: returning success\n", __func__); + + err = 0; + +out_err_free_hmac: + crypto_free_hash(hmac); +out_err: + dprintk("RPC: %s: returning %d\n", __func__, err); + return err; +} + static int context_derive_keys_new(struct krb5_ctx *ctx) { @@ -561,6 +655,8 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) switch (ctx->enctype) { case ENCTYPE_DES3_CBC_RAW: return context_derive_keys_des3(ctx); + case ENCTYPE_ARCFOUR_HMAC: + return context_derive_keys_rc4(ctx); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: return context_derive_keys_new(ctx); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 36fe487d93d..d7941eab779 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -213,6 +213,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: return gss_get_mic_v1(ctx, text, token); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 83b59308497..415c013ba38 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -39,6 +39,38 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif +static s32 +krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, + unsigned char *cksum, unsigned char *buf) +{ + struct crypto_blkcipher *cipher; + unsigned char plain[8]; + s32 code; + + dprintk("RPC: %s:\n", __func__); + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); + plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); + plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); + plain[3] = (unsigned char) ((seqnum >> 0) & 0xff); + plain[4] = direction; + plain[5] = direction; + plain[6] = direction; + plain[7] = direction; + + code = 
krb5_rc4_setup_seq_key(kctx, cipher, cksum); + if (code) + goto out; + + code = krb5_encrypt(cipher, cksum, plain, buf, 8); +out: + crypto_free_blkcipher(cipher); + return code; +} s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_blkcipher *key, @@ -48,6 +80,10 @@ krb5_make_seq_num(struct krb5_ctx *kctx, { unsigned char plain[8]; + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) + return krb5_make_rc4_seq_num(kctx, direction, seqnum, + cksum, buf); + plain[0] = (unsigned char) (seqnum & 0xff); plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); @@ -61,6 +97,43 @@ krb5_make_seq_num(struct krb5_ctx *kctx, return krb5_encrypt(key, cksum, plain, buf, 8); } +static s32 +krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, + unsigned char *buf, int *direction, s32 *seqnum) +{ + struct crypto_blkcipher *cipher; + unsigned char plain[8]; + s32 code; + + dprintk("RPC: %s:\n", __func__); + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); + if (code) + goto out; + + code = krb5_decrypt(cipher, cksum, buf, plain, 8); + if (code) + goto out; + + if ((plain[4] != plain[5]) || (plain[4] != plain[6]) + || (plain[4] != plain[7])) { + code = (s32)KG_BAD_SEQ; + goto out; + } + + *direction = plain[4]; + + *seqnum = ((plain[0] << 24) | (plain[1] << 16) | + (plain[2] << 8) | (plain[3])); +out: + crypto_free_blkcipher(cipher); + return code; +} + s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, @@ -73,6 +146,10 @@ krb5_get_seq_num(struct krb5_ctx *kctx, dprintk("RPC: krb5_get_seq_num:\n"); + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) + return krb5_get_rc4_seq_num(kctx, cksum, buf, + direction, seqnum); + if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) return code; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 97eb91b8c70..6cd930f3678 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -216,6 +216,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: return gss_verify_mic_v1(ctx, message_buffer, read_token); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 383db891c83..2763e3e48db 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -232,9 +232,26 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; - if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - conflen, - pages)) - return GSS_S_FAILURE; + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { + struct crypto_blkcipher *cipher; + int err; + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return GSS_S_FAILURE; + + krb5_rc4_setup_enc_key(kctx, cipher, seq_send); + + err = gss_encrypt_xdr_buf(cipher, buf, + offset + headlen - conflen, pages); + crypto_free_blkcipher(cipher); + if (err) + return GSS_S_FAILURE; + } else { + if (gss_encrypt_xdr_buf(kctx->enc, buf, + offset + headlen - conflen, pages)) + return GSS_S_FAILURE; + } return (kctx->endtime < now) ? 
GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } @@ -291,8 +308,37 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) */ crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - (unsigned char *)buf->head[0].iov_base; - if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) - return GSS_S_DEFECTIVE_TOKEN; + + /* + * Need plaintext seqnum to derive encryption key for arcfour-hmac + */ + if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, + ptr + 8, &direction, &seqnum)) + return GSS_S_BAD_SIG; + + if ((kctx->initiate && direction != 0xff) || + (!kctx->initiate && direction != 0)) + return GSS_S_BAD_SIG; + + if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { + struct crypto_blkcipher *cipher; + int err; + + cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(cipher)) + return GSS_S_FAILURE; + + krb5_rc4_setup_enc_key(kctx, cipher, seqnum); + + err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); + crypto_free_blkcipher(cipher); + if (err) + return GSS_S_DEFECTIVE_TOKEN; + } else { + if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) + return GSS_S_DEFECTIVE_TOKEN; + } if (kctx->gk5e->keyed_cksum) cksumkey = kctx->cksum; @@ -316,14 +362,6 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf) /* do sequencing checks */ - if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, - ptr + 8, &direction, &seqnum)) - return GSS_S_BAD_SIG; - - if ((kctx->initiate && direction != 0xff) || - (!kctx->initiate && direction != 0)) - return GSS_S_BAD_SIG; - /* Copy the data back to the right position. XXX: Would probably be * better to copy and encrypt at the same time. */ @@ -521,6 +559,7 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: return gss_wrap_kerberos_v1(kctx, offset, buf, pages); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: @@ -538,6 +577,7 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf) BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: + case ENCTYPE_ARCFOUR_HMAC: return gss_unwrap_kerberos_v1(kctx, offset, buf); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: -- cgit v1.2.3 From fc54a0c65fc8cae6b0355512f0b619c1515e7d7f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 8 Apr 2010 14:25:20 -0400 Subject: gss_krb5: Advertise rc4-hmac enctype support in the rpcsec_gss/krb5 upcall Update the upcall info indicating which Kerberos enctypes the kernel supports Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/gss_krb5_mech.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 54eda5f0c58..7c249a3f9a0 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -742,7 +742,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, - .gm_upcall_enctypes = "enctypes=18,17,16,3,1,2 ", + .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ", }; static int __init init_kerberos_module(void) -- cgit v1.2.3 From ee5ebe851ed60206f150d3f189416f9c63245b66 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Apr 2010 16:37:01 -0400 Subject: SUNRPC: Clean up xprt_release() Reviewed-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 32 
++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 42f09ade004..18415cc37c0 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -62,7 +62,6 @@ * Local functions */ static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); -static inline void do_xprt_reserve(struct rpc_task *); static void xprt_connect_status(struct rpc_task *task); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); @@ -935,7 +934,7 @@ void xprt_transmit(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); } -static inline void do_xprt_reserve(struct rpc_task *task) +static void xprt_alloc_slot(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -955,6 +954,16 @@ static inline void do_xprt_reserve(struct rpc_task *task) rpc_sleep_on(&xprt->backlog, task, NULL); } +static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) +{ + memset(req, 0, sizeof(*req)); /* mark unused */ + + spin_lock(&xprt->reserve_lock); + list_add(&req->rq_list, &xprt->free); + rpc_wake_up_next(&xprt->backlog); + spin_unlock(&xprt->reserve_lock); +} + /** * xprt_reserve - allocate an RPC request slot * @task: RPC task requesting a slot allocation @@ -968,7 +977,7 @@ void xprt_reserve(struct rpc_task *task) task->tk_status = -EIO; spin_lock(&xprt->reserve_lock); - do_xprt_reserve(task); + xprt_alloc_slot(task); spin_unlock(&xprt->reserve_lock); } @@ -1006,14 +1015,10 @@ void xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt; struct rpc_rqst *req; - int is_bc_request; if (!(req = task->tk_rqstp)) return; - /* Preallocated backchannel request? */ - is_bc_request = bc_prealloc(req); - xprt = req->rq_xprt; rpc_count_iostats(task); spin_lock_bh(&xprt->transport_lock); @@ -1027,21 +1032,16 @@ void xprt_release(struct rpc_task *task) mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout); spin_unlock_bh(&xprt->transport_lock); - if (!bc_prealloc(req)) + if (req->rq_buffer) xprt->ops->buf_free(req->rq_buffer); task->tk_rqstp = NULL; if (req->rq_release_snd_buf) req->rq_release_snd_buf(req); dprintk("RPC: %5u release request %p\n", task->tk_pid, req); - if (likely(!is_bc_request)) { - memset(req, 0, sizeof(*req)); /* mark unused */ - - spin_lock(&xprt->reserve_lock); - list_add(&req->rq_list, &xprt->free); - rpc_wake_up_next(&xprt->backlog); - spin_unlock(&xprt->reserve_lock); - } else + if (likely(!bc_prealloc(req))) + xprt_free_slot(xprt, req); + else xprt_free_bc_request(req); } -- cgit v1.2.3 From 19445b99b6d66af661c586c052de23110731a502 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Apr 2010 16:41:10 -0400 Subject: SUNRPC: Cleanup - make rpc_new_task() call rpc_release_calldata on failure Also have it return an ERR_PTR(-ENOMEM) instead of a null pointer. 
Reviewed-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/clnt.c | 19 ++++--------------- net/sunrpc/sched.c | 13 ++++++++++--- 2 files changed, 14 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 19c9983d536..8c7b5433022 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -556,26 +556,16 @@ static const struct rpc_call_ops rpc_default_ops = { */ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data) { - struct rpc_task *task, *ret; + struct rpc_task *task; task = rpc_new_task(task_setup_data); - if (task == NULL) { - rpc_release_calldata(task_setup_data->callback_ops, - task_setup_data->callback_data); - ret = ERR_PTR(-ENOMEM); + if (IS_ERR(task)) goto out; - } - if (task->tk_status != 0) { - ret = ERR_PTR(task->tk_status); - rpc_put_task(task); - goto out; - } atomic_inc(&task->tk_count); rpc_execute(task); - ret = task; out: - return ret; + return task; } EXPORT_SYMBOL_GPL(rpc_run_task); @@ -657,9 +647,8 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, * Create an rpc_task to send the data */ task = rpc_new_task(&task_setup_data); - if (!task) { + if (IS_ERR(task)) { xprt_free_bc_request(req); - task = ERR_PTR(-ENOMEM); goto out; } task->tk_rqstp = req; diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index aae6907fd54..c8979ce5d88 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -856,16 +856,23 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data) if (task == NULL) { task = rpc_alloc_task(); - if (task == NULL) - goto out; + if (task == NULL) { + rpc_release_calldata(setup_data->callback_ops, + setup_data->callback_data); + return ERR_PTR(-ENOMEM); + } flags = RPC_TASK_DYNAMIC; } rpc_init_task(task, setup_data); + if (task->tk_status < 0) { + int err = task->tk_status; + rpc_put_task(task); + return ERR_PTR(err); + } task->tk_flags |= flags; dprintk("RPC: allocated task %p\n", task); -out: return task; } -- cgit v1.2.3 From 0b9e79431377df452348e78262dd5a3dc359eeef Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Apr 2010 16:41:57 -0400 Subject: SUNRPC: Move the test for XPRT_CONNECTING into xprt_connect() This fixes a bug with setting xprt->stat.connect_start. 
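The guard being consolidated here is the usual double-connect pattern: only the task that wins the test-and-set may record connect_start and kick off the transport connect. A minimal sketch of that pattern (illustrative Python with invented names, using a lock where the kernel uses an atomic bit operation):

    import threading, time

    class Xprt:
        def __init__(self):
            self._lock = threading.Lock()
            self._connecting = False
            self.connect_start = None

        def test_and_set_connecting(self):
            with self._lock:
                was_connecting = self._connecting
                self._connecting = True
                return was_connecting

        def connect(self):
            if self.test_and_set_connecting():
                return                              # another task already owns the connect
            self.connect_start = time.monotonic()   # only the winner stamps the start time
            # ... kick off the actual transport connect here ...

    x = Xprt()
    x.connect()   # first caller wins and records connect_start
    x.connect()   # second caller backs off; connect_start is left untouched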
Reviewed-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 6 +++++- net/sunrpc/xprtrdma/transport.c | 28 +++++++++++++--------------- net/sunrpc/xprtsock.c | 15 +-------------- 3 files changed, 19 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 18415cc37c0..c71d835165e 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -712,10 +712,14 @@ void xprt_connect(struct rpc_task *task) task->tk_timeout = xprt->connect_timeout; rpc_sleep_on(&xprt->pending, task, xprt_connect_status); + + if (test_bit(XPRT_CLOSING, &xprt->state)) + return; + if (xprt_test_and_set_connecting(xprt)) + return; xprt->stat.connect_start = jiffies; xprt->ops->connect(task); } - return; } static void xprt_connect_status(struct rpc_task *task) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 187257b1d88..0607b9aaae9 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -449,21 +449,19 @@ xprt_rdma_connect(struct rpc_task *task) struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); - if (!xprt_test_and_set_connecting(xprt)) { - if (r_xprt->rx_ep.rep_connected != 0) { - /* Reconnect */ - schedule_delayed_work(&r_xprt->rdma_connect, - xprt->reestablish_timeout); - xprt->reestablish_timeout <<= 1; - if (xprt->reestablish_timeout > (30 * HZ)) - xprt->reestablish_timeout = (30 * HZ); - else if (xprt->reestablish_timeout < (5 * HZ)) - xprt->reestablish_timeout = (5 * HZ); - } else { - schedule_delayed_work(&r_xprt->rdma_connect, 0); - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } + if (r_xprt->rx_ep.rep_connected != 0) { + /* Reconnect */ + schedule_delayed_work(&r_xprt->rdma_connect, + xprt->reestablish_timeout); + xprt->reestablish_timeout <<= 1; + if (xprt->reestablish_timeout > (30 * HZ)) + xprt->reestablish_timeout = (30 * HZ); + else if (xprt->reestablish_timeout < (5 * HZ)) + xprt->reestablish_timeout = (5 * HZ); + } else { + schedule_delayed_work(&r_xprt->rdma_connect, 0); + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); } } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9847c30b500..d138afa3bb3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2016,9 +2016,6 @@ static void xs_connect(struct rpc_task *task) struct rpc_xprt *xprt = task->tk_xprt; struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); - if (xprt_test_and_set_connecting(xprt)) - return; - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { dprintk("RPC: xs_connect delayed xprt %p for %lu " "seconds\n", @@ -2038,16 +2035,6 @@ static void xs_connect(struct rpc_task *task) } } -static void xs_tcp_connect(struct rpc_task *task) -{ - struct rpc_xprt *xprt = task->tk_xprt; - - /* Exit if we need to wait for socket shutdown to complete */ - if (test_bit(XPRT_CLOSING, &xprt->state)) - return; - xs_connect(task); -} - /** * xs_udp_print_stats - display UDP socket-specifc stats * @xprt: rpc_xprt struct containing statistics @@ -2246,7 +2233,7 @@ static struct rpc_xprt_ops xs_tcp_ops = { .release_xprt = xs_tcp_release_xprt, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, - .connect = xs_tcp_connect, + .connect = xs_connect, .buf_alloc = rpc_malloc, .buf_free = rpc_free, .send_request = xs_tcp_send_request, -- cgit v1.2.3 From a8ce4a8f37fef0a09a1e920c2e09f67a80426c7e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 16 Apr 2010 16:42:12 -0400 Subject: SUNRPC: Fail over more 
quickly on connect errors We should not allow soft tasks to wait for longer than the major timeout period when waiting for a reconnect to occur. Remove the field xprt->connect_timeout since it has been obsoleted by xprt->reestablish_timeout. Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 2 +- net/sunrpc/xprtrdma/transport.c | 1 - net/sunrpc/xprtsock.c | 17 ----------------- 3 files changed, 1 insertion(+), 19 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index c71d835165e..6c9997ef386 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -710,7 +710,7 @@ void xprt_connect(struct rpc_task *task) if (task->tk_rqstp) task->tk_rqstp->rq_bytes_sent = 0; - task->tk_timeout = xprt->connect_timeout; + task->tk_timeout = task->tk_rqstp->rq_timeout; rpc_sleep_on(&xprt->pending, task, xprt_connect_status); if (test_bit(XPRT_CLOSING, &xprt->state)) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 0607b9aaae9..3f3b38c5642 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -305,7 +305,6 @@ xprt_setup_rdma(struct xprt_create *args) /* 60 second timeout, no retries */ xprt->timeout = &xprt_rdma_default_timeout; xprt->bind_timeout = (60U * HZ); - xprt->connect_timeout = (60U * HZ); xprt->reestablish_timeout = (5U * HZ); xprt->idle_timeout = (5U * 60 * HZ); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d138afa3bb3..790a8f31b0b 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -137,20 +137,6 @@ static ctl_table sunrpc_table[] = { #endif -/* - * Time out for an RPC UDP socket connect. UDP socket connects are - * synchronous, but we set a timeout anyway in case of resource - * exhaustion on the local host. - */ -#define XS_UDP_CONN_TO (5U * HZ) - -/* - * Wait duration for an RPC TCP connection to be established. Solaris - * NFS over TCP uses 60 seconds, for example, which is in line with how - * long a server takes to reboot. - */ -#define XS_TCP_CONN_TO (60U * HZ) - /* * Wait duration for a reply from the RPC portmapper. */ @@ -2324,7 +2310,6 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args) xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_UDP_CONN_TO; xprt->reestablish_timeout = XS_UDP_REEST_TO; xprt->idle_timeout = XS_IDLE_DISC_TO; @@ -2399,7 +2384,6 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args) xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; xprt->bind_timeout = XS_BIND_TO; - xprt->connect_timeout = XS_TCP_CONN_TO; xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; xprt->idle_timeout = XS_IDLE_DISC_TO; @@ -2475,7 +2459,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) /* backchannel */ xprt_set_bound(xprt); xprt->bind_timeout = 0; - xprt->connect_timeout = 0; xprt->reestablish_timeout = 0; xprt->idle_timeout = 0; -- cgit v1.2.3 From bbc72cea58f671665b6362be0d4e391813ac0eee Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 7 May 2010 13:34:27 -0400 Subject: SUNRPC: RPC metrics and RTT estimator should use same RTT value Compute an RPC request's RTT once, and use that value both for reporting RPC metrics, and for adjusting the RTT context used by the RPC client's RTT estimator algorithm. 
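On the estimator side, the consumer of that single RTT sample is a Jacobson/Karels-style smoothed-RTT calculation. The sketch below shows the general shape of such an estimator fed with one sample per completed request; it is illustrative only and does not reproduce the exact fixed-point arithmetic of the SUNRPC rpc_update_rtt() code:

    class RttEstimator:
        def __init__(self, initial_rtt):
            self.srtt = initial_rtt               # smoothed round-trip time
            self.rttvar = initial_rtt / 2         # smoothed deviation

        def update(self, sample):
            err = sample - self.srtt
            self.srtt += err / 8                  # gain of 1/8 on the mean
            self.rttvar += (abs(err) - self.rttvar) / 4   # gain of 1/4 on the deviation

        def retransmit_timeout(self):
            return self.srtt + 4 * self.rttvar

    est = RttEstimator(0.100)                     # start from a 100 ms guess
    for sample in (0.080, 0.120, 0.095):          # one measured RTT per reply
        est.update(sample)
    print(round(est.retransmit_timeout(), 3))

Feeding the metrics code and the estimator from the same measurement keeps the reported per-op RTT consistent with the value that actually drives retransmit timeouts.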
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/xprt.c | 13 ++++--------- net/sunrpc/xprtsock.c | 1 - 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 6c9997ef386..698c6271229 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -774,12 +774,7 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid) } EXPORT_SYMBOL_GPL(xprt_lookup_rqst); -/** - * xprt_update_rtt - update an RPC client's RTT state after receiving a reply - * @task: RPC request that recently completed - * - */ -void xprt_update_rtt(struct rpc_task *task) +static void xprt_update_rtt(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; @@ -787,12 +782,10 @@ void xprt_update_rtt(struct rpc_task *task) if (timer) { if (req->rq_ntrans == 1) - rpc_update_rtt(rtt, timer, - (long)jiffies - req->rq_xtime); + rpc_update_rtt(rtt, timer, task->tk_rtt); rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); } } -EXPORT_SYMBOL_GPL(xprt_update_rtt); /** * xprt_complete_rqst - called when reply processing is complete @@ -811,6 +804,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) xprt->stat.recvs++; task->tk_rtt = (long)jiffies - req->rq_xtime; + if (xprt->ops->timer != NULL) + xprt_update_rtt(task); list_del_init(&req->rq_list); req->rq_private_buf.len = copied; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 790a8f31b0b..3d1dcdf2aef 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -844,7 +844,6 @@ static void xs_udp_data_ready(struct sock *sk, int len) dst_confirm(skb_dst(skb)); xprt_adjust_cwnd(task, copied); - xprt_update_rtt(task); xprt_complete_rqst(task, copied); out_unlock: -- cgit v1.2.3 From ff8399709e41bf72b4cb145612a0f9a9f7283c83 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 7 May 2010 13:34:47 -0400 Subject: SUNRPC: Replace jiffies-based metrics with ktime-based metrics Currently RPC performance metrics that tabulate elapsed time use jiffies time values. This is problematic on systems that use slow jiffies (for instance 100HZ systems built for paravirtualized environments). It is also a problem for computing precise latency statistics for advanced network transports, such as InfiniBand, that can have round-trip latencies significanly faster than a single clock tick. For the RPC client, adopt the high resolution time stamp mechanism already used by the network layer and blktrace: ktime. We use ktime format time stamps for all internal computations, and convert to milliseconds for presentation. As a result, we need only addition operations in the performance critical paths; multiply/divide is required only for presentation. We could report RTT metrics in microseconds. In fact the mountstats format is versioned to accomodate exactly this kind of interface improvement. For now, however, we'll stay with millisecond precision for presentation to maintain backwards compatibility with the handful of currently deployed user space tools. At a later point, we'll move to an API such as BDI_STATS where a finer timestamp precision can be reported. 
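A quick back-of-the-envelope example of the precision problem on a 100HZ system (illustrative Python, not kernel code): any RTT shorter than one clock tick collapses to zero when measured in jiffies, while a nanosecond-resolution ktime value keeps the samples distinct.

    HZ = 100                                  # a "slow jiffies" configuration

    def ms_to_jiffies(ms):
        return (ms * HZ) // 1000              # truncating, like a tick-based clock

    for rtt_ms in (3, 7, 12):
        ticks = ms_to_jiffies(rtt_ms)
        print(f"{rtt_ms} ms RTT -> {ticks} jiffies -> reported as {ticks * 1000 // HZ} ms")

    # 3 ms and 7 ms both report as 0 ms; only the 12 ms sample survives (as 10 ms).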
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 2 +- net/sunrpc/stats.c | 27 +++++++++------------------ net/sunrpc/xprt.c | 8 +++++--- 3 files changed, 15 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index c8979ce5d88..aa7b07ef5d5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -834,7 +834,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta } /* starting timestamp */ - task->tk_start = jiffies; + task->tk_start = ktime_get(); dprintk("RPC: new task initialized, procpid %u\n", task_pid_nr(current)); diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 5785d2037f4..aacd95f0dce 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -144,7 +144,7 @@ void rpc_count_iostats(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_iostats *stats; struct rpc_iostats *op_metrics; - long rtt, execute, queue; + ktime_t delta; if (!task->tk_client || !task->tk_client->cl_metrics || !req) return; @@ -159,20 +159,13 @@ void rpc_count_iostats(struct rpc_task *task) op_metrics->om_bytes_sent += task->tk_bytes_sent; op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; - queue = (long)req->rq_xtime - task->tk_start; - if (queue < 0) - queue = -queue; - op_metrics->om_queue += queue; + delta = ktime_sub(req->rq_xtime, task->tk_start); + op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta); - rtt = task->tk_rtt; - if (rtt < 0) - rtt = -rtt; - op_metrics->om_rtt += rtt; + op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, task->tk_rtt); - execute = (long)jiffies - task->tk_start; - if (execute < 0) - execute = -execute; - op_metrics->om_execute += execute; + delta = ktime_sub(ktime_get(), task->tk_start); + op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); } static void _print_name(struct seq_file *seq, unsigned int op, @@ -186,8 +179,6 @@ static void _print_name(struct seq_file *seq, unsigned int op, seq_printf(seq, "\t%12u: ", op); } -#define MILLISECS_PER_JIFFY (1000 / HZ) - void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) { struct rpc_iostats *stats = clnt->cl_metrics; @@ -214,9 +205,9 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) metrics->om_timeouts, metrics->om_bytes_sent, metrics->om_bytes_recv, - metrics->om_queue * MILLISECS_PER_JIFFY, - metrics->om_rtt * MILLISECS_PER_JIFFY, - metrics->om_execute * MILLISECS_PER_JIFFY); + ktime_to_ms(metrics->om_queue), + ktime_to_ms(metrics->om_rtt), + ktime_to_ms(metrics->om_execute)); } } EXPORT_SYMBOL_GPL(rpc_print_iostats); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 698c6271229..8986b1b8286 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -779,10 +780,11 @@ static void xprt_update_rtt(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; unsigned timer = task->tk_msg.rpc_proc->p_timer; + long m = usecs_to_jiffies(ktime_to_us(task->tk_rtt)); if (timer) { if (req->rq_ntrans == 1) - rpc_update_rtt(rtt, timer, task->tk_rtt); + rpc_update_rtt(rtt, timer, m); rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); } } @@ -803,7 +805,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) task->tk_pid, ntohl(req->rq_xid), copied); xprt->stat.recvs++; - task->tk_rtt = (long)jiffies - req->rq_xtime; + task->tk_rtt = ktime_sub(ktime_get(), req->rq_xtime); if (xprt->ops->timer != NULL) 
xprt_update_rtt(task); @@ -904,7 +906,7 @@ void xprt_transmit(struct rpc_task *task) return; req->rq_connect_cookie = xprt->connect_cookie; - req->rq_xtime = jiffies; + req->rq_xtime = ktime_get(); status = xprt->ops->send_request(task); if (status != 0) { task->tk_status = status; -- cgit v1.2.3 From 712a4338669d7d57f952244abb608e6ac07e39da Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 12 May 2010 17:50:23 -0400 Subject: SUNRPC: Fix xs_setup_bc_tcp() It is a BUG for anybody to call this function without setting args->bc_xprt. Trying to return an error value is just wrong, since the user cannot fix this: it is a programming error, not a user error. Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3d1dcdf2aef..beefa7a3a90 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2442,9 +2442,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) struct sock_xprt *transport; struct svc_sock *bc_sock; - if (!args->bc_xprt) - ERR_PTR(-EINVAL); - xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); if (IS_ERR(xprt)) return xprt; -- cgit v1.2.3 From 1f4c86c0be9064ab4eebd9e67c84606c1cfeec4b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:02 -0400 Subject: NFS: Don't use GFP_KERNEL in rpcsec_gss downcalls Again, we can deadlock if the memory reclaim triggers a writeback that requires a rpcsec_gss credential lookup. Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 2 +- net/sunrpc/auth_gss/gss_krb5_keys.c | 9 +++++---- net/sunrpc/auth_gss/gss_krb5_mech.c | 34 ++++++++++++++++++---------------- net/sunrpc/auth_gss/gss_mech_switch.c | 7 ++++--- net/sunrpc/auth_gss/gss_spkm3_mech.c | 5 +++-- net/sunrpc/auth_gss/svcauth_gss.c | 2 +- 6 files changed, 32 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 6654c8534d3..48a7939dc9e 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -229,7 +229,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct p = ERR_PTR(-EFAULT); goto err; } - ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx); + ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS); if (ret < 0) { p = ERR_PTR(ret); goto err; diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 33b87f04b30..76e42e6be75 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -150,7 +150,8 @@ static void krb5_nfold(u32 inbits, const u8 *in, u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, - const struct xdr_netobj *in_constant) + const struct xdr_netobj *in_constant, + gfp_t gfp_mask) { size_t blocksize, keybytes, keylength, n; unsigned char *inblockdata, *outblockdata, *rawkey; @@ -175,15 +176,15 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e, /* allocate and set up buffers */ ret = ENOMEM; - inblockdata = kmalloc(blocksize, GFP_KERNEL); + inblockdata = kmalloc(blocksize, gfp_mask); if (inblockdata == NULL) goto err_free_cipher; - outblockdata = kmalloc(blocksize, GFP_KERNEL); + outblockdata = kmalloc(blocksize, gfp_mask); if (outblockdata == NULL) goto err_free_in; - rawkey = kmalloc(keybytes, GFP_KERNEL); + rawkey = kmalloc(keybytes, gfp_mask); if (rawkey == NULL) goto 
err_free_out; diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 7c249a3f9a0..03264461052 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -369,7 +369,7 @@ set_cdata(u8 cdata[GSS_KRB5_K5CLENGTH], u32 usage, u8 seed) } static int -context_derive_keys_des3(struct krb5_ctx *ctx) +context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; @@ -396,7 +396,7 @@ context_derive_keys_des3(struct krb5_ctx *ctx) /* derive cksum */ set_cdata(cdata, KG_USAGE_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->cksum; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving cksum key\n", __func__, err); @@ -487,7 +487,7 @@ out_err: } static int -context_derive_keys_new(struct krb5_ctx *ctx) +context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) { struct xdr_netobj c, keyin, keyout; u8 cdata[GSS_KRB5_K5CLENGTH]; @@ -503,7 +503,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* initiator seal encryption */ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); keyout.data = ctx->initiator_seal; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving initiator_seal key\n", __func__, err); @@ -518,7 +518,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* acceptor seal encryption */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_ENCRYPTION); keyout.data = ctx->acceptor_seal; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_seal key\n", __func__, err); @@ -533,7 +533,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* initiator sign checksum */ set_cdata(cdata, KG_USAGE_INITIATOR_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->initiator_sign; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving initiator_sign key\n", __func__, err); @@ -543,7 +543,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* acceptor sign checksum */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SIGN, KEY_USAGE_SEED_CHECKSUM); keyout.data = ctx->acceptor_sign; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_sign key\n", __func__, err); @@ -553,7 +553,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* initiator seal integrity */ set_cdata(cdata, KG_USAGE_INITIATOR_SEAL, KEY_USAGE_SEED_INTEGRITY); keyout.data = ctx->initiator_integ; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving initiator_integ key\n", __func__, err); @@ -563,7 +563,7 @@ context_derive_keys_new(struct krb5_ctx *ctx) /* acceptor seal integrity */ set_cdata(cdata, KG_USAGE_ACCEPTOR_SEAL, KEY_USAGE_SEED_INTEGRITY); keyout.data = ctx->acceptor_integ; - err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c); + err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); if (err) { dprintk("%s: Error %d deriving acceptor_integ key\n", __func__, err); @@ -598,7 +598,8 @@ out_err: } static int 
-gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) +gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, + gfp_t gfp_mask) { int keylen; @@ -645,7 +646,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) } ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, - gss_kerberos_mech.gm_oid.len, GFP_KERNEL); + gss_kerberos_mech.gm_oid.len, gfp_mask); if (unlikely(ctx->mech_used.data == NULL)) { p = ERR_PTR(-ENOMEM); goto out_err; @@ -654,12 +655,12 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx) switch (ctx->enctype) { case ENCTYPE_DES3_CBC_RAW: - return context_derive_keys_des3(ctx); + return context_derive_keys_des3(ctx, gfp_mask); case ENCTYPE_ARCFOUR_HMAC: return context_derive_keys_rc4(ctx); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: - return context_derive_keys_new(ctx); + return context_derive_keys_new(ctx, gfp_mask); default: return -EINVAL; } @@ -670,20 +671,21 @@ out_err: static int gss_import_sec_context_kerberos(const void *p, size_t len, - struct gss_ctx *ctx_id) + struct gss_ctx *ctx_id, + gfp_t gfp_mask) { const void *end = (const void *)((const char *)p + len); struct krb5_ctx *ctx; int ret; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = kzalloc(sizeof(*ctx), gfp_mask); if (ctx == NULL) return -ENOMEM; if (len == 85) ret = gss_import_v1_context(p, end, ctx); else - ret = gss_import_v2_context(p, end, ctx); + ret = gss_import_v2_context(p, end, ctx, gfp_mask); if (ret == 0) ctx_id->internal_ctx_id = ctx; diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 28a84ef41d1..2689de39dc7 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -249,14 +249,15 @@ EXPORT_SYMBOL_GPL(gss_mech_put); int gss_import_sec_context(const void *input_token, size_t bufsize, struct gss_api_mech *mech, - struct gss_ctx **ctx_id) + struct gss_ctx **ctx_id, + gfp_t gfp_mask) { - if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) + if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) return -ENOMEM; (*ctx_id)->mech_type = gss_mech_get(mech); return mech->gm_ops - ->gss_import_sec_context(input_token, bufsize, *ctx_id); + ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask); } /* gss_get_mic: compute a mic over message and return mic_token. 
*/ diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 035e1dd6af1..dc3f1f5ed86 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -84,13 +84,14 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) static int gss_import_sec_context_spkm3(const void *p, size_t len, - struct gss_ctx *ctx_id) + struct gss_ctx *ctx_id, + gfp_t gfp_mask) { const void *end = (const void *)((const char *)p + len); struct spkm3_ctx *ctx; int version; - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) + if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask))) goto out_err; p = simple_get_bytes(p, end, &version, sizeof(version)); diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 1d9ac4ac818..cc385b3a59c 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -494,7 +494,7 @@ static int rsc_parse(struct cache_detail *cd, len = qword_get(&mesg, buf, mlen); if (len < 0) goto out; - status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); + status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL); if (status) goto out; -- cgit v1.2.3 From d300a41ef1c39cc5e6b90fd8834ea7ab16b5c48f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:03 -0400 Subject: SUNRPC: Dont run rpcauth_cache_shrinker() when gfp_mask is GFP_NOFS Under some circumstances, put_rpccred() can end up allocating memory, so check the gfp_mask to prevent deadlocks. Signed-off-by: Trond Myklebust --- net/sunrpc/auth.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 95afe79dd9d..0667a36aee7 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -270,6 +270,8 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) LIST_HEAD(free); int res; + if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) + return (nr_to_scan == 0) ? 0 : -1; if (list_empty(&cred_unused)) return 0; spin_lock(&rpc_credcache_lock); -- cgit v1.2.3 From 93a05e65c090dda9cbd79d0cf57b65c4dbd8da55 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:06 -0400 Subject: SUNRPC: Ensure memory shrinker doesn't waste time in rpcauth_prune_expired() The 'cred_unused' list, that is traversed by rpcauth_cache_shrinker is ordered by time. If we hit a credential that is under the 60 second garbage collection moratorium, we should exit because we know at that point that all successive credentials are subject to the same moratorium... Signed-off-by: Trond Myklebust --- net/sunrpc/auth.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 0667a36aee7..c40856f589f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -236,10 +236,13 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { - /* Enforce a 60 second garbage collection moratorium */ + /* + * Enforce a 60 second garbage collection moratorium + * Note that the cred_unused list must be time-ordered. 
+ */ if (time_in_range(cred->cr_expire, expired, jiffies) && test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) - continue; + return 0; list_del_init(&cred->cr_lru); number_cred_unused--; @@ -258,7 +261,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) if (nr_to_scan == 0) break; } - return nr_to_scan; + return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; } /* @@ -275,8 +278,7 @@ rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) if (list_empty(&cred_unused)) return 0; spin_lock(&rpc_credcache_lock); - nr_to_scan = rpcauth_prune_expired(&free, nr_to_scan); - res = (number_cred_unused / 100) * sysctl_vfs_cache_pressure; + res = rpcauth_prune_expired(&free, nr_to_scan); spin_unlock(&rpc_credcache_lock); rpcauth_destroy_credlist(&free); return res; -- cgit v1.2.3 From 20673406534176ead9b984a84b662928110f77b1 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:06 -0400 Subject: SUNRPC: Ensure rpcauth_prune_expired() respects the nr_to_scan parameter Signed-off-by: Trond Myklebust --- net/sunrpc/auth.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index c40856f589f..73affb8624f 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -236,6 +236,8 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { + if (nr_to_scan-- == 0) + break; /* * Enforce a 60 second garbage collection moratorium * Note that the cred_unused list must be time-ordered. @@ -255,11 +257,8 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) get_rpccred(cred); list_add_tail(&cred->cr_lru, free); rpcauth_unhash_cred_locked(cred); - nr_to_scan--; } spin_unlock(cache_lock); - if (nr_to_scan == 0) - break; } return (number_cred_unused / 100) * sysctl_vfs_cache_pressure; } -- cgit v1.2.3 From d60dbb20a74c2cfa142be0a34dac3c6547ea086c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:49 -0400 Subject: SUNRPC: Move the task->tk_bytes_sent and tk_rtt to struct rpc_rqst It seems strange to maintain stats for bytes_sent in one structure, and bytes received in another. 
Try to assemble all the RPC request-related stats in struct rpc_rqst Signed-off-by: Trond Myklebust --- net/sunrpc/stats.c | 4 ++-- net/sunrpc/xprt.c | 4 ++-- net/sunrpc/xprtrdma/transport.c | 2 +- net/sunrpc/xprtsock.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index aacd95f0dce..ea1046f3f9a 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -156,13 +156,13 @@ void rpc_count_iostats(struct rpc_task *task) op_metrics->om_ntrans += req->rq_ntrans; op_metrics->om_timeouts += task->tk_timeouts; - op_metrics->om_bytes_sent += task->tk_bytes_sent; + op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent; op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd; delta = ktime_sub(req->rq_xtime, task->tk_start); op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta); - op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, task->tk_rtt); + op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt); delta = ktime_sub(ktime_get(), task->tk_start); op_metrics->om_execute = ktime_add(op_metrics->om_execute, delta); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 8986b1b8286..65fe2e4e7cb 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -780,7 +780,7 @@ static void xprt_update_rtt(struct rpc_task *task) struct rpc_rqst *req = task->tk_rqstp; struct rpc_rtt *rtt = task->tk_client->cl_rtt; unsigned timer = task->tk_msg.rpc_proc->p_timer; - long m = usecs_to_jiffies(ktime_to_us(task->tk_rtt)); + long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); if (timer) { if (req->rq_ntrans == 1) @@ -805,7 +805,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied) task->tk_pid, ntohl(req->rq_xid), copied); xprt->stat.recvs++; - task->tk_rtt = ktime_sub(ktime_get(), req->rq_xtime); + req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); if (xprt->ops->timer != NULL) xprt_update_rtt(task); diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 3f3b38c5642..a85e866a77f 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -674,7 +674,7 @@ xprt_rdma_send_request(struct rpc_task *task) if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req)) goto drop_connection; - task->tk_bytes_sent += rqst->rq_snd_buf.len; + rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len; rqst->rq_bytes_sent = 0; return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index beefa7a3a90..02fc7f04dd1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -528,7 +528,7 @@ static int xs_udp_send_request(struct rpc_task *task) xdr->len - req->rq_bytes_sent, status); if (status >= 0) { - task->tk_bytes_sent += status; + req->rq_xmit_bytes_sent += status; if (status >= req->rq_slen) return 0; /* Still some bytes left; set up for a retry later. */ @@ -624,7 +624,7 @@ static int xs_tcp_send_request(struct rpc_task *task) /* If we've sent the entire packet, immediately * reset the count of bytes sent. */ req->rq_bytes_sent += status; - task->tk_bytes_sent += status; + req->rq_xmit_bytes_sent += status; if (likely(req->rq_bytes_sent >= req->rq_slen)) { req->rq_bytes_sent = 0; return 0; -- cgit v1.2.3 From d72b6cec8d42eb7c2a249b613abf2c2b7a6eeb47 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:51:50 -0400 Subject: SUNRPC: Remove the 'tk_magic' debugging field It has not triggered in almost a decade. Time to get rid of it... 
Signed-off-by: Trond Myklebust --- net/sunrpc/sched.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'net') diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index aa7b07ef5d5..4a843b883b8 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -25,7 +25,6 @@ #ifdef RPC_DEBUG #define RPCDBG_FACILITY RPCDBG_SCHED -#define RPC_TASK_MAGIC_ID 0xf00baa #endif /* @@ -237,7 +236,6 @@ static void rpc_task_set_debuginfo(struct rpc_task *task) { static atomic_t rpc_pid; - task->tk_magic = RPC_TASK_MAGIC_ID; task->tk_pid = atomic_inc_return(&rpc_pid); } #else @@ -360,9 +358,6 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", task->tk_pid, jiffies); -#ifdef RPC_DEBUG - BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); -#endif /* Has the task been executed yet? If not, we cannot wake it up! */ if (!RPC_IS_ACTIVATED(task)) { printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); @@ -916,9 +911,6 @@ EXPORT_SYMBOL_GPL(rpc_put_task); static void rpc_release_task(struct rpc_task *task) { -#ifdef RPC_DEBUG - BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); -#endif dprintk("RPC: %5u release task\n", task->tk_pid); if (!list_empty(&task->tk_task)) { @@ -930,9 +922,6 @@ static void rpc_release_task(struct rpc_task *task) } BUG_ON (RPC_IS_QUEUED(task)); -#ifdef RPC_DEBUG - task->tk_magic = 0; -#endif /* Wake up anyone who is waiting for task completion */ rpc_mark_complete_task(task); -- cgit v1.2.3 From 126e216a8730532dfb685205309275f87e3d133e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 May 2010 12:55:38 -0400 Subject: SUNRPC: Don't spam gssd with upcall requests when the kerberos key expired Now that the rpc.gssd daemon can explicitly tell us that the key expired, we should cache that information to avoid spamming gssd. 
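The mechanism added below is a simple negative cache: when gssd reports EKEYEXPIRED, the credential is marked negative and further upcalls are suppressed until a retry delay (expired_cred_retry_delay, 5 seconds by default) has passed. A userspace sketch of the same idea (class and variable names here are invented for illustration):

    import time

    RETRY_DELAY = 5.0                         # seconds, like expired_cred_retry_delay

    class CredCache:
        def __init__(self):
            self.negative_until = {}          # cred -> moratorium deadline

        def mark_expired(self, cred):
            self.negative_until[cred] = time.monotonic() + RETRY_DELAY

        def should_upcall(self, cred):
            deadline = self.negative_until.get(cred)
            if deadline is not None and time.monotonic() < deadline:
                return False                  # fail fast with EKEYEXPIRED, no upcall
            return True

    cache = CredCache()
    cache.mark_expired("uid=1000")
    print(cache.should_upcall("uid=1000"))    # False immediately after the expired downcall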
Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 65 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 48a7939dc9e..8da2a0e6857 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -57,6 +57,9 @@ static const struct rpc_authops authgss_ops; static const struct rpc_credops gss_credops; static const struct rpc_credops gss_nullops; +#define GSS_RETRY_EXPIRED 5 +static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED; + #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif @@ -349,6 +352,24 @@ gss_unhash_msg(struct gss_upcall_msg *gss_msg) spin_unlock(&inode->i_lock); } +static void +gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg) +{ + switch (gss_msg->msg.errno) { + case 0: + if (gss_msg->ctx == NULL) + break; + clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); + gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx); + break; + case -EKEYEXPIRED: + set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags); + } + gss_cred->gc_upcall_timestamp = jiffies; + gss_cred->gc_upcall = NULL; + rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); +} + static void gss_upcall_callback(struct rpc_task *task) { @@ -358,13 +379,9 @@ gss_upcall_callback(struct rpc_task *task) struct inode *inode = &gss_msg->inode->vfs_inode; spin_lock(&inode->i_lock); - if (gss_msg->ctx) - gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); - else - task->tk_status = gss_msg->msg.errno; - gss_cred->gc_upcall = NULL; - rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); + gss_handle_downcall_result(gss_cred, gss_msg); spin_unlock(&inode->i_lock); + task->tk_status = gss_msg->msg.errno; gss_release_msg(gss_msg); } @@ -513,18 +530,16 @@ gss_refresh_upcall(struct rpc_task *task) spin_lock(&inode->i_lock); if (gss_cred->gc_upcall != NULL) rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL); - else if (gss_msg->ctx != NULL) { - gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx); - gss_cred->gc_upcall = NULL; - rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno); - } else if (gss_msg->msg.errno >= 0) { + else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { task->tk_timeout = 0; gss_cred->gc_upcall = gss_msg; /* gss_upcall_callback will release the reference to gss_upcall_msg */ atomic_inc(&gss_msg->count); rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback); - } else + } else { + gss_handle_downcall_result(gss_cred, gss_msg); err = gss_msg->msg.errno; + } spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); out: @@ -1123,6 +1138,23 @@ static int gss_renew_cred(struct rpc_task *task) return 0; } +static int gss_cred_is_negative_entry(struct rpc_cred *cred) +{ + if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) { + unsigned long now = jiffies; + unsigned long begin, expire; + struct gss_cred *gss_cred; + + gss_cred = container_of(cred, struct gss_cred, gc_base); + begin = gss_cred->gc_upcall_timestamp; + expire = begin + gss_expired_cred_retry_delay * HZ; + + if (time_in_range_open(now, begin, expire)) + return 1; + } + return 0; +} + /* * Refresh credentials. 
XXX - finish */ @@ -1132,6 +1164,9 @@ gss_refresh(struct rpc_task *task) struct rpc_cred *cred = task->tk_msg.rpc_cred; int ret = 0; + if (gss_cred_is_negative_entry(cred)) + return -EKEYEXPIRED; + if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) && !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { ret = gss_renew_cred(task); @@ -1585,5 +1620,11 @@ static void __exit exit_rpcsec_gss(void) } MODULE_LICENSE("GPL"); +module_param_named(expired_cred_retry_delay, + gss_expired_cred_retry_delay, + uint, 0644); +MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until " + "the RPC engine retries an expired credential"); + module_init(init_rpcsec_gss) module_exit(exit_rpcsec_gss) -- cgit v1.2.3
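Editorial usage note: the retry delay added above defaults to 5 seconds (GSS_RETRY_EXPIRED) and is exported as the module parameter expired_cred_retry_delay with permissions 0644, so it can be set on the module command line at load time or adjusted at runtime through sysfs. Assuming the usual build in which auth_gss.c is linked into the auth_rpcgss module, the knob should appear as /sys/module/auth_rpcgss/parameters/expired_cred_retry_delay; the exact path depends on how the GSS code is built into your kernel.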