author      Chuck Lever <chuck.lever@oracle.com>    2020-04-18 14:38:19 -0400
committer   Chuck Lever <chuck.lever@oracle.com>    2020-04-27 10:58:30 -0400
commit      a7e429a6fa6d612d1dacde96c885dc1bb4a9f400
tree        6806da98b5a7441ae94f7e70da7e34d5c2d299f7  /net/sunrpc/auth_gss/gss_krb5_wrap.c
parent      31c9590ae468478fe47dc0f5f0d3562b2f69450e
SUNRPC: Fix GSS privacy computation of auth->au_ralign
When the au_ralign field was added to gss_unwrap_resp_priv, the
wrong calculation was used. Setting au_rslack == au_ralign is
probably correct for kerberos_v1 privacy, but kerberos_v2 privacy
adds additional GSS data after the clear text RPC message.
au_ralign needs to be smaller than au_rslack in that fairly common
case.
When xdr_buf_trim() is restored to gss_unwrap_kerberos_v2(), it does
exactly what I feared it would: it trims off part of the clear text
RPC message. However, that happens because rpc_prepare_reply_pages()
does not set up the rq_rcv_buf's tail correctly when au_ralign is too
large.
Fixing the au_ralign computation also corrects the alignment of
rq_rcv_buf->pages so that the client does not have to shift reply
data payloads after they are received.
Fixes: 35e77d21baa0 ("SUNRPC: Add rpc_auth::au_ralign field")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
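
As a rough worked example of the distinction described above (the byte counts here are illustrative assumptions, not values taken from this patch): for kerberos_v1 the whole GSS blob precedes the clear text, so the unwrap path can report the same value for both quantities, slack = align = XDR_QUADLEN(bytes removed from the buffer). For kerberos_v2, GSS data also trails the clear text. Assuming a 16-byte token header (GSS_KRB5_TOK_HDR_LEN), a 16-byte confounder (headskip), a 12-byte HMAC-SHA1-96 checksum (tailskip), and ec == 0, and recalling that XDR_QUADLEN() rounds a byte count up to 4-byte XDR words:

    align = XDR_QUADLEN(16 + 16)             = 8 XDR words
    slack = align + XDR_QUADLEN(0 + 16 + 12) = 8 + 7 = 15 XDR words

so au_ralign does indeed need to be smaller than au_rslack in the kerberos_v2 case.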
Diffstat (limited to 'net/sunrpc/auth_gss/gss_krb5_wrap.c')
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_wrap.c   19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index c7589e35d5d9..4905652e7567 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -262,7 +262,8 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
 static u32
 gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
-                       struct xdr_buf *buf)
+                       struct xdr_buf *buf, unsigned int *slack,
+                       unsigned int *align)
 {
         int signalg;
         int sealalg;
@@ -280,6 +281,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
         u32 conflen = kctx->gk5e->conflen;
         int crypt_offset;
         u8 *cksumkey;
+        unsigned int saved_len = buf->len;
 
         dprintk("RPC: gss_unwrap_kerberos\n");
 
@@ -383,6 +385,10 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
         if (gss_krb5_remove_padding(buf, blocksize))
                 return GSS_S_DEFECTIVE_TOKEN;
 
+        /* slack must include room for krb5 padding */
+        *slack = XDR_QUADLEN(saved_len - buf->len);
+        /* The GSS blob always precedes the RPC message payload */
+        *align = *slack;
         return GSS_S_COMPLETE;
 }
 
@@ -489,7 +495,8 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 
 static u32
 gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
-                       struct xdr_buf *buf)
+                       struct xdr_buf *buf, unsigned int *slack,
+                       unsigned int *align)
 {
         time64_t now;
         u8 *ptr;
@@ -583,6 +590,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
         /* Trim off the trailing "extra count" and checksum blob */
         buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
 
+        *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
+        *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
         return GSS_S_COMPLETE;
 }
 
@@ -617,9 +626,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
         case ENCTYPE_DES_CBC_RAW:
         case ENCTYPE_DES3_CBC_RAW:
         case ENCTYPE_ARCFOUR_HMAC:
-                return gss_unwrap_kerberos_v1(kctx, offset, len, buf);
+                return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
+                                              &gctx->slack, &gctx->align);
         case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
         case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
-                return gss_unwrap_kerberos_v2(kctx, offset, len, buf);
+                return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
+                                              &gctx->slack, &gctx->align);
         }
 }
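
For anyone who wants to check the kerberos_v2 arithmetic outside the kernel, here is a minimal, self-contained sketch that mirrors the computation added above. The constants are illustrative assumptions (16-byte token header and confounder, 12-byte HMAC-SHA1-96 checksum, ec == 0), not values taken from this patch, and xdr_quadlen() simply restates the kernel's XDR_QUADLEN() rounding.

#include <stdio.h>

/* Stand-in for the kernel's XDR_QUADLEN(): bytes rounded up to 4-byte XDR words */
static unsigned int xdr_quadlen(unsigned int len)
{
        return (len + 3) >> 2;
}

int main(void)
{
        /* Illustrative byte counts only; real values depend on the enctype */
        unsigned int tok_hdr_len = 16;  /* GSS_KRB5_TOK_HDR_LEN */
        unsigned int headskip = 16;     /* e.g. AES confounder skipped at the head */
        unsigned int tailskip = 12;     /* e.g. HMAC-SHA1-96 checksum at the tail */
        unsigned int ec = 0;            /* "extra count" */

        /* Mirrors the new gss_unwrap_kerberos_v2() computation: only the
         * leading GSS data counts toward align, while slack also covers
         * the trailing extra count and checksum blob.
         */
        unsigned int align = xdr_quadlen(tok_hdr_len + headskip);
        unsigned int slack = align + xdr_quadlen(ec + tok_hdr_len + tailskip);

        printf("align = %u XDR words, slack = %u XDR words\n", align, slack);
        return 0;
}

With these example numbers the sketch prints align = 8 and slack = 15, matching the commit message's point that au_ralign must end up smaller than au_rslack for kerberos_v2 privacy.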