Diffstat (limited to 'drivers/crypto/nx/nx.c')
-rw-r--r-- | drivers/crypto/nx/nx.c | 35 |
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index bbdab6e5ccf0..5533fe31c90d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
 
 	do {
 		rc = vio_h_cop_sync(viodev, op);
-	} while ((rc == -EBUSY && !may_sleep && retries--) ||
-		 (rc == -EBUSY && may_sleep && cond_resched()));
+	} while (rc == -EBUSY && !may_sleep && retries--);
 
 	if (rc) {
 		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 	 * have been described (or @sgmax elements have been written), the
 	 * loop ends. min_t is used to ensure @end_addr falls on the same page
 	 * as sg_addr, if not, we need to create another nx_sg element for the
-	 * data on the next page */
+	 * data on the next page.
+	 *
+	 * Also when using vmalloc'ed data, every time that a system page
+	 * boundary is crossed the physical address needs to be re-calculated.
+	 */
 	for (sg = sg_head; sg_len < len; sg++) {
+		u64 next_page;
+
 		sg->addr = sg_addr;
-		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
-		sg->len = sg_addr - sg->addr;
+		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+				end_addr);
+
+		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
 		sg_len += sg->len;
 
+		if (sg_addr >= next_page &&
+		    is_vmalloc_addr(start_addr + sg_len)) {
+			sg_addr = page_to_phys(vmalloc_to_page(
+						start_addr + sg_len));
+			end_addr = sg_addr + len - sg_len;
+		}
+
 		if ((sg - sg_head) == sgmax) {
 			pr_err("nx: scatter/gather list overflow, pid: %d\n",
 			       current->pid);
@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
  * @dst: destination scatterlist
  * @src: source scatterlist
  * @nbytes: length of data described in the scatterlists
+ * @offset: number of bytes to fast-forward past at the beginning of
+ *          scatterlists.
  * @iv: destination for the iv data, if the algorithm requires it
  *
  * This is common code shared by all the AES algorithms. It uses the block
@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 		      struct scatterlist *dst,
 		      struct scatterlist *src,
 		      unsigned int nbytes,
+		      unsigned int offset,
 		      u8 *iv)
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 	if (iv)
 		memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
-	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
+	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
+				    offset, nbytes);
+	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
+				     offset, nbytes);
 
 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
  */
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
 {
+	spin_lock_init(&nx_ctx->lock);
 	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
 
 	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
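
For context, the nx_build_sg_list() hunk above clamps each scatter/gather element twice: once to the NX coprocessor's page size (NX_PAGE_NUM/NX_PAGE_SIZE) and once to the system page size (PAGE_MASK/PAGE_SIZE, typically 64K on these PowerPC systems), and it re-derives the physical base address whenever a system page boundary is crossed, because vmalloc'ed buffers are only virtually contiguous. The stand-alone user-space sketch below mirrors that splitting arithmetic only; build_segments(), fake_virt_to_phys(), SYS_PAGE_SIZE and struct seg are illustrative stand-ins and not part of the driver, which uses is_vmalloc_addr(), vmalloc_to_page() and page_to_phys() instead.

/*
 * Stand-alone sketch of the segment-splitting arithmetic used in
 * nx_build_sg_list().  All names below are stand-ins for the demo;
 * the "physical address" lookup is faked with a constant offset.
 */
#include <stdint.h>
#include <stdio.h>

#define NX_PAGE_SIZE	4096ULL		/* page size the NX coprocessor accepts */
#define SYS_PAGE_SIZE	65536ULL	/* assumed 64K system page size */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

struct seg {
	uint64_t addr;
	uint64_t len;
};

/* Fake virtual-to-physical translation; a real vmalloc mapping is not
 * physically contiguous, which is exactly why the lookup is repeated. */
static uint64_t fake_virt_to_phys(uint64_t vaddr)
{
	return vaddr + 0x100000000ULL;
}

/* Split [start, start + len) into segments that never cross an NX page
 * or a system page; redo the physical lookup at each system page. */
static int build_segments(uint64_t start, uint64_t len,
			  struct seg *out, int max)
{
	uint64_t sg_addr = fake_virt_to_phys(start);
	uint64_t end_addr = sg_addr + len;
	uint64_t sg_len = 0;
	int n = 0;

	while (sg_len < len && n < max) {
		uint64_t nx_end, next_page;

		out[n].addr = sg_addr;

		/* End of the current NX page, clamped to the buffer end. */
		nx_end = MIN((sg_addr + NX_PAGE_SIZE) & ~(NX_PAGE_SIZE - 1),
			     end_addr);

		/* Do not let the segment cross a system page boundary. */
		next_page = (out[n].addr & ~(SYS_PAGE_SIZE - 1)) + SYS_PAGE_SIZE;
		out[n].len = MIN(nx_end, next_page) - out[n].addr;
		sg_len += out[n].len;
		sg_addr = out[n].addr + out[n].len;

		/* Crossed a system page: the next byte may live in a
		 * different physical page, so translate again. */
		if (sg_addr >= next_page) {
			sg_addr = fake_virt_to_phys(start + sg_len);
			end_addr = sg_addr + len - sg_len;
		}
		n++;
	}
	return n;
}

int main(void)
{
	struct seg segs[16];
	int i, n;

	/* 8K of data starting 100 bytes before a 64K boundary. */
	n = build_segments(0x10000ULL - 100, 8192, segs, 16);
	for (i = 0; i < n; i++)
		printf("seg %d: addr=0x%llx len=%llu\n", i,
		       (unsigned long long)segs[i].addr,
		       (unsigned long long)segs[i].len);
	return 0;
}

With the parameters used in main(), the 8K buffer is split into three segments of 100, 4096 and 3996 bytes; the first one ends exactly at the 64K system page boundary, which is where the physical base is looked up again.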