author    | Jiri Kosina <jkosina@suse.cz> | 2013-12-19 15:08:03 +0100
committer | Jiri Kosina <jkosina@suse.cz> | 2013-12-19 15:08:32 +0100
commit    | e23c34bb41da65f354fb7eee04300c56ee48f60c (patch)
tree      | 549fbe449d55273b81ef104a9755109bf4ae7817 /drivers/crypto/omap-aes.c
parent    | b481c2cb3534c85dca625973b33eba15f9af3e4c (diff)
parent    | 319e2e3f63c348a9b66db4667efa73178e18b17d (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to be able to apply fixes on top of newer things
in tree (efi-stub).
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'drivers/crypto/omap-aes.c')
-rw-r--r-- | drivers/crypto/omap-aes.c | 470
1 file changed, 262 insertions, 208 deletions
```diff
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5f7980586850..a9ccbf14096e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -13,7 +13,9 @@
  *
  */

-#define pr_fmt(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) "%20s: " fmt, __func__
+#define prn(num) pr_debug(#num "=%d\n", num)
+#define prx(num) pr_debug(#num "=%x\n", num)

 #include <linux/err.h>
 #include <linux/module.h>
@@ -38,6 +40,8 @@
 #define DST_MAXBURST		4
 #define DMA_MIN			(DST_MAXBURST * sizeof(u32))

+#define _calc_walked(inout) (dd->inout##_walk.offset - dd->inout##_sg->offset)
+
 /* OMAP TRM gives bitfields as start:end, where start is the higher bit
    number. For example 7:0 */
 #define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
@@ -74,6 +78,10 @@

 #define AES_REG_LENGTH_N(x)	(0x54 + ((x) * 0x04))

+#define AES_REG_IRQ_STATUS(dd)	((dd)->pdata->irq_status_ofs)
+#define AES_REG_IRQ_ENABLE(dd)	((dd)->pdata->irq_enable_ofs)
+#define AES_REG_IRQ_DATA_IN	BIT(1)
+#define AES_REG_IRQ_DATA_OUT	BIT(2)
 #define DEFAULT_TIMEOUT		(5*HZ)

 #define FLAGS_MODE_MASK		0x000f
@@ -86,6 +94,8 @@
 #define FLAGS_FAST		BIT(5)
 #define FLAGS_BUSY		BIT(6)

+#define AES_BLOCK_WORDS		(AES_BLOCK_SIZE >> 2)
+
 struct omap_aes_ctx {
 	struct omap_aes_dev *dd;

@@ -119,6 +129,8 @@ struct omap_aes_pdata {
 	u32		data_ofs;
 	u32		rev_ofs;
 	u32		mask_ofs;
+	u32		irq_enable_ofs;
+	u32		irq_status_ofs;

 	u32		dma_enable_in;
 	u32		dma_enable_out;
@@ -146,25 +158,32 @@ struct omap_aes_dev {
 	struct tasklet_struct	queue_task;

 	struct ablkcipher_request	*req;
+
+	/*
+	 * total is used by PIO mode for book keeping so introduce
+	 * variable total_save as need it to calc page_order
+	 */
 	size_t				total;
+	size_t				total_save;
+
 	struct scatterlist		*in_sg;
-	struct scatterlist		in_sgl;
-	size_t				in_offset;
 	struct scatterlist		*out_sg;
+
+	/* Buffers for copying for unaligned cases */
+	struct scatterlist		in_sgl;
 	struct scatterlist		out_sgl;
-	size_t				out_offset;
+	struct scatterlist		*orig_out;
+	int				sgs_copied;

-	size_t			buflen;
-	void			*buf_in;
-	size_t			dma_size;
+	struct scatter_walk		in_walk;
+	struct scatter_walk		out_walk;
 	int			dma_in;
 	struct dma_chan		*dma_lch_in;
-	dma_addr_t		dma_addr_in;
-	void			*buf_out;
 	int			dma_out;
 	struct dma_chan		*dma_lch_out;
-	dma_addr_t		dma_addr_out;
-
+	int			in_sg_len;
+	int			out_sg_len;
+	int			pio_only;
 	const struct omap_aes_pdata	*pdata;
 };

@@ -172,16 +191,36 @@ struct omap_aes_dev {
 static LIST_HEAD(dev_list);
 static DEFINE_SPINLOCK(list_lock);

+#ifdef DEBUG
+#define omap_aes_read(dd, offset)				\
+({								\
+	int _read_ret;						\
+	_read_ret = __raw_readl(dd->io_base + offset);		\
+	pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",	\
+		 offset, _read_ret);				\
+	_read_ret;						\
+})
+#else
 static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
 {
 	return __raw_readl(dd->io_base + offset);
 }
+#endif

+#ifdef DEBUG
+#define omap_aes_write(dd, offset, value)				\
+	do {								\
+		pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n",	\
+			 offset, value);				\
+		__raw_writel(value, dd->io_base + offset);		\
+	} while (0)
+#else
 static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
 				  u32 value)
 {
 	__raw_writel(value, dd->io_base + offset);
 }
+#endif

 static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
 					u32 value, u32 mask)
@@ -236,7 +275,7 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 	if (dd->flags & FLAGS_CBC)
 		val |= AES_REG_CTRL_CBC;
 	if (dd->flags & FLAGS_CTR) {
-		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32;
+		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
 		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
 	}
 	if (dd->flags & FLAGS_ENCRYPT)
@@ -323,33 +362,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 	dd->dma_lch_out = NULL;
 	dd->dma_lch_in = NULL;

-	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
-	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
-	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
-
-	if (!dd->buf_in || !dd->buf_out) {
-		dev_err(dd->dev, "unable to alloc pages.\n");
-		goto err_alloc;
-	}
-
-	/* MAP here */
-	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
-					 DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_in;
-	}
-
-	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
-					  DMA_FROM_DEVICE);
-	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
-		err = -EINVAL;
-		goto err_map_out;
-	}
-
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);

@@ -376,14 +388,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
 err_dma_out:
 	dma_release_channel(dd->dma_lch_in);
 err_dma_in:
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-err_map_out:
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-err_map_in:
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
-err_alloc:
 	if (err)
 		pr_err("error: %d\n", err);
 	return err;
@@ -393,11 +397,6 @@ static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
 {
 	dma_release_channel(dd->dma_lch_out);
 	dma_release_channel(dd->dma_lch_in);
-	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
-			 DMA_FROM_DEVICE);
-	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
-	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
-	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
 }

 static void sg_copy_buf(void *buf, struct scatterlist *sg,
@@ -414,59 +413,27 @@ static void sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }

-static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
-		   size_t buflen, size_t total, int out)
-{
-	unsigned int count, off = 0;
-
-	while (buflen && total) {
-		count = min((*sg)->length - *offset, total);
-		count = min(count, buflen);
-
-		if (!count)
-			return off;
-
-		/*
-		 * buflen and total are AES_BLOCK_SIZE size aligned,
-		 * so count should be also aligned
-		 */
-
-		sg_copy_buf(buf + off, *sg, *offset, count, out);
-
-		off += count;
-		buflen -= count;
-		*offset += count;
-		total -= count;
-
-		if (*offset == (*sg)->length) {
-			*sg = sg_next(*sg);
-			if (*sg)
-				*offset = 0;
-			else
-				total = 0;
-		}
-	}
-
-	return off;
-}
-
 static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
-		struct scatterlist *in_sg, struct scatterlist *out_sg)
+		struct scatterlist *in_sg, struct scatterlist *out_sg,
+		int in_sg_len, int out_sg_len)
 {
 	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct omap_aes_dev *dd = ctx->dd;
 	struct dma_async_tx_descriptor *tx_in, *tx_out;
 	struct dma_slave_config cfg;
-	dma_addr_t dma_addr_in = sg_dma_address(in_sg);
-	int ret, length = sg_dma_len(in_sg);
+	int ret;

-	pr_debug("len: %d\n", length);
+	if (dd->pio_only) {
+		scatterwalk_start(&dd->in_walk, dd->in_sg);
+		scatterwalk_start(&dd->out_walk, dd->out_sg);

-	dd->dma_size = length;
+		/* Enable DATAIN interrupt and let it take
+		   care of the rest */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+		return 0;
+	}

-	if (!(dd->flags & FLAGS_FAST))
-		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-					   DMA_TO_DEVICE);
+	dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

 	memset(&cfg, 0, sizeof(cfg));

@@ -485,7 +452,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 		return ret;
 	}

-	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, 1,
+	tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
 					DMA_MEM_TO_DEV,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!tx_in) {
@@ -504,7 +471,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 		return ret;
 	}

-	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, 1,
+	tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
 					DMA_DEV_TO_MEM,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!tx_out) {
@@ -522,7 +489,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm,
 	dma_async_issue_pending(dd->dma_lch_out);

 	/* start DMA */
-	dd->pdata->trigger(dd, length);
+	dd->pdata->trigger(dd, dd->total);

 	return 0;
 }
@@ -531,93 +498,32 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
 {
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
 					crypto_ablkcipher_reqtfm(dd->req));
-	int err, fast = 0, in, out;
-	size_t count;
-	dma_addr_t addr_in, addr_out;
-	struct scatterlist *in_sg, *out_sg;
-	int len32;
+	int err;

 	pr_debug("total: %d\n", dd->total);

-	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
-		/* check for alignment */
-		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
-		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));
-
-		fast = in && out;
-	}
-
-	if (fast)  {
-		count = min(dd->total, sg_dma_len(dd->in_sg));
-		count = min(count, sg_dma_len(dd->out_sg));
-
-		if (count != dd->total) {
-			pr_err("request length != buffer length\n");
-			return -EINVAL;
-		}
-
-		pr_debug("fast\n");
-
-		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+	if (!dd->pio_only) {
+		err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
+				 DMA_TO_DEVICE);
 		if (!err) {
 			dev_err(dd->dev, "dma_map_sg() error\n");
 			return -EINVAL;
 		}

-		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
+		err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+				 DMA_FROM_DEVICE);
 		if (!err) {
 			dev_err(dd->dev, "dma_map_sg() error\n");
-			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
 			return -EINVAL;
 		}
-
-		addr_in = sg_dma_address(dd->in_sg);
-		addr_out = sg_dma_address(dd->out_sg);
-
-		in_sg = dd->in_sg;
-		out_sg = dd->out_sg;
-
-		dd->flags |= FLAGS_FAST;
-
-	} else {
-		/* use cache buffers */
-		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
-				 dd->buflen, dd->total, 0);
-
-		len32 = DIV_ROUND_UP(count, DMA_MIN) * DMA_MIN;
-
-		/*
-		 * The data going into the AES module has been copied
-		 * to a local buffer and the data coming out will go
-		 * into a local buffer so set up local SG entries for
-		 * both.
-		 */
-		sg_init_table(&dd->in_sgl, 1);
-		dd->in_sgl.offset = dd->in_offset;
-		sg_dma_len(&dd->in_sgl) = len32;
-		sg_dma_address(&dd->in_sgl) = dd->dma_addr_in;
-
-		sg_init_table(&dd->out_sgl, 1);
-		dd->out_sgl.offset = dd->out_offset;
-		sg_dma_len(&dd->out_sgl) = len32;
-		sg_dma_address(&dd->out_sgl) = dd->dma_addr_out;
-
-		in_sg = &dd->in_sgl;
-		out_sg = &dd->out_sgl;
-
-		addr_in = dd->dma_addr_in;
-		addr_out = dd->dma_addr_out;
-
-		dd->flags &= ~FLAGS_FAST;
-	}
-
-	dd->total -= count;
-
-	err = omap_aes_crypt_dma(tfm, in_sg, out_sg);
-	if (err) {
-		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
+
+	err = omap_aes_crypt_dma(tfm, dd->in_sg, dd->out_sg, dd->in_sg_len,
+				 dd->out_sg_len);
+	if (err && !dd->pio_only) {
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
 	}

 	return err;
@@ -637,7 +543,6 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 {
 	int err = 0;
-	size_t count;

 	pr_debug("total: %d\n", dd->total);

@@ -646,23 +551,49 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
 	dmaengine_terminate_all(dd->dma_lch_in);
 	dmaengine_terminate_all(dd->dma_lch_out);

-	if (dd->flags & FLAGS_FAST) {
-		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
-		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
-	} else {
-		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
-					   dd->dma_size, DMA_FROM_DEVICE);
+	return err;
+}

-		/* copy data */
-		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
-				 dd->buflen, dd->dma_size, 1);
-		if (count != dd->dma_size) {
-			err = -EINVAL;
-			pr_err("not all data converted: %u\n", count);
-		}
+static int omap_aes_check_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->offset, 4))
+			return -1;
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return -1;
+		sg = sg_next(sg);
 	}
+	return 0;
+}

-	return err;
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+{
+	void *buf_in, *buf_out;
+	int pages;
+
+	pages = get_order(dd->total);
+
+	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+	if (!buf_in || !buf_out) {
+		pr_err("Couldn't allocated pages for unaligned cases.\n");
+		return -1;
+	}
+
+	dd->orig_out = dd->out_sg;
+
+	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
+
+	sg_init_table(&dd->in_sgl, 1);
+	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+	dd->in_sg = &dd->in_sgl;
+
+	sg_init_table(&dd->out_sgl, 1);
+	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+	dd->out_sg = &dd->out_sgl;
+
+	return 0;
 }

 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
@@ -698,11 +629,23 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
-	dd->in_offset = 0;
+	dd->total_save = req->nbytes;
 	dd->in_sg = req->src;
-	dd->out_offset = 0;
 	dd->out_sg = req->dst;

+	if (omap_aes_check_aligned(dd->in_sg) ||
+	    omap_aes_check_aligned(dd->out_sg)) {
+		if (omap_aes_copy_sgs(dd))
+			pr_err("Failed to copy SGs for unaligned cases\n");
+		dd->sgs_copied = 1;
+	} else {
+		dd->sgs_copied = 0;
+	}
+
+	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
+	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
+
 	rctx = ablkcipher_request_ctx(req);
 	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 	rctx->mode &= FLAGS_MODE_MASK;
@@ -726,21 +669,32 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
 static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-	int err;
-
-	pr_debug("enter\n");
+	void *buf_in, *buf_out;
+	int pages;
+
+	pr_debug("enter done_task\n");
+
+	if (!dd->pio_only) {
+		dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
+				       DMA_FROM_DEVICE);
+		dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
+			     DMA_FROM_DEVICE);
+		omap_aes_crypt_dma_stop(dd);
+	}

-	err = omap_aes_crypt_dma_stop(dd);
+	if (dd->sgs_copied) {
+		buf_in = sg_virt(&dd->in_sgl);
+		buf_out = sg_virt(&dd->out_sgl);

-	err = dd->err ? : err;
+		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);

-	if (dd->total && !err) {
-		err = omap_aes_crypt_dma_start(dd);
-		if (!err)
-			return; /* DMA started. Not fininishing. */
+		pages = get_order(dd->total_save);
+		free_pages((unsigned long)buf_in, pages);
+		free_pages((unsigned long)buf_out, pages);
 	}

-	omap_aes_finish_req(dd, err);
+	omap_aes_finish_req(dd, 0);
 	omap_aes_handle_queue(dd, NULL);

 	pr_debug("exit\n");
@@ -1002,6 +956,8 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
 	.data_ofs	= 0x60,
 	.rev_ofs	= 0x80,
 	.mask_ofs	= 0x84,
+	.irq_status_ofs = 0x8c,
+	.irq_enable_ofs = 0x90,
 	.dma_enable_in	= BIT(5),
 	.dma_enable_out	= BIT(6),
 	.major_mask	= 0x0700,
@@ -1010,6 +966,90 @@ static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
 	.minor_shift	= 0,
 };

+static irqreturn_t omap_aes_irq(int irq, void *dev_id)
+{
+	struct omap_aes_dev *dd = dev_id;
+	u32 status, i;
+	u32 *src, *dst;
+
+	status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
+	if (status & AES_REG_IRQ_DATA_IN) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->in_sg);
+
+		BUG_ON(_calc_walked(in) > dd->in_sg->length);
+
+		src = sg_virt(dd->in_sg) + _calc_walked(in);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);
+
+			scatterwalk_advance(&dd->in_walk, 4);
+			if (dd->in_sg->length == _calc_walked(in)) {
+				dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+				if (dd->in_sg) {
+					scatterwalk_start(&dd->in_walk,
+							  dd->in_sg);
+					src = sg_virt(dd->in_sg) +
+					      _calc_walked(in);
+				}
+			} else {
+				src++;
+			}
+		}
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_IN;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		/* Enable DATA_OUT interrupt */
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);
+
+	} else if (status & AES_REG_IRQ_DATA_OUT) {
+		omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);
+
+		BUG_ON(!dd->out_sg);
+
+		BUG_ON(_calc_walked(out) > dd->out_sg->length);
+
+		dst = sg_virt(dd->out_sg) + _calc_walked(out);
+
+		for (i = 0; i < AES_BLOCK_WORDS; i++) {
+			*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
+
+			scatterwalk_advance(&dd->out_walk, 4);
+			if (dd->out_sg->length == _calc_walked(out)) {
+				dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+				if (dd->out_sg) {
+					scatterwalk_start(&dd->out_walk,
+							  dd->out_sg);
+					dst = sg_virt(dd->out_sg) +
+					      _calc_walked(out);
+				}
+			} else {
+				dst++;
+			}
+		}
+
+		dd->total -= AES_BLOCK_SIZE;
+
+		BUG_ON(dd->total < 0);
+
+		/* Clear IRQ status */
+		status &= ~AES_REG_IRQ_DATA_OUT;
+		omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);
+
+		if (!dd->total)
+			/* All bytes read! */
+			tasklet_schedule(&dd->done_task);
+		else
+			/* Enable DATA_IN interrupt for next block */
+			omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
+	}
+
+	return IRQ_HANDLED;
+}
+
 static const struct of_device_id omap_aes_of_match[] = {
 	{
 		.compatible	= "ti,omap2-aes",
@@ -1115,10 +1155,10 @@ static int omap_aes_probe(struct platform_device *pdev)
 	struct omap_aes_dev *dd;
 	struct crypto_alg *algp;
 	struct resource res;
-	int err = -ENOMEM, i, j;
+	int err = -ENOMEM, i, j, irq = -1;
 	u32 reg;

-	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
+	dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
 	if (dd == NULL) {
 		dev_err(dev, "unable to alloc data struct.\n");
 		goto err_data;
@@ -1158,8 +1198,23 @@ static int omap_aes_probe(struct platform_device *pdev)
 	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);

 	err = omap_aes_dma_init(dd);
-	if (err)
-		goto err_dma;
+	if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+		dd->pio_only = 1;
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "can't get IRQ resource\n");
+			goto err_irq;
+		}
+
+		err = devm_request_irq(dev, irq, omap_aes_irq, 0,
+				       dev_name(dev), dd);
+		if (err) {
+			dev_err(dev, "Unable to grab omap-aes IRQ\n");
+			goto err_irq;
+		}
+	}
+
 	INIT_LIST_HEAD(&dd->list);
 	spin_lock(&list_lock);
@@ -1187,13 +1242,13 @@ err_algs:
 		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
 			crypto_unregister_alg(
 					&dd->pdata->algs_info[i].algs_list[j]);
-	omap_aes_dma_cleanup(dd);
-err_dma:
+	if (!dd->pio_only)
+		omap_aes_dma_cleanup(dd);
+err_irq:
 	tasklet_kill(&dd->done_task);
 	tasklet_kill(&dd->queue_task);
 	pm_runtime_disable(dev);
 err_res:
-	kfree(dd);
 	dd = NULL;
 err_data:
 	dev_err(dev, "initialization failed.\n");
@@ -1221,7 +1276,6 @@ static int omap_aes_remove(struct platform_device *pdev)
 	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	pm_runtime_disable(dd->dev);
-	kfree(dd);
 	dd = NULL;

 	return 0;
```
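For context on how the paths above are reached: requests enter through omap_aes_handle_queue() with caller-supplied src/dst scatterlists, and the new omap_aes_check_aligned()/omap_aes_copy_sgs() logic picks between direct SG DMA and the bounce-copy path (with PIO used when no DMA channels are available). The sketch below is hypothetical and not part of this commit; it assumes the ablkcipher API as it existed around this kernel version and an algorithm name ("cbc(aes)") that this driver registers. The buffer, key, and IV names are illustrative only.

```c
/*
 * Hypothetical sketch (not from this commit): a kernel caller
 * exercising an AES driver of this era via the ablkcipher API.
 * Whether the request takes the direct-SG DMA path or the new
 * copy/PIO paths depends on the alignment checks added above.
 */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>

static int example_cbc_aes_encrypt(void *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int err;

	/* "cbc(aes)" may be served by omap-aes when its priority wins */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ablkcipher(tfm);
		return -ENOMEM;
	}

	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out;

	/* One SG entry; in-place operation (src == dst).
	 * len is assumed to be a multiple of AES_BLOCK_SIZE. */
	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Completion handling omitted for brevity: a real caller would
	 * set a callback and wait out -EINPROGRESS/-EBUSY. */
	err = crypto_ablkcipher_encrypt(req);
out:
	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);
	return err;
}
```

With the changes in this diff, a request whose scatterlist offsets are not 4-byte aligned, or whose segment lengths are not multiples of AES_BLOCK_SIZE, is routed through the GFP_ATOMIC bounce buffers set up by omap_aes_copy_sgs() instead of the removed per-request cache-buffer machinery.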