Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/mtd/nand/pxa3xx_nand.c | 157
1 file changed, 101 insertions(+), 56 deletions(-)
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e42496adda8d..56e8954dd02d 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -234,15 +234,44 @@ struct pxa3xx_nand_info {
 	int			use_spare;	/* use spare ? */
 	int			need_wait;
 
-	unsigned int		data_size;	/* data to be read from FIFO */
-	unsigned int		chunk_size;	/* split commands chunk size */
-	unsigned int		oob_size;
+	/* Amount of real data per full chunk */
+	unsigned int		chunk_size;
+
+	/* Amount of spare data per full chunk */
 	unsigned int		spare_size;
+
+	/* Number of full chunks (i.e chunk_size + spare_size) */
+	unsigned int		nfullchunks;
+
+	/*
+	 * Total number of chunks. If equal to nfullchunks, then there
+	 * are only full chunks. Otherwise, there is one last chunk of
+	 * size (last_chunk_size + last_spare_size)
+	 */
+	unsigned int		ntotalchunks;
+
+	/* Amount of real data in the last chunk */
+	unsigned int		last_chunk_size;
+
+	/* Amount of spare data in the last chunk */
+	unsigned int		last_spare_size;
+
 	unsigned int		ecc_size;
 	unsigned int		ecc_err_cnt;
 	unsigned int		max_bitflips;
 	int			retcode;
 
+	/*
+	 * Variables only valid during command
+	 * execution. step_chunk_size and step_spare_size is the
+	 * amount of real data and spare data in the current
+	 * chunk. cur_chunk is the current chunk being
+	 * read/programmed.
+	 */
+	unsigned int		step_chunk_size;
+	unsigned int		step_spare_size;
+	unsigned int		cur_chunk;
+
 	/* cached register value */
 	uint32_t		reg_ndcr;
 	uint32_t		ndtr0cs0;
@@ -538,25 +567,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
 	return 0;
 }
 
-/*
- * Set the data and OOB size, depending on the selected
- * spare and ECC configuration.
- * Only applicable to READ0, READOOB and PAGEPROG commands.
- */
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
-				struct mtd_info *mtd)
-{
-	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
-
-	info->data_size = mtd->writesize;
-	if (!oob_enable)
-		return;
-
-	info->oob_size = info->spare_size;
-	if (!info->use_ecc)
-		info->oob_size += info->ecc_size;
-}
-
 /**
  * NOTE: it is a must to set ND_RUN firstly, then write
  * command buffer, otherwise, it does not work.
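The comments above define the new chunk accounting; the sketch below is a standalone illustration (not driver code) of how those fields fit together. The numbers are taken from the 8-bit ECC / 4 KiB page layout that pxa_ecc_init() configures at the end of this patch (4 full chunks of 1024 data bytes with no spare, plus one trailing chunk carrying only 64 spare bytes); the chunk_layout struct and main() wrapper are purely illustrative, and the per-step selection mirrors the cur_chunk < nfullchunks test that prepare_set_command() gains further down.

#include <stdio.h>

/* Illustrative mirror of the new pxa3xx_nand_info chunk fields */
struct chunk_layout {
	unsigned int chunk_size, spare_size;		/* full chunk */
	unsigned int nfullchunks, ntotalchunks;
	unsigned int last_chunk_size, last_spare_size;	/* last chunk */
};

int main(void)
{
	/* 8-bit ECC per 512 bytes on a 4 KiB page, as set in pxa_ecc_init() */
	const struct chunk_layout l = {
		.chunk_size = 1024, .spare_size = 0,
		.nfullchunks = 4, .ntotalchunks = 5,
		.last_chunk_size = 0, .last_spare_size = 64,
	};
	unsigned int cur_chunk, data = 0, spare = 0;

	for (cur_chunk = 0; cur_chunk < l.ntotalchunks; cur_chunk++) {
		/* same selection prepare_set_command() now performs per step */
		unsigned int step_chunk_size = cur_chunk < l.nfullchunks ?
					       l.chunk_size : l.last_chunk_size;
		unsigned int step_spare_size = cur_chunk < l.nfullchunks ?
					       l.spare_size : l.last_spare_size;

		printf("chunk %u: %u data + %u spare\n",
		       cur_chunk, step_chunk_size, step_spare_size);
		data += step_chunk_size;
		spare += step_spare_size;
	}
	/* prints 4096 and 64: one full page of data plus its spare bytes */
	printf("total: %u data, %u spare\n", data, spare);
	return 0;
}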
@@ -672,28 +682,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
 
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
-	unsigned int do_bytes = min(info->data_size, info->chunk_size);
-
 	switch (info->state) {
 	case STATE_PIO_WRITING:
-		writesl(info->mmio_base + NDDB,
-			info->data_buff + info->data_buff_pos,
-			DIV_ROUND_UP(do_bytes, 4));
+		if (info->step_chunk_size)
+			writesl(info->mmio_base + NDDB,
+				info->data_buff + info->data_buff_pos,
+				DIV_ROUND_UP(info->step_chunk_size, 4));
 
-		if (info->oob_size > 0)
+		if (info->step_spare_size)
 			writesl(info->mmio_base + NDDB,
 				info->oob_buff + info->oob_buff_pos,
-				DIV_ROUND_UP(info->oob_size, 4));
+				DIV_ROUND_UP(info->step_spare_size, 4));
 		break;
 	case STATE_PIO_READING:
-		drain_fifo(info,
-			   info->data_buff + info->data_buff_pos,
-			   DIV_ROUND_UP(do_bytes, 4));
+		if (info->step_chunk_size)
+			drain_fifo(info,
+				   info->data_buff + info->data_buff_pos,
+				   DIV_ROUND_UP(info->step_chunk_size, 4));
 
-		if (info->oob_size > 0)
+		if (info->step_spare_size)
 			drain_fifo(info,
 				   info->oob_buff + info->oob_buff_pos,
-				   DIV_ROUND_UP(info->oob_size, 4));
+				   DIV_ROUND_UP(info->step_spare_size, 4));
 		break;
 	default:
 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -702,9 +712,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
 	}
 
 	/* Update buffer pointers for multi-page read/write */
-	info->data_buff_pos += do_bytes;
-	info->oob_buff_pos += info->oob_size;
-	info->data_size -= do_bytes;
+	info->data_buff_pos += info->step_chunk_size;
+	info->oob_buff_pos += info->step_spare_size;
 }
 
 static void pxa3xx_nand_data_dma_irq(void *data)
@@ -745,8 +754,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
 			info->state);
 		BUG();
 	}
-	info->sg.length = info->data_size +
-		(info->oob_size ? info->spare_size + info->ecc_size : 0);
+	info->sg.length = info->chunk_size;
+	if (info->use_spare)
+		info->sg.length += info->spare_size + info->ecc_size;
 	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
 
 	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
@@ -907,9 +917,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
 	/* reset data and oob column point to handle data */
 	info->buf_start		= 0;
 	info->buf_count		= 0;
-	info->oob_size		= 0;
 	info->data_buff_pos	= 0;
 	info->oob_buff_pos	= 0;
+	info->step_chunk_size	= 0;
+	info->step_spare_size	= 0;
+	info->cur_chunk		= 0;
 	info->use_ecc		= 0;
 	info->use_spare		= 1;
 	info->retcode		= ERR_NONE;
@@ -921,8 +933,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
 	case NAND_CMD_READ0:
 	case NAND_CMD_PAGEPROG:
 		info->use_ecc = 1;
-	case NAND_CMD_READOOB:
-		pxa3xx_set_datasize(info, mtd);
 		break;
 	case NAND_CMD_PARAM:
 		info->use_spare = 0;
@@ -981,6 +991,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 		if (command == NAND_CMD_READOOB)
 			info->buf_start += mtd->writesize;
 
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
 		/*
 		 * Multiple page read needs an 'extended command type' field,
 		 * which is either naked-read or last-read according to the
@@ -992,8 +1010,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
 					| NDCB0_LEN_OVRD
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->chunk_size +
-				      info->oob_size;
+			info->ndcb3 = info->step_chunk_size +
+				      info->step_spare_size;
 		}
 
 		set_command_address(info, mtd->writesize, column, page_addr);
@@ -1013,8 +1031,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
 				| addr_cycle
 				| command;
-			/* No data transfer in this case */
-			info->data_size = 0;
 			exec_cmd = 1;
 		}
 		break;
@@ -1026,6 +1042,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 			break;
 		}
 
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
 		/* Second command setting for large pages */
 		if (mtd->writesize > PAGE_CHUNK_SIZE) {
 			/*
@@ -1036,14 +1060,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
 					| NDCB0_LEN_OVRD
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->chunk_size +
-				      info->oob_size;
+			info->ndcb3 = info->step_chunk_size +
+				      info->step_spare_size;
 
 			/*
 			 * This is the command dispatch that completes a chunked
 			 * page program operation.
 			 */
-			if (info->data_size == 0) {
+			if (info->cur_chunk == info->ntotalchunks) {
 				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
 					| command;
@@ -1070,7 +1094,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 				| command;
 		info->ndcb1 = (column & 0xFF);
 		info->ndcb3 = INIT_BUFFER_SIZE;
-		info->data_size = INIT_BUFFER_SIZE;
+		info->step_chunk_size = INIT_BUFFER_SIZE;
 		break;
 
 	case NAND_CMD_READID:
@@ -1080,7 +1104,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 				| command;
 		info->ndcb1 = (column & 0xFF);
 
-		info->data_size = 8;
+		info->step_chunk_size = 8;
 		break;
 	case NAND_CMD_STATUS:
 		info->buf_count = 1;
@@ -1088,7 +1112,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
 				| NDCB0_ADDR_CYC(1)
 				| command;
 
-		info->data_size = 8;
+		info->step_chunk_size = 8;
 		break;
 
 	case NAND_CMD_ERASE1:
@@ -1229,6 +1253,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 	init_completion(&info->dev_ready);
 	do {
 		info->state = STATE_PREPARED;
+
 		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
 					       column, page_addr);
 		if (!exec_cmd) {
@@ -1248,22 +1273,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 			break;
 		}
 
+		/* Only a few commands need several steps */
+		if (command != NAND_CMD_PAGEPROG &&
+		    command != NAND_CMD_READ0 &&
+		    command != NAND_CMD_READOOB)
+			break;
+
+		info->cur_chunk++;
+
 		/* Check if the sequence is complete */
-		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+		if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
 			break;
 
 		/*
 		 * After a splitted program command sequence has issued
 		 * the command dispatch, the command sequence is complete.
 		 */
-		if (info->data_size == 0 &&
+		if (info->cur_chunk == (info->ntotalchunks + 1) &&
 		    command == NAND_CMD_PAGEPROG &&
 		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;
 
 		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
 			/* Last read: issue a 'last naked read' */
-			if (info->data_size == info->chunk_size)
+			if (info->cur_chunk == info->ntotalchunks - 1)
 				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
 			else
 				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1273,7 +1306,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
 		 * the command dispatch must be issued to complete.
 		 */
 		} else if (command == NAND_CMD_PAGEPROG &&
-			   info->data_size == 0) {
+			   info->cur_chunk == info->ntotalchunks) {
 			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
 		}
 	} while (1);
@@ -1518,6 +1551,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 			int strength, int ecc_stepsize, int page_size)
 {
 	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 2048;
 		info->spare_size = 40;
 		info->ecc_size = 24;
@@ -1526,6 +1561,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 		ecc->strength = 1;
 
 	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 512;
 		info->spare_size = 8;
 		info->ecc_size = 8;
@@ -1539,6 +1576,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 	 */
 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
 		info->chunk_size = 2048;
 		info->spare_size = 32;
 		info->ecc_size = 32;
@@ -1549,6 +1588,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 
 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 2;
+		info->ntotalchunks = 2;
 		info->chunk_size = 2048;
 		info->spare_size = 32;
 		info->ecc_size = 32;
@@ -1563,8 +1604,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
 	 */
 	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
 		info->ecc_bch = 1;
+		info->nfullchunks = 4;
+		info->ntotalchunks = 5;
 		info->chunk_size = 1024;
 		info->spare_size = 0;
+		info->last_chunk_size = 0;
+		info->last_spare_size = 64;
 		info->ecc_size = 32;
 		ecc->mode = NAND_ECC_HW;
 		ecc->size = info->chunk_size;
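For reference, here is a small standalone check (again not driver code; the cfg table simply copies the constants from the pxa_ecc_init() branches above) confirming that each hard-coded layout covers exactly one page of real data, and that ntotalchunks exceeds nfullchunks only for the 8-bit / 4 KiB case, whose extra chunk carries spare bytes alone.

#include <stdio.h>

struct cfg {
	const char *name;
	unsigned int page_size;
	unsigned int chunk_size, spare_size;
	unsigned int nfullchunks, ntotalchunks;
	unsigned int last_chunk_size, last_spare_size;
};

int main(void)
{
	/* values copied from the pxa_ecc_init() branches in this patch */
	static const struct cfg cfgs[] = {
		{ "1-bit/512B, 2KiB page", 2048, 2048, 40, 1, 1, 0,  0 },
		{ "1-bit/512B, 512B page",  512,  512,  8, 1, 1, 0,  0 },
		{ "4-bit/512B, 2KiB page", 2048, 2048, 32, 1, 1, 0,  0 },
		{ "4-bit/512B, 4KiB page", 4096, 2048, 32, 2, 2, 0,  0 },
		{ "8-bit/512B, 4KiB page", 4096, 1024,  0, 4, 5, 0, 64 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++) {
		const struct cfg *c = &cfgs[i];
		unsigned int data = c->nfullchunks * c->chunk_size +
				    c->last_chunk_size;
		unsigned int spare = c->nfullchunks * c->spare_size +
				     c->last_spare_size;

		printf("%-22s data=%u/%u spare=%u extra_chunks=%u\n",
		       c->name, data, c->page_size, spare,
		       c->ntotalchunks - c->nfullchunks);
	}
	return 0;
}

This makes the reason for the separate last_chunk_size/last_spare_size fields concrete: as the 8-bit table entry encodes, that layout does not interleave spare bytes with the data chunks but transfers them in one final, spare-only step.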