Diffstat (limited to 'drivers/scsi/megaraid/megaraid_sas_fusion.c')
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_fusion.c	384
1 file changed, 312 insertions(+), 72 deletions(-)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 745da54c11aa..ad162fed389c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -99,7 +99,34 @@ static void megasas_free_reply_fusion(struct megasas_instance *instance);
static inline
void megasas_configure_queue_sizes(struct megasas_instance *instance);
+/**
+ * megasas_check_same_4gb_region - check whether a DMA allocation
+ *	stays within a single 4GB region
+ * @instance: adapter's soft instance
+ * @start_addr: start address of the DMA allocation
+ * @size: size of the allocation in bytes
+ *
+ * Return: true if the allocation does not cross a 4GB boundary,
+ *	false if it does.
+ */
+static inline bool megasas_check_same_4gb_region
+ (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
+{
+ dma_addr_t end_addr;
+
+ end_addr = start_addr + size;
+ if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
+ (unsigned long long)start_addr,
+ (unsigned long long)end_addr);
+ return false;
+ }
+
+ return true;
+}
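
The check above reduces to comparing the upper 32 bits of the first and last
byte of the buffer. The rest of the patch repeats one allocate/check/retry
pattern around it, condensed in the hypothetical sketch below (the helper name
and error handling are illustrative, not part of the patch). Since
roundup_pow_of_two(size) is a power-of-two alignment at least as large as the
buffer (and far below 4GB for these buffers), a block allocated at that
alignment can never straddle a 4GB boundary.

/* Illustrative only: allocate from a loosely aligned pool, and if the buffer
 * crosses a 4GB boundary, re-create the pool with power-of-two alignment and
 * retry. The caller destroys *pool on error.
 */
static void *megasas_alloc_within_4gb(struct megasas_instance *instance,
				      struct dma_pool **pool, const char *name,
				      size_t size, dma_addr_t *phys)
{
	void *buf;

	*pool = dma_pool_create(name, &instance->pdev->dev, size, 64, 0);
	if (!*pool)
		return NULL;

	buf = dma_pool_alloc(*pool, GFP_KERNEL, phys);
	if (!buf)
		return NULL;

	if (megasas_check_same_4gb_region(instance, *phys, size))
		return buf;

	/* Crossed a 4GB boundary: free, tighten the alignment, retry once. */
	dma_pool_free(*pool, buf, *phys);
	dma_pool_destroy(*pool);
	*pool = dma_pool_create(name, &instance->pdev->dev, size,
				roundup_pow_of_two(size), 0);
	if (!*pool)
		return NULL;

	return dma_pool_alloc(*pool, GFP_KERNEL, phys);
}
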
/**
* megasas_enable_intr_fusion - Enables interrupts
@@ -294,17 +321,23 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
struct megasas_cmd_fusion *cmd;
- /* SG, Sense */
- for (i = 0; i < instance->max_mpt_cmds; i++) {
- cmd = fusion->cmd_list[i];
- if (cmd) {
- if (cmd->sg_frame)
- dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
- if (cmd->sense)
- dma_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ if (fusion->sense)
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+
+ /* SG */
+ if (fusion->cmd_list) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ dma_pool_free(fusion->sg_dma_pool,
+ cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ }
+ kfree(cmd);
}
+ kfree(fusion->cmd_list);
}
if (fusion->sg_dma_pool) {
@@ -336,13 +369,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
dma_pool_destroy(fusion->io_request_frames_pool);
fusion->io_request_frames_pool = NULL;
}
-
-
- /* cmd_list */
- for (i = 0; i < instance->max_mpt_cmds; i++)
- kfree(fusion->cmd_list[i]);
-
- kfree(fusion->cmd_list);
}
/**
@@ -356,10 +382,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
+ int sense_sz;
+ u32 offset;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
-
+ sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
fusion->sg_dma_pool =
dma_pool_create("mr_sg", &instance->pdev->dev,
@@ -368,7 +396,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
dma_pool_create("mr_sense", &instance->pdev->dev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ sense_sz, 64, 0);
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev,
@@ -376,6 +404,51 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return -ENOMEM;
}
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+	/* The sense buffer, request frame and reply descriptor pools are
+	 * required to lie within the same 4GB region. The check below
+	 * verifies this. On failure, the old allocation and pool are
+	 * destroyed and a new DMA pool is created with updated alignment,
+	 * chosen such that the next allocation, if it succeeds, always
+	 * satisfies the same-4GB-region requirement.
+	 * The real requirement is not alignment as such, but that the
+	 * start and end of the DMA address range share the same upper
+	 * 32 bits.
+	 */
+
+ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
+ sense_sz)) {
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+ fusion->sense = NULL;
+ dma_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sense_dma_pool =
+ dma_pool_create("mr_sense_align", &instance->pdev->dev,
+ sense_sz, roundup_pow_of_two(sense_sz),
+ 0);
+ if (!fusion->sense_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL,
+ &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
/*
* Allocate and attach a frame to each of the commands in cmd_list
*/
@@ -384,9 +457,11 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sg_frame || !cmd->sense) {
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
+ if (!cmd->sg_frame) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
@@ -396,13 +471,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* create sense buffer for the raid 1/10 fp */
for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sense) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
}
return 0;
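
With this change each command's sense buffer is a 96-byte slice of the single
fusion->sense allocation rather than an individual dma_pool_alloc(): command i
uses offset SCSI_SENSE_BUFFERSIZE * i. As a purely illustrative example, if
max_mpt_cmds were 1024 then sense_sz = 1024 * 96 = 98304 bytes, the fallback
pool alignment would be roundup_pow_of_two(98304) = 131072, and command 5's
sense buffer would sit at fusion->sense_phys_addr + 480 with the same upper
32 bits as every other sense buffer.
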
@@ -481,6 +553,40 @@ retry_alloc:
}
}
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->io_request_frames_phys,
+ fusion->io_frames_alloc_sz)) {
+ dma_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ fusion->io_request_frames = NULL;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+
+ fusion->io_request_frames_pool =
+ dma_pool_create("mr_ioreq_align",
+ &instance->pdev->dev,
+ fusion->io_frames_alloc_sz,
+ roundup_pow_of_two(fusion->io_frames_alloc_sz),
+ 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
fusion->req_frames_desc =
dma_alloc_coherent(&instance->pdev->dev,
fusion->request_alloc_sz,
@@ -521,6 +627,41 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->reply_frames_desc_phys[0],
+ (fusion->reply_alloc_sz * count))) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+ fusion->reply_frames_desc[0] = NULL;
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+ fusion->reply_frames_desc_pool =
+ dma_pool_create("mr_reply_align",
+ &instance->pdev->dev,
+ fusion->reply_alloc_sz * count,
+ roundup_pow_of_two(fusion->reply_alloc_sz * count),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->reply_frames_desc[0] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL,
+ &fusion->reply_frames_desc_phys[0]);
+
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
@@ -539,52 +680,124 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
- int i, j, count;
+ int i, j, k, msix_count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
+ dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
+ u8 dma_alloc_count, abs_index;
+ u32 chunk_size, array_size, offset;
fusion = instance->ctrl_context;
+ chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
+ MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
- count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ memset(fusion->rdpq_virt, 0, array_size);
+ msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
- fusion->reply_alloc_sz,
- 16, 0);
-
- if (!fusion->reply_frames_desc_pool) {
+ chunk_size, 16, 0);
+ fusion->reply_frames_desc_pool_align =
+ dma_pool_create("mr_rdpq_align",
+ &instance->pdev->dev,
+ chunk_size,
+ roundup_pow_of_two(chunk_size),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool ||
+ !fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < count; i++) {
- fusion->reply_frames_desc[i] =
- dma_pool_alloc(fusion->reply_frames_desc_pool,
- GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
- if (!fusion->reply_frames_desc[i]) {
+/*
+ * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and for
+ * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...) must lie
+ * within a 4GB boundary, and the reply queues in a set must also share the
+ * same upper 32 bits of their memory address. The driver therefore
+ * allocates the DMA'able memory for the reply queues accordingly, and it
+ * applies the VENTURA_SERIES limitation to INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+
+ for (i = 0; i < dma_alloc_count; i++) {
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+		/* The reply descriptor pool must lie within the same 4GB
+		 * region; the check below verifies this. On failure, the
+		 * chunk is freed and re-allocated from the second, more
+		 * strictly aligned DMA pool (for RDPQ buffers the driver
+		 * always creates two separate DMA pools). That alignment is
+		 * chosen such that the retried allocation, if it succeeds,
+		 * always satisfies the same-4GB-region requirement.
+		 * rdpq_tracker records each buffer's physical and virtual
+		 * address along with its DMA pool descriptor, which is used
+		 * later when freeing the resources.
+		 */
+ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
+ chunk_size)) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ rdpq_chunk_virt[i],
+ rdpq_chunk_phys[i]);
+
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool_align,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool_align;
+ } else {
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool;
+ }
- fusion->rdpq_virt[i].RDPQBaseAddress =
- cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+ fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
+ fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
+ }
- reply_desc = fusion->reply_frames_desc[i];
- for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ for (k = 0; k < dma_alloc_count; k++) {
+ for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
+ abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
+
+ if (abs_index == msix_count)
+ break;
+ offset = fusion->reply_alloc_sz * i;
+ fusion->rdpq_virt[abs_index].RDPQBaseAddress =
+ cpu_to_le64(rdpq_chunk_phys[k] + offset);
+ fusion->reply_frames_desc_phys[abs_index] =
+ rdpq_chunk_phys[k] + offset;
+ fusion->reply_frames_desc[abs_index] =
+ (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
+
+ reply_desc = fusion->reply_frames_desc[abs_index];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+				reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
}
+
return 0;
}
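
A worked (illustrative) example of the chunked RDPQ layout above: with
msix_count = 24 and RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16 (the VENTURA_SERIES
grouping), dma_alloc_count = DIV_ROUND_UP(24, 16) = 2, so two chunks of
chunk_size = 16 * reply_alloc_sz are allocated, each verified to sit within
one 4GB region. Reply queue 20 maps to abs_index = 20 = k * 16 + i with k = 1
and i = 4, so its descriptors start at
rdpq_chunk_phys[1] + 4 * fusion->reply_alloc_sz, and that address is written
to fusion->rdpq_virt[20].RDPQBaseAddress and cached in
fusion->reply_frames_desc_phys[20].
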
@@ -596,15 +809,18 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
fusion = instance->ctrl_context;
- for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
- if (fusion->reply_frames_desc[i])
- dma_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc[i],
- fusion->reply_frames_desc_phys[i]);
+ for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
+ if (fusion->rdpq_tracker[i].pool_entry_virt)
+ dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
+ fusion->rdpq_tracker[i].pool_entry_virt,
+ fusion->rdpq_tracker[i].pool_entry_phys);
+
}
if (fusion->reply_frames_desc_pool)
dma_pool_destroy(fusion->reply_frames_desc_pool);
+ if (fusion->reply_frames_desc_pool_align)
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
pci_free_consistent(instance->pdev,
@@ -771,6 +987,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
u32 scratch_pad_2;
unsigned long flags;
struct timeval tv;
+ bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -784,6 +1001,19 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ if (instance->adapter_type == INVADER_SERIES) {
+ cur_fw_64bit_dma_capable =
+ (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+
+ if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
+ dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
+ "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
+ megaraid_sas_kill_hba(instance);
+ ret = 1;
+ goto fail_fw_init;
+ }
+ }
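
This check matters after a firmware transition, for example a downgrade across
a controller reset: if the driver has already been operating with a 64-bit
consistent DMA mask, its frames and queues may live above the 4GB line, and
firmware that does not advertise MR_CAN_HANDLE_64_BIT_DMA_OFFSET in
scratch_pad_2 would presumably be unable to reach them, so the driver fails
IOC init and faults the controller via megaraid_sas_kill_hba() rather than
continuing.
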
+
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
" from RDPQ mode to non RDPQ mode\n");
@@ -811,6 +1041,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->MsgFlags = instance->is_rdpq ?
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
@@ -852,6 +1083,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+
+ if (instance->consistent_mask_64bit)
+ drv_ops->mfi_capabilities.support_64bit_mode = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -861,8 +1096,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
strlen(sys_info) > 64 ? 64 : strlen(sys_info));
instance->system_info_buf->systemIdLength =
strlen(sys_info) > 64 ? 64 : strlen(sys_info);
- init_frame->system_info_lo = instance->system_info_h;
- init_frame->system_info_hi = 0;
+ init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
+ init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
}
init_frame->queue_info_new_phys_addr_hi =
@@ -953,6 +1188,15 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ instance->jbod_seq_cmd = cmd;
+ } else {
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ }
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -960,19 +1204,14 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+ megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
if (pend) {
- dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
- instance->jbod_seq_cmd = cmd;
instance->instancet->issue_dcmd(instance, cmd);
return 0;
}
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
-
/* Below code is only for non pended DCMD */
if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
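
megasas_set_dma_settings() is introduced elsewhere in this series (in
megaraid_sas_base.c) and is not visible in this file's diff. Judging from how
the DCMD call sites change in this patch -- the explicit sge32 programming
goes away and dcmd->flags is now assigned in CPU byte order -- it presumably
resembles the sketch below; the body is an assumption, not code quoted from
the patch.

/* Hypothetical sketch of the helper used by the DCMD call sites. */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		/* 64-bit consistent DMA mask: use a 64-bit SGE and flag it. */
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
	} else {
		/* 32-bit DMA mask: keep the legacy 32-bit SGE. */
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}

This would also explain why the cpu_to_le16() wrappers on dcmd->flags were
dropped at the call sites: the helper performs the conversion after possibly
OR-ing in MFI_FRAME_SGL64.
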
@@ -1055,13 +1294,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
@@ -1159,15 +1398,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
instance->map_update_cmd = cmd;
@@ -2872,7 +3111,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLowAddress =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
@@ -2913,7 +3153,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
- cpu_to_le32(r1_cmd->sense_phys_addr);
+ cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
r1_cmd->scmd = cmd->scmd;
req_desc2 = megasas_get_request_descriptor(instance,
(r1_cmd->index - 1));
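
Note how the two halves of the sense address are split: every sense buffer now
comes from the single fusion->sense allocation, which
megasas_create_sg_sense_fusion() has verified lies within one 4GB region, so
all buffers share the same upper 32 bits. Those upper bits are programmed once
at IOC init through IOCInitMessage->SenseBufferAddressHigh, and each
per-command frame only needs lower_32_bits() of its own sense_phys_addr in
SenseBufferLowAddress.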