author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 11:50:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-22 11:50:05 -0700
commit		7c034dfd58bbc056280262887acf5b7a98944d0a
tree		49dc3b77590f9929069db17922c823bb679c1456 /drivers/infiniband/hw
parent		1204c464458e9837320a326a9fce550e3c5ef5de
parent		c1c2fef6cfb04cf30a56bb37cff40d3498d7edbf
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA updates from Roland Dreier:
- IPoIB fixes from Doug Ledford and Erez Shitrit
- iSER updates from Sagi Grimberg
- mlx4 GUID handling changes from Yishai Hadas
- other misc fixes
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (51 commits)
mlx5: wrong page mask if CONFIG_ARCH_DMA_ADDR_T_64BIT enabled for 32Bit architectures
IB/iser: Rewrite bounce buffer code path
IB/iser: Bump version to 1.6
IB/iser: Remove code duplication for a single DMA entry
IB/iser: Pass struct iser_mem_reg to iser_fast_reg_mr and iser_reg_sig_mr
IB/iser: Modify struct iser_mem_reg members
IB/iser: Make fastreg pool cache friendly
IB/iser: Move PI context alloc/free to routines
IB/iser: Move fastreg descriptor pool get/put to helper functions
IB/iser: Merge build page-vec into register page-vec
IB/iser: Get rid of struct iser_rdma_regd
IB/iser: Remove redundant assignments in iser_reg_page_vec
IB/iser: Move memory reg/dereg routines to iser_memory.c
IB/iser: Don't pass ib_device to fall_to_bounce_buff routine
IB/iser: Remove a redundant struct iser_data_buf
IB/iser: Remove redundant cmd_data_len calculation
IB/iser: Fix wrong calculation of protection buffer length
IB/iser: Handle fastreg/local_inv completion errors
IB/iser: Fix unload during ep_poll wrong dereference
ib_srpt: convert printk's to pr_* functions
...
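A recurring idea in the mlx4 GUID handling changes below is per-entry retry scheduling: an alias-GUID entry the SM declines is retried after 1 second, the delay doubling on each further decline up to a 60-second cap, and the whole record is requeued at the minimum delay among its declined entries. A standalone sketch of that backoff rule (plain C with hypothetical names; the driver keeps the per-entry delays in guids_retry_schedule[] and inlines this logic in aliasguid_query_handler()):

/* Per-entry decline backoff: first retry after 1s, doubling thereafter,
 * capped at 60s. Hypothetical standalone version of the in-driver logic. */
#define GUID_RETRY_CAP_SEC 60U

static unsigned int next_entry_delay(unsigned int prev_sec)
{
	if (prev_sec == 0)
		return 1;	/* first decline: retry in one second */
	return (prev_sec * 2 > GUID_RETRY_CAP_SEC) ?
		GUID_RETRY_CAP_SEC : prev_sec * 2;
}

/* A record is rescheduled at the minimum delay among its declined
 * entries; 0 means no entry is waiting for a retry. */
static unsigned int record_resched_delay(const unsigned int *delay_sec, int n)
{
	unsigned int min_sec = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (delay_sec[i] == 0)	/* entry not declined */
			continue;
		if (min_sec == 0 || delay_sec[i] < min_sec)
			min_sec = delay_sec[i];
	}
	return min_sec;
}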
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/mlx4/alias_GUID.c	457
-rw-r--r--	drivers/infiniband/hw/mlx4/mad.c	9
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	26
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	14
-rw-r--r--	drivers/infiniband/hw/mlx4/qp.c	7
-rw-r--r--	drivers/infiniband/hw/mlx4/sysfs.c	44
6 files changed, 391 insertions, 166 deletions
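Two of the files below (mad.c and qp.c) gain the same hardening: each ib_dma_map_single() result is now checked with ib_dma_mapping_error() before the buffer is used, and the allocation is unwound on failure. A minimal sketch of the pattern, assuming a hypothetical receive ring (demo_buf and demo_alloc_rx_bufs are illustrative names, not the driver's):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Hypothetical receive-ring entry: a kernel buffer plus its DMA handle. */
struct demo_buf {
	void *addr;
	u64 map;
};

static int demo_alloc_rx_bufs(struct ib_device *ibdev, struct demo_buf *ring,
			      int n, size_t buf_size)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i].addr = kmalloc(buf_size, GFP_KERNEL);
		if (!ring[i].addr)
			goto err;
		ring[i].map = ib_dma_map_single(ibdev, ring[i].addr,
						buf_size, DMA_FROM_DEVICE);
		/* The mapping can fail; never post an unchecked handle. */
		if (ib_dma_mapping_error(ibdev, ring[i].map)) {
			kfree(ring[i].addr);
			goto err;
		}
	}
	return 0;

err:	/* unwind everything mapped so far */
	while (--i >= 0) {
		ib_dma_unmap_single(ibdev, ring[i].map, buf_size,
				    DMA_FROM_DEVICE);
		kfree(ring[i].addr);
	}
	return -ENOMEM;
}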
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..0f00204d2ece 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
 	int query_id;
 	struct list_head list;
 	int block_num;
+	ib_sa_comp_mask guid_indexes;
+	u8 method;
 };

 struct mlx4_next_alias_guid_work {
 	u8 port;
 	u8 block_num;
+	u8 method;
 	struct mlx4_sriov_alias_guid_info_rec_det rec_det;
 };

+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec);
+
 void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
 					 u8 port_num, u8 *p_data)
 {
@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
 	return IB_SA_COMP_MASK(4 + index);
 }

+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init)
+{
+	__be64 curr_guid, required_guid;
+	int record_num = slave / 8;
+	int index = slave % 8;
+	int port_index = port - 1;
+	unsigned long flags;
+	int do_work = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+	    GUID_STATE_NEED_PORT_INIT)
+		goto unlock;
+	if (!slave_init) {
+		curr_guid = *(__be64 *)&dev->sriov.
+			alias_guid.ports_guid[port_index].
+			all_rec_per_port[record_num].
+			all_recs[GUID_REC_SIZE * index];
+		if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+		    !curr_guid)
+			goto unlock;
+		required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+	} else {
+		required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+		if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			goto unlock;
+	}
+	*(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		all_recs[GUID_REC_SIZE * index] = required_guid;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].guid_indexes
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].status
+		= MLX4_GUID_INFO_STATUS_IDLE;
+	/* set to run immediately */
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].time_to_run = 0;
+	dev->sriov.alias_guid.ports_guid[port_index].
+		all_rec_per_port[record_num].
+		guids_retry_schedule[index] = 0;
+	do_work = 1;
+unlock:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+	if (do_work)
+		mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
 /*
  * Whenever new GUID is set/unset (guid table change) create event and
  * notify the relevant slave (master also should be notified).
@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 	enum slave_port_state prev_state;
 	__be64 tmp_cur_ag, form_cache_ag;
 	enum slave_port_gen_event gen_event;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec;
+	unsigned long flags;
+	__be64 required_value;

 	if (!mlx4_is_master(dev->dev))
 		return;

+	rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+		all_rec_per_port[block_num];
 	guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
 				   ports_guid[port_num - 1].
 				   all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 		 */
 		if (tmp_cur_ag != form_cache_ag)
 			continue;
-		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+		required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+		if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			required_value = 0;
+
+		if (tmp_cur_ag == required_value) {
+			rec->guid_indexes = rec->guid_indexes &
+				~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		} else {
+			/* may notify port down if value is 0 */
+			if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+				spin_unlock_irqrestore(&dev->sriov.
+					alias_guid.ag_work_lock, flags);
+				continue;
+			}
+		}
+		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+				       flags);
+		mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
 		/*2 cases: Valid GUID, and Invalid Guid*/

 		if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +268,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 			set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
 						      MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
 						      &gen_event);
-			pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
-				 slave_id, port_num);
-			mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
-						       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+				pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+					 slave_id, port_num);
+				mlx4_gen_port_state_change_eqe(dev->dev,
+							       slave_id,
+							       port_num,
+							       MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+			}
 		}
 	}
 }
@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
 	int i;
 	struct mlx4_sriov_alias_guid_info_rec_det *rec;
 	unsigned long flags, flags1;
+	ib_sa_comp_mask declined_guid_indexes = 0;
+	ib_sa_comp_mask applied_guid_indexes = 0;
+	unsigned int resched_delay_sec = 0;

 	if (!context)
 		return;
@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
 		all_rec_per_port[cb_ctx->block_num];

 	if (status) {
-		rec->status = MLX4_GUID_INFO_STATUS_IDLE;
 		pr_debug("(port: %d) failed: status = %d\n",
 			 cb_ctx->port, status);
+		rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
 		goto out;
 	}

@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
 	rec = &dev->sriov.alias_guid.ports_guid[port_index].
 		all_rec_per_port[guid_rec->block_num];

-	rec->status = MLX4_GUID_INFO_STATUS_SET;
-	rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
 	for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
-		__be64 tmp_cur_ag;
-		tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+		__be64 sm_response, required_val;
+
+		if (!(cb_ctx->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+		sm_response = *(__be64 *)&guid_rec->guid_info_list
+				[i * GUID_REC_SIZE];
+		required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+		if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+			if (required_val ==
+			    cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+				goto next_entry;
+
+			/* A new value was set till we got the response */
+			pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+				 be64_to_cpu(required_val),
+				 i, guid_rec->block_num);
+			goto entry_declined;
+		}
+
 		/* check if the SM didn't assign one of the records.
-		 * if it didn't, if it was not sysadmin request:
-		 * ask the SM to give a new GUID, (instead of the driver request).
+		 * if it didn't, re-ask for.
 		 */
-		if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
-			mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
-				     "block_num: %d was declined by SM, "
-				     "ownership by %d (0 = driver, 1=sysAdmin,"
-				     " 2=None)\n", __func__, i,
-				     guid_rec->block_num, rec->ownership);
-			if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
-				/* if it is driver assign, asks for new GUID from SM*/
-				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
-					MLX4_NOT_SET_GUID;
-
-				/* Mark the record as not assigned, and let it
-				 * be sent again in the next work sched.*/
-				rec->status = MLX4_GUID_INFO_STATUS_IDLE;
-				rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
-			}
+		if (sm_response == MLX4_NOT_SET_GUID) {
+			if (rec->guids_retry_schedule[i] == 0)
+				mlx4_ib_warn(&dev->ib_dev,
+					     "%s:Record num %d in block_num: %d was declined by SM\n",
+					     __func__, i,
+					     guid_rec->block_num);
+			goto entry_declined;
 		} else {
 			/* properly assigned record. */
 			/* We save the GUID we just got from the SM in the
 			 * admin_guid in order to be persistent, and in the
 			 * request from the sm the process will ask for the same GUID */
-			if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
-			    tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
-				/* the sysadmin assignment failed.*/
-				mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
-					     " admin guid after SysAdmin "
-					     "configuration. "
-					     "Record num %d in block_num:%d "
-					     "was declined by SM, "
-					     "new val(0x%llx) was kept\n",
-					      __func__, i,
-					      guid_rec->block_num,
-					      be64_to_cpu(*(__be64 *) &
-							  rec->all_recs[i * GUID_REC_SIZE]));
+			if (required_val &&
+			    sm_response != required_val) {
+				/* Warn only on first retry */
+				if (rec->guids_retry_schedule[i] == 0)
+					mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
						     " admin guid after SysAdmin "
						     "configuration. "
						     "Record num %d in block_num:%d "
						     "was declined by SM, "
						     "new val(0x%llx) was kept, SM returned (0x%llx)\n",
						      __func__, i,
						      guid_rec->block_num,
						      be64_to_cpu(required_val),
						      be64_to_cpu(sm_response));
+				goto entry_declined;
 			} else {
-				memcpy(&rec->all_recs[i * GUID_REC_SIZE],
-				       &guid_rec->guid_info_list[i * GUID_REC_SIZE],
-				       GUID_REC_SIZE);
+				*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+					sm_response;
+				if (required_val == 0)
+					mlx4_set_admin_guid(dev->dev,
+							    sm_response,
+							    (guid_rec->block_num
+							    * NUM_ALIAS_GUID_IN_REC) + i,
+							    cb_ctx->port);
+				goto next_entry;
 			}
 		}
+entry_declined:
+		declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		rec->guids_retry_schedule[i] =
+			(rec->guids_retry_schedule[i] == 0) ? 1 :
+			min((unsigned int)60,
+			    rec->guids_retry_schedule[i] * 2);
+		/* using the minimum value among all entries in that record */
+		resched_delay_sec = (resched_delay_sec == 0) ?
+			rec->guids_retry_schedule[i] :
+			min(resched_delay_sec,
+			    rec->guids_retry_schedule[i]);
+		continue;
+
+next_entry:
+		rec->guids_retry_schedule[i] = 0;
 	}
+
+	applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+	if (declined_guid_indexes ||
+	    rec->guid_indexes & ~(applied_guid_indexes)) {
+		pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+			 guid_rec->block_num,
+			 be64_to_cpu((__force __be64)rec->guid_indexes),
+			 be64_to_cpu((__force __be64)applied_guid_indexes),
+			 be64_to_cpu((__force __be64)declined_guid_indexes));
+		rec->time_to_run = ktime_get_real_ns() +
+			resched_delay_sec * NSEC_PER_SEC;
+	} else {
+		rec->status = MLX4_GUID_INFO_STATUS_SET;
+	}
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	/*
 	The func is call here to close the cases when the
 	sm doesn't send smp, so in the sa response the driver
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
 out:
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
-	if (!dev->sriov.is_going_down)
+	if (!dev->sriov.is_going_down) {
+		get_low_record_time_index(dev, port_index, &resched_delay_sec);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
 				   &dev->sriov.alias_guid.ports_guid[port_index].
-				   alias_guid_work, 0);
+				   alias_guid_work,
+				   msecs_to_jiffies(resched_delay_sec * 1000));
+	}
 	if (cb_ctx->sa_query) {
 		list_del(&cb_ctx->list);
 		kfree(cb_ctx);
@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 	ib_sa_comp_mask comp_mask = 0;

 	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
-		= MLX4_GUID_INFO_STATUS_IDLE;
-	dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
-		= MLX4_GUID_INFO_RECORD_SET;
+		= MLX4_GUID_INFO_STATUS_SET;

 	/* calculate the comp_mask for that record.*/
 	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
 		   need to assign GUIDs, then don't put it up for assignment.
 		*/
 		if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
-		    (!index && !i) ||
-		    MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
-		    ports_guid[port - 1].all_rec_per_port[index].ownership)
+		    (!index && !i))
 			continue;
 		comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
 	}
 	dev->sriov.alias_guid.ports_guid[port - 1].
-		all_rec_per_port[index].guid_indexes = comp_mask;
+		all_rec_per_port[index].guid_indexes |= comp_mask;
+	if (dev->sriov.alias_guid.ports_guid[port - 1].
+	    all_rec_per_port[index].guid_indexes)
+		dev->sriov.alias_guid.ports_guid[port - 1].
+			all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
 }

 static int set_guid_rec(struct ib_device *ibdev,
-			u8 port, int index,
-			struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+			struct mlx4_next_alias_guid_work *rec)
 {
 	int err;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
 	struct ib_port_attr attr;
 	struct mlx4_alias_guid_work_context *callback_context;
 	unsigned long resched_delay, flags, flags1;
+	u8 port = rec->port + 1;
+	int index = rec->block_num;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
 	struct list_head *head =
 		&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;

@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->port = port;
 	callback_context->dev = dev;
 	callback_context->block_num = index;
+	callback_context->guid_indexes = rec_det->guid_indexes;
+	callback_context->method = rec->method;

 	memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));

@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
 	callback_context->query_id =
 		ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
 					  ibdev, port, &guid_info_rec,
-					  comp_mask, rec_det->method, 1000,
+					  comp_mask, rec->method, 1000,
 					  GFP_KERNEL, aliasguid_query_handler,
 					  callback_context,
 					  &callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
 	return err;
 }

+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+	int j, k, entry;
+	__be64 guid;
+
+	/*Check if the SM doesn't need to assign the GUIDs*/
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+			entry = j * NUM_ALIAS_GUID_IN_REC + k;
+			/* no request for the 0 entry (hw guid) */
+			if (!entry || entry > dev->dev->persist->num_vfs ||
+			    !mlx4_is_slave_active(dev->dev, entry))
+				continue;
+			guid = mlx4_get_admin_guid(dev->dev, entry, port);
+			*(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+				all_rec_per_port[j].all_recs
+				[GUID_REC_SIZE * k] = guid;
+			pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+				 entry,
+				 be64_to_cpu(guid),
+				 port);
+		}
+	}
+}
+
 void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 {
 	int i;
@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)

 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+	if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+		GUID_STATE_NEED_PORT_INIT) {
+		mlx4_ib_guid_port_init(dev, port);
+		dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+			(~GUID_STATE_NEED_PORT_INIT);
+	}
 	for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
 		invalidate_guid_record(dev, port, i);

@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
 	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
 }

-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
-				     struct mlx4_next_alias_guid_work *rec)
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+				struct mlx4_next_alias_guid_work *next_rec,
+				int record_index)
 {
-	int j;
-	unsigned long flags;
+	int i;
+	int lowset_time_entry = -1;
+	int lowest_time = 0;
+	ib_sa_comp_mask delete_guid_indexes = 0;
+	ib_sa_comp_mask set_guid_indexes = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det *rec =
+			&dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[record_index];

-	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
-		if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
-		    MLX4_GUID_INFO_STATUS_IDLE) {
-			memcpy(&rec->rec_det,
-			       &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
-			       sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
-			rec->port = port;
-			rec->block_num = j;
-			dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
-				MLX4_GUID_INFO_STATUS_PENDING;
-			spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
-			return 0;
+	for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+		if (!(rec->guid_indexes &
+			mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+			continue;
+
+		if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+			delete_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+		else
+			set_guid_indexes |=
+				mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+		if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+			lowest_time) {
+			lowset_time_entry = i;
+			lowest_time = rec->guids_retry_schedule[i];
 		}
-		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
 	}
-	return -ENOENT;
+
+	memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+	next_rec->port = port;
+	next_rec->block_num = record_index;
+
+	if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+				cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+		next_rec->rec_det.guid_indexes = delete_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+	} else {
+		next_rec->rec_det.guid_indexes = set_guid_indexes;
+		next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+	}
 }

-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
-					     int rec_index,
-					     struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+				     int *resched_delay_sec)
 {
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
-		rec_det->guid_indexes;
-	memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
-	       rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-	dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
-		rec_det->status;
+	int record_index = -1;
+	u64 low_record_time = 0;
+	struct mlx4_sriov_alias_guid_info_rec_det rec;
+	int j;
+
+	for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+		rec = dev->sriov.alias_guid.ports_guid[port].
+			all_rec_per_port[j];
+		if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+		    rec.guid_indexes) {
+			if (record_index == -1 ||
+			    rec.time_to_run < low_record_time) {
+				record_index = j;
+				low_record_time = rec.time_to_run;
+			}
+		}
+	}
+	if (resched_delay_sec) {
+		u64 curr_time = ktime_get_real_ns();
+
+		*resched_delay_sec = (low_record_time < curr_time) ? 0 :
+			div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+	}
+
+	return record_index;
 }

-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+				     struct mlx4_next_alias_guid_work *rec)
 {
-	int j;
-	struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
-	for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
-		memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
-		rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
-			IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
-			IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
-			IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
-			IB_SA_GUIDINFO_REC_GID7;
-		rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
-		set_administratively_guid_record(dev, port, j, &rec_det);
+	unsigned long flags;
+	int record_index;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+	record_index = get_low_record_time_index(dev, port, NULL);
+
+	if (record_index < 0) {
+		ret = -ENOENT;
+		goto out;
 	}
+
+	set_required_record(dev, port, rec, record_index);
+out:
+	spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+	return ret;
 }

 static void alias_guid_work(struct work_struct *work)
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
 		goto out;
 	}

-	set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
-		     &rec->rec_det);
-
+	set_guid_rec(&dev->ib_dev, rec);
out:
 	kfree(rec);
 }
@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
+		/* If there is pending one should cancell then run, otherwise
+		 * won't run till previous one is ended as same work
+		 * struct is used.
+		 */
+		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+				    alias_guid_work);
 		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
 			   &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
 	}
@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 {
 	char alias_wq_name[15];
 	int ret = 0;
-	int i, j, k;
+	int i, j;
 	union ib_gid gid;

 	if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
 	for (i = 0 ; i < dev->num_ports; i++) {
 		memset(&dev->sriov.alias_guid.ports_guid[i], 0,
 		       sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
-		/*Check if the SM doesn't need to assign the GUIDs*/
+		dev->sriov.alias_guid.ports_guid[i].state_flags |=
+				GUID_STATE_NEED_PORT_INIT;
 		for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
-			if (mlx4_ib_sm_guid_assign) {
-				dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].
-					ownership = MLX4_GUID_DRIVER_ASSIGN;
-				continue;
-			}
-			dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
-					ownership = MLX4_GUID_NONE_ASSIGN;
-			/*mark each val as it was deleted,
-			  till the sysAdmin will give it valid val*/
-			for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
-				*(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
-					all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
-						cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
-			}
+			/* mark each val as it was deleted */
+			memset(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs, 0xFF,
+				sizeof(dev->sriov.alias_guid.ports_guid[i].
+				all_rec_per_port[j].all_recs));
 		}
 		INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
 		/*prepare the records, set them to be allocated by sm*/
+		if (mlx4_ib_sm_guid_assign)
+			for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+				mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
 		for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
 			invalidate_guid_record(dev, i + 1, j);

 		dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
 		dev->sriov.alias_guid.ports_guid[i].port  = i;
-		if (mlx4_ib_sm_guid_assign)
-			set_all_slaves_guids(dev, i);

 		snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
 		dev->sriov.alias_guid.ports_guid[i].wq =
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 59040265e361..9cd2b002d7ae 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					tun_qp->ring[i].addr,
 					rx_buf_size,
 					DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+			kfree(tun_qp->ring[i].addr);
+			goto err;
+		}
 	}

 	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
 					  tun_qp->tx_ring[i].buf.addr,
 					  tx_buf_size,
 					  DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ctx->ib_dev,
+					 tun_qp->tx_ring[i].buf.map)) {
+			kfree(tun_qp->tx_ring[i].buf.addr);
+			goto tx_err;
+		}
 		tun_qp->tx_ring[i].ah = NULL;
 	}
 	spin_lock_init(&tun_qp->tx_lock);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 976bea794b5f..57070c529dfb 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);

-int mlx4_ib_sm_guid_assign = 1;
+int mlx4_ib_sm_guid_assign = 0;
 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

 static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
@@ -2791,9 +2791,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	case MLX4_DEV_EVENT_SLAVE_INIT:
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 1);
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       1);
+			}
+		}
 		return;

 	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+		if (mlx4_is_master(dev)) {
+			int i;
+
+			for (i = 1; i <= ibdev->num_ports; i++) {
+				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+					== IB_LINK_LAYER_INFINIBAND)
+					mlx4_ib_slave_alias_guid_event(ibdev,
+								       p, i,
+								       0);
+			}
+		}
 		/* here, p is the slave id */
 		do_slave_init(ibdev, p, 0);
 		return;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f829fd935b79..fce3934372a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -342,14 +342,9 @@ struct mlx4_ib_ah {
 enum mlx4_guid_alias_rec_status {
 	MLX4_GUID_INFO_STATUS_IDLE,
 	MLX4_GUID_INFO_STATUS_SET,
-	MLX4_GUID_INFO_STATUS_PENDING,
 };

-enum mlx4_guid_alias_rec_ownership {
-	MLX4_GUID_DRIVER_ASSIGN,
-	MLX4_GUID_SYSADMIN_ASSIGN,
-	MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01

 enum mlx4_guid_alias_rec_method {
 	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
@@ -360,8 +355,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
 	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
 	ib_sa_comp_mask guid_indexes; /*indicates what from the 8 records are valid*/
 	enum mlx4_guid_alias_rec_status status; /*indicates the administraively status of the record.*/
-	u8 method; /*set or delete*/
-	enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+	u64 time_to_run;
 };

 struct mlx4_sriov_alias_guid_port_rec_det {
@@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
 	struct workqueue_struct *wq;
 	struct delayed_work alias_guid_work;
 	u8 port;
+	u32 state_flags;
 	struct mlx4_sriov_alias_guid *parent;
 	struct list_head cb_list;
 };
@@ -802,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 			     struct attribute *attr);
 ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+				    int port, int slave_init);

 int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..02fc91c68027 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
 			ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
 					  sizeof (struct mlx4_ib_proxy_sqp_hdr),
 					  DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+			kfree(qp->sqp_proxy_rcv[i].addr);
+			goto err;
+		}
 	}
 	return 0;

@@ -2605,8 +2609,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,

 	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

-	*lso_hdr_sz  = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
-				   wr->wr.ud.hlen);
+	*lso_hdr_sz  = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
 	*lso_seg_len = halign;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index d10c2b8a5dad..6797108ce873 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -46,21 +46,17 @@
 static ssize_t show_admin_alias_guid(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
-	int record_num;/*0-15*/
-	int guid_index_in_rec; /*0 - 7*/
 	struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
 		container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
+	__be64 sysadmin_ag_val;

-	record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
-	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+	sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+					      mlx4_ib_iov_dentry->entry_num,
+					      port->num);

-	return sprintf(buf, "%llx\n",
-		       be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
-				   ports_guid[port->num - 1].
-				   all_rec_per_port[record_num].
-				   all_recs[8 * guid_index_in_rec]));
+	return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
 }

 /* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
 	struct mlx4_ib_dev *mdev = port->dev;
 	u64 sysadmin_ag_val;
+	unsigned long flags;

 	record_num = mlx4_ib_iov_dentry->entry_num / 8;
 	guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 		pr_err("GUID 0 block 0 is RO\n");
 		return count;
 	}
+	spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	sscanf(buf, "%llx", &sysadmin_ag_val);
 	*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
 		all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
 	/* Change the state to be pending for update */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
 		= MLX4_GUID_INFO_STATUS_IDLE ;
-
-	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-		= MLX4_GUID_INFO_RECORD_SET;
-
-	switch (sysadmin_ag_val) {
-	case MLX4_GUID_FOR_DELETE_VAL:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
-			= MLX4_GUID_INFO_RECORD_DELETE;
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	/* The sysadmin requests the SM to re-assign */
-	case MLX4_NOT_SET_GUID:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_DRIVER_ASSIGN;
-		break;
-	/* The sysadmin requests a specific value.*/
-	default:
-		mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
-			= MLX4_GUID_SYSADMIN_ASSIGN;
-		break;
-	}
+	mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+			    mlx4_ib_iov_dentry->entry_num,
+			    port->num);

 	/* set the record index */
 	mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
-		= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+		|= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);

+	spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
 	mlx4_ib_init_alias_guid_work(mdev, port->num - 1);

 	return count;
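One detail worth calling out from the alias_GUID.c hunks above: records carry an absolute time_to_run deadline in nanoseconds (ktime_get_real_ns() clock), and the work requeue converts the earliest such deadline into a delay in jiffies. A condensed sketch of that conversion (delay_to_jiffies is a hypothetical helper, not a function in the driver):

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/math64.h>

/* Jiffies to wait until an absolute deadline (ns, real-time clock),
 * rounding down to whole seconds the way the driver's scheduler does. */
static unsigned long delay_to_jiffies(u64 time_to_run)
{
	u64 now = ktime_get_real_ns();
	u64 delay_sec = (time_to_run <= now) ?
		0 : div_u64(time_to_run - now, NSEC_PER_SEC);

	return msecs_to_jiffies((unsigned int)(delay_sec * 1000));
}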