From d8966fcd4c25708c3a76ea7619644218373df639 Mon Sep 17 00:00:00 2001
From: Dasaratharaman Chandramouli
Date: Sat, 29 Apr 2017 14:41:28 -0400
Subject: IB/core: Use rdma_ah_attr accessor functions

Modify core and driver components to use accessor functions
introduced to access individual fields of rdma_ah_attr

Reviewed-by: Ira Weiny
Reviewed-by: Don Hiatt
Reviewed-by: Sean Hefty
Reviewed-by: Niranjana Vishwanathapura
Signed-off-by: Dasaratharaman Chandramouli
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/ah.c  | 115 +++++++++++++++++++++++----------------
 drivers/infiniband/hw/mlx4/mad.c |  43 +++++++++------
 drivers/infiniband/hw/mlx4/mcg.c |   2 +-
 drivers/infiniband/hw/mlx4/qp.c  |  92 ++++++++++++++++---------------
 4 files changed, 143 insertions(+), 109 deletions(-)

(limited to 'drivers/infiniband/hw/mlx4')
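The conversion is mechanical: every direct read or write of a struct
rdma_ah_attr field becomes a call to the matching accessor from
include/rdma/ib_verbs.h. The sketch below is not part of the patch (the
function name is invented); it uses only accessors that actually appear
in the hunks that follow, to show how the scalar fields map:

#include <rdma/ib_verbs.h>

/* Illustration only: example_dump_ah_attr() is an invented helper. */
static void example_dump_ah_attr(struct rdma_ah_attr *ah_attr)
{
	/* was ah_attr->port_num, ->dlid, ->sl, ->src_path_bits, ->static_rate */
	pr_debug("port %d dlid %d sl %d path_bits %d rate %d grh %d\n",
		 rdma_ah_get_port_num(ah_attr),
		 rdma_ah_get_dlid(ah_attr),
		 rdma_ah_get_sl(ah_attr),
		 rdma_ah_get_path_bits(ah_attr),
		 rdma_ah_get_static_rate(ah_attr),
		 !!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH));
}

The write direction is symmetric (rdma_ah_set_dlid(), rdma_ah_set_sl(),
rdma_ah_set_port_num(), and so on), as the mlx4_ib_query_ah() and
update_sm_ah() hunks show; GRH and DMAC fields go through dedicated
helpers, illustrated after the ah.c and mad.c diffs below.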
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 17fcb0b49d0e..3cbac5f7b0f5 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -46,25 +46,32 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd,
 {
 	struct mlx4_dev *dev = to_mdev(pd->device)->dev;
 
-	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ah->av.ib.g_slid = ah_attr->src_path_bits;
-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
-	if (ah_attr->ah_flags & IB_AH_GRH) {
+	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+			    (rdma_ah_get_port_num(ah_attr) << 24));
+	ah->av.ib.g_slid = rdma_ah_get_path_bits(ah_attr);
+	ah->av.ib.sl_tclass_flowlabel =
+			cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
+	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
 		ah->av.ib.g_slid |= 0x80;
-		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
-		ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
+		ah->av.ib.gid_index = grh->sgid_index;
+		ah->av.ib.hop_limit = grh->hop_limit;
 		ah->av.ib.sl_tclass_flowlabel |=
-			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
-				    ah_attr->grh.flow_label);
-		memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
+			cpu_to_be32((grh->traffic_class << 20) |
+				    grh->flow_label);
+		memcpy(ah->av.ib.dgid, grh->dgid.raw, 16);
 	}
 
-	ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
-	if (ah_attr->static_rate) {
-		ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
-		while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
-		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
-			--ah->av.ib.stat_rate;
+	ah->av.ib.dlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+	if (rdma_ah_get_static_rate(ah_attr)) {
+		u8 static_rate = rdma_ah_get_static_rate(ah_attr) +
+					MLX4_STAT_RATE_OFFSET;
+
+		while (static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
+		       !(1 << static_rate & dev->caps.stat_rate_support))
+			--static_rate;
+		ah->av.ib.stat_rate = static_rate;
 	}
 
 	return &ah->ibah;
@@ -81,17 +88,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
 	u16 vlan_tag = 0xffff;
 	union ib_gid sgid;
 	struct ib_gid_attr gid_attr;
+	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
 	int ret;
 
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	memcpy(&in6, grh->dgid.raw, sizeof(in6));
 	if (rdma_is_multicast_addr(&in6)) {
 		is_mcast = 1;
 		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
 	} else {
 		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
 	}
-	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
-				ah_attr->grh.sgid_index, &sgid, &gid_attr);
+	ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
+				grh->sgid_index, &sgid, &gid_attr);
 	if (ret)
 		return ERR_PTR(ret);
 	eth_zero_addr(ah->av.eth.s_mac);
@@ -102,32 +110,36 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
 			dev_put(gid_attr.ndev);
 	}
 	if (vlan_tag < 0x1000)
-		vlan_tag |= (ah_attr->sl & 7) << 13;
-	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+		vlan_tag |= (rdma_ah_get_sl(ah_attr) & 7) << 13;
+	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn |
+					 (rdma_ah_get_port_num(ah_attr) << 24));
+	ret = mlx4_ib_gid_index_to_real_index(ibdev,
+					      rdma_ah_get_port_num(ah_attr),
+					      grh->sgid_index);
 	if (ret < 0)
 		return ERR_PTR(ret);
 	ah->av.eth.gid_index = ret;
 	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
-	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
-	if (ah_attr->static_rate) {
-		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
+	ah->av.eth.hop_limit = grh->hop_limit;
+	if (rdma_ah_get_static_rate(ah_attr)) {
+		ah->av.eth.stat_rate = rdma_ah_get_static_rate(ah_attr) +
+					MLX4_STAT_RATE_OFFSET;
 		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
 		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.eth.stat_rate;
 	}
 	ah->av.eth.sl_tclass_flowlabel |=
-			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
-				    ah_attr->grh.flow_label);
+			cpu_to_be32((grh->traffic_class << 20) |
+				    grh->flow_label);
 	/*
 	 * HW requires multicast LID so we just choose one.
 	 */
 	if (is_mcast)
 		ah->av.ib.dlid = cpu_to_be16(0xc000);
 
-	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
-
+	memcpy(ah->av.eth.dgid, grh->dgid.raw, 16);
+	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(rdma_ah_get_sl(ah_attr)
+						      << 29);
 	return &ah->ibah;
 }
@@ -142,8 +154,10 @@ struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 	if (!ah)
 		return ERR_PTR(-ENOMEM);
 
-	if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
-		if (!(ah_attr->ah_flags & IB_AH_GRH)) {
+	if (rdma_port_get_link_layer(pd->device,
+				     rdma_ah_get_port_num(ah_attr)) ==
+	    IB_LINK_LAYER_ETHERNET) {
+		if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
 			ret = ERR_PTR(-EINVAL);
 		} else {
 			/*
@@ -171,28 +185,35 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
 	enum rdma_link_layer ll;
 
 	memset(ah_attr, 0, sizeof *ah_attr);
-	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
-	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+	rdma_ah_set_port_num(ah_attr,
+			     be32_to_cpu(ah->av.ib.port_pd) >> 24);
+	ll = rdma_port_get_link_layer(ibah->device,
+				      rdma_ah_get_port_num(ah_attr));
 
 	if (ll == IB_LINK_LAYER_ETHERNET)
-		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+		rdma_ah_set_sl(ah_attr,
+			       be32_to_cpu(ah->av.eth.sl_tclass_flowlabel)
+			       >> 29);
 	else
-		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+		rdma_ah_set_sl(ah_attr,
+			       be32_to_cpu(ah->av.ib.sl_tclass_flowlabel)
+			       >> 28);
 
-	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+	rdma_ah_set_dlid(ah_attr, (ll == IB_LINK_LAYER_INFINIBAND) ?
+			 be16_to_cpu(ah->av.ib.dlid) : 0);
 	if (ah->av.ib.stat_rate)
-		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
-	ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;
+		rdma_ah_set_static_rate(ah_attr,
+					ah->av.ib.stat_rate -
+					MLX4_STAT_RATE_OFFSET);
+	rdma_ah_set_path_bits(ah_attr, ah->av.ib.g_slid & 0x7F);
 
 	if (mlx4_ib_ah_grh_present(ah)) {
-		ah_attr->ah_flags = IB_AH_GRH;
-
-		ah_attr->grh.traffic_class =
-			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
-		ah_attr->grh.flow_label =
-			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
-		ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
-		ah_attr->grh.sgid_index = ah->av.ib.gid_index;
-		memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
+		u32 tc_fl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel);
+
+		rdma_ah_set_grh(ah_attr, NULL,
+				tc_fl & 0xfffff, ah->av.ib.gid_index,
+				ah->av.ib.hop_limit,
+				tc_fl >> 20);
+		rdma_ah_set_dgid_raw(ah_attr, ah->av.ib.dgid);
 	}
 
 	return 0;
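Two GRH idioms recur in the ah.c hunks above and in the rest of the
patch: rdma_ah_read_grh() returns a const struct ib_global_route for
read-only use, while mlx4_ib_query_ah() rebuilds the attribute with
rdma_ah_set_grh() (the dgid argument passed as NULL) followed by
rdma_ah_set_dgid_raw(). The old explicit ah_attr->ah_flags = IB_AH_GRH
assignment disappears in that hunk, which suggests the composite setter
also records that a GRH is present. A minimal sketch with invented
function names, reusing only the accessor calls seen above:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Illustration only: both helpers are invented names. */
static void example_copy_dgid(struct rdma_ah_attr *ah_attr, u8 *dgid_out)
{
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

		memcpy(dgid_out, grh->dgid.raw, 16);	/* was ah_attr->grh.dgid.raw */
	}
}

static void example_set_grh(struct rdma_ah_attr *ah_attr, u32 flow_label,
			    u8 sgid_index, u8 hop_limit, u8 traffic_class,
			    u8 *dgid_raw)
{
	/* dgid passed as NULL and filled in afterwards, as in mlx4_ib_query_ah() */
	rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index,
			hop_limit, traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, dgid_raw);
}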
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 90915c5e5338..425515eb01ea 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -196,9 +196,9 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 		return;
 
 	memset(&ah_attr, 0, sizeof ah_attr);
-	ah_attr.dlid = lid;
-	ah_attr.sl = sl;
-	ah_attr.port_num = port_num;
+	rdma_ah_set_dlid(&ah_attr, lid);
+	rdma_ah_set_sl(&ah_attr, sl);
+	rdma_ah_set_port_num(&ah_attr, port_num);
 
 	new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
 				&ah_attr);
@@ -555,13 +555,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	/* create ah. Just need an empty one with the port num for the post send.
 	 * The driver will set the force loopback bit in post_send */
 	memset(&attr, 0, sizeof attr);
-	attr.port_num = port;
+
+	rdma_ah_set_port_num(&attr, port);
 	if (is_eth) {
 		union ib_gid sgid;
+		union ib_gid dgid;
 
-		if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
 			return -EINVAL;
-		attr.ah_flags = IB_AH_GRH;
+		rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
 	}
 	ah = rdma_create_ah(tun_ctx->pd, &attr);
 	if (IS_ERR(ah))
@@ -1363,6 +1365,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	struct mlx4_mad_snd_buf *sqp_mad;
 	struct ib_ah *ah;
 	struct ib_qp *send_qp = NULL;
+	struct ib_global_route *grh;
 	unsigned wire_tx_ix = 0;
 	int ret = 0;
 	u16 wire_pkey_ix;
@@ -1389,12 +1392,13 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	send_qp = sqp->qp;
 
 	/* create ah */
-	sgid_index = attr->grh.sgid_index;
-	attr->grh.sgid_index = 0;
+	grh = rdma_ah_retrieve_grh(attr);
+	sgid_index = grh->sgid_index;
+	grh->sgid_index = 0;
 	ah = rdma_create_ah(sqp_ctx->pd, attr);
 	if (IS_ERR(ah))
 		return -ENOMEM;
-	attr->grh.sgid_index = sgid_index;
+	grh->sgid_index = sgid_index;
 	to_mah(ah)->av.ib.gid_index = sgid_index;
 	/* get rid of force-loopback bit */
 	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
@@ -1442,7 +1446,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 	if (s_mac)
 		memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
 	if (vlan_id < 0x1000)
-		vlan_id |= (attr->sl & 7) << 13;
+		vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
 	to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
 
@@ -1469,10 +1473,11 @@ static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
 				    struct rdma_ah_attr *ah_attr)
 {
+	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
 	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
-		ah_attr->grh.sgid_index = slave;
+		grh->sgid_index = slave;
 	else
-		ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+		grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
 }
 
 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
@@ -1487,6 +1492,8 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	int slave;
 	int port;
 	u16 vlan_id;
+	u8 qos;
+	u8 *dmac;
 
 	/* Get slave that sent this packet */
 	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1571,14 +1578,16 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	ah.av.ib.port_pd = cpu_to_be32(port << 24 |
 				       (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
 	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
-	if (ah_attr.ah_flags & IB_AH_GRH)
+	if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
 		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
-
-	memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
+	dmac = rdma_ah_retrieve_dmac(&ah_attr);
+	if (dmac)
+		memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
 	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
 	/* if slave have default vlan use it */
-	mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
-				    &vlan_id, &ah_attr.sl);
+	if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+					&vlan_id, &qos))
+		rdma_ah_set_sl(&ah_attr, qos);
 
 	mlx4_ib_send_to_wire(dev, slave, ctx->port,
 			     is_proxy_qp0(dev, wc->src_qp, slave) ?
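When a field has to be changed in place rather than just read, the mad.c
hunks use the retrieve variants, which hand back a writable pointer:
mlx4_ib_send_to_wire() temporarily forces sgid_index to 0 through
rdma_ah_retrieve_grh(), and the multiplex path writes the tunnel MAC
through rdma_ah_retrieve_dmac(). A condensed sketch of that save,
override and restore idiom (the wrapper name is invented and error
handling is omitted):

#include <rdma/ib_verbs.h>

/* Illustration only: example_create_ah_at_gid0() is an invented wrapper. */
static struct ib_ah *example_create_ah_at_gid0(struct ib_pd *pd,
					       struct rdma_ah_attr *attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
	u8 sgid_index = grh->sgid_index;
	struct ib_ah *ah;

	grh->sgid_index = 0;		/* create the AH against gid index 0 */
	ah = rdma_create_ah(pd, attr);
	grh->sgid_index = sgid_index;	/* restore the caller's value */

	return ah;
}

The dmac case is analogous: rdma_ah_retrieve_dmac() may return NULL, which
is why the multiplex hunk above checks the pointer before the memcpy.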
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index eb009045643d..3405e947dc1e 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -244,7 +244,7 @@ static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
 	wc.sl = 0;
 	wc.dlid_path_bits = 0;
 	wc.port_num = ctx->port;
-	wc.slid = ah_attr.dlid;  /* opensm lid */
+	wc.slid = rdma_ah_get_dlid(&ah_attr);  /* opensm lid */
 	wc.src_qp = 1;
 	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
 }
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c211902ea22b..ef4adf32da66 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1394,21 +1394,22 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
 	int smac_index;
 	int err;
 
-
-	path->grh_mylmc = ah->src_path_bits & 0x7f;
-	path->rlid = cpu_to_be16(ah->dlid);
-	if (ah->static_rate) {
-		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
+	path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f;
+	path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
+	if (rdma_ah_get_static_rate(ah)) {
+		path->static_rate = rdma_ah_get_static_rate(ah) +
+				    MLX4_STAT_RATE_OFFSET;
 		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
 		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
 			--path->static_rate;
 	} else
 		path->static_rate = 0;
 
-	if (ah->ah_flags & IB_AH_GRH) {
-		int real_sgid_index = mlx4_ib_gid_index_to_real_index(dev,
-								      port,
-								      ah->grh.sgid_index);
+	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
+		const struct ib_global_route *grh = rdma_ah_read_grh(ah);
+		int real_sgid_index =
+			mlx4_ib_gid_index_to_real_index(dev, port,
+							grh->sgid_index);
 		if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
@@ -1418,19 +1419,19 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
 
 		path->grh_mylmc |= 1 << 7;
 		path->mgid_index = real_sgid_index;
-		path->hop_limit = ah->grh.hop_limit;
+		path->hop_limit = grh->hop_limit;
 		path->tclass_flowlabel =
-			cpu_to_be32((ah->grh.traffic_class << 20) |
-				    (ah->grh.flow_label));
-		memcpy(path->rgid, ah->grh.dgid.raw, 16);
+			cpu_to_be32((grh->traffic_class << 20) |
+				    (grh->flow_label));
+		memcpy(path->rgid, grh->dgid.raw, 16);
 	}
 
 	if (is_eth) {
-		if (!(ah->ah_flags & IB_AH_GRH))
+		if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH))
 			return -1;
 
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-			((port - 1) << 6) | ((ah->sl & 7) << 3);
+			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3);
 
 		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
 		if (vlan_tag < 0x1000) {
@@ -1489,14 +1490,13 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
 		} else {
 			smac_index = smac_info->smac_index;
 		}
-		memcpy(path->dmac, ah->dmac, 6);
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
 		/* put MAC table smac index for IBoE */
 		path->grh_mylmc = (u8) (smac_index) | 0x80;
 	} else {
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-			((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+			((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2);
 	}
 
 	return 0;
@@ -1768,11 +1768,13 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		u16 vlan = 0xffff;
 		u8 smac[ETH_ALEN];
 		int status = 0;
-		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
-			attr->ah_attr.ah_flags & IB_AH_GRH;
+		int is_eth =
+			rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
+			rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
 
-		if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
-			int index = attr->ah_attr.grh.sgid_index;
+		if (is_eth) {
+			int index =
+				rdma_ah_read_grh(&attr->ah_attr)->sgid_index;
 
 			status = ib_get_cached_gid(ibqp->device, port_num,
 						   index, &gid, &gid_attr);
@@ -3396,39 +3398,40 @@ static int to_ib_qp_access_flags(int mlx4_flags)
 }
 
 static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev,
-			    struct rdma_ah_attr *ib_ah_attr,
+			    struct rdma_ah_attr *ah_attr,
 			    struct mlx4_qp_path *path)
 {
 	struct mlx4_dev *dev = ibdev->dev;
 	int is_eth;
+	u8 port_num = path->sched_queue & 0x40 ? 2 : 1;
 
-	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
-	ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1;
+	memset(ah_attr, 0, sizeof(*ah_attr));
+	rdma_ah_set_port_num(ah_attr, port_num);
 
-	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+	if (port_num == 0 || port_num > dev->caps.num_ports)
 		return;
 
-	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) ==
-		IB_LINK_LAYER_ETHERNET;
+	is_eth = rdma_port_get_link_layer(&ibdev->ib_dev,
+					  rdma_ah_get_port_num(ah_attr)) ==
+		 IB_LINK_LAYER_ETHERNET;
 	if (is_eth)
-		ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) |
-			((path->sched_queue & 4) << 1);
+		rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) |
+			       ((path->sched_queue & 4) << 1));
 	else
-		ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf;
+		rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf);
 
-	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
-	ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
-	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
-	ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
-	if (ib_ah_attr->ah_flags) {
-		ib_ah_attr->grh.sgid_index = path->mgid_index;
-		ib_ah_attr->grh.hop_limit = path->hop_limit;
-		ib_ah_attr->grh.traffic_class =
-			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
-		ib_ah_attr->grh.flow_label =
-			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
-		memcpy(ib_ah_attr->grh.dgid.raw,
-			path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
+	rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f);
+	rdma_ah_set_static_rate(ah_attr,
+				path->static_rate ? path->static_rate - 5 : 0);
+	if (path->grh_mylmc & (1 << 7)) {
+		rdma_ah_set_grh(ah_attr, NULL,
+				be32_to_cpu(path->tclass_flowlabel) & 0xfffff,
+				path->mgid_index,
+				path->hop_limit,
+				(be32_to_cpu(path->tclass_flowlabel)
+				 >> 20) & 0xff);
+		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
 	}
 }
@@ -3472,7 +3475,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path);
 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path);
 		qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
-		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+		qp_attr->alt_port_num =
+			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
 	}
 
 	qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
--
cgit v1.2.3