author     Zhenyu Wang <zhenyuw@linux.intel.com>    2017-12-19 13:02:51 +0800
committer  Zhenyu Wang <zhenyuw@linux.intel.com>    2017-12-22 16:33:03 +0800
commit     90551a1296d4dbe0dccc4c3cb5e57e7f2c929009 (patch)
tree       8a277d0f916fdd8acd8ae314c98bb1fb37cdf81f /drivers
parent     4e889d62b89d00e641d588eafed7e721e0a46090 (diff)
drm/i915/gvt: cleanup usage for typed mmio reg vs. offset
We previously had a hack that accepted either an i915_reg_t or a raw offset
value when accessing vGPU virtual/shadow regs, which defeated the type
safety that i915_reg_t is meant to provide. This change explicitly
separates the use of typed mmio regs from raw offsets.
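
For reference, the hack being removed (see the mmio.h hunk in the diff
below) made either argument type "work" by copying it and reinterpreting
its first four bytes as a u32, so the compiler could no longer catch a raw
offset passed where a typed reg was expected:

    #define INTEL_GVT_MMIO_OFFSET(reg) ({ \
            typeof(reg) __reg = reg;      \
            u32 *offset = (u32 *)&__reg;  \
            *offset;                      \
    })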
The old vgpu_vreg(offset) helper is now used only with a raw offset, while
the new vgpu_vreg_t(reg) helper is used only with an i915_reg_t; remaining
call sites are converted to the appropriate helper (see the macro pair
below).
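
The separated helpers, as defined in the gvt.h hunk below. The typed
variant resolves the offset through i915_mmio_reg_offset(), so it only
accepts an i915_reg_t, while the untyped variant takes the raw offset
directly:

    /* Typed mmio reg: reg must be an i915_reg_t. */
    #define vgpu_vreg_t(vgpu, reg) \
            (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
    /* Real offset: offset is a plain u32. */
    #define vgpu_vreg(vgpu, offset) \
            (*(u32 *)(vgpu->mmio.vreg + (offset)))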
This also fixes the remaining KASAN warnings caused by the previous hack.
v2: rebase, fixup against recent mmio switch change
Reviewed-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers')
 drivers/gpu/drm/i915/gvt/cmd_parser.c   | 20
 drivers/gpu/drm/i915/gvt/display.c      | 78
 drivers/gpu/drm/i915/gvt/edid.c         | 22
 drivers/gpu/drm/i915/gvt/fb_decoder.c   | 30
 drivers/gpu/drm/i915/gvt/gtt.c          |  4
 drivers/gpu/drm/i915/gvt/gvt.h          | 31
 drivers/gpu/drm/i915/gvt/handlers.c     | 46
 drivers/gpu/drm/i915/gvt/mmio.c         |  4
 drivers/gpu/drm/i915/gvt/mmio.h         |  7
 drivers/gpu/drm/i915/gvt/mmio_context.c | 20
 drivers/gpu/drm/i915/gvt/vgpu.c         | 24
 11 files changed, 138 insertions(+), 148 deletions(-)
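
As orientation for the diff that follows, the conversions are mechanical:
a call site that passes an i915_reg_t simply gains the _t suffix, with no
change in the computed offset or behavior, e.g. (from the display.c hunk):

    /* before */
    vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
    /* after */
    vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;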
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index be5c519b3324..edec15d19538 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1239,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, return 0; if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { - stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0); - tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & + stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); + tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; } else { - stride = (vgpu_vreg(s->vgpu, info->stride_reg) & + stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(15, 6)) >> 6; - tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; + tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10; } if (stride != info->stride_val) @@ -1264,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip( struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; struct intel_vgpu *vgpu = s->vgpu; - set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12), + set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { - set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0), + set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); - set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10), + set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), info->tile_val << 10); } else { - set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6), + set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6), info->stride_val << 6); - set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10), + set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10), info->tile_val << 10); } - vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++; + vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++; intel_vgpu_trigger_virtual_event(vgpu, info->event); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 1de5919dd043..dd96ffc878ac 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) + if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) return 0; if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) @@ -74,7 +74,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; - if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) + if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) return 1; if (edp_pipe_is_enabled(vgpu) && @@ -169,105 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { - vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); - vgpu_vreg(vgpu, 
SKL_FUSE_STATUS) |= + vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |= SKL_FUSE_DOWNLOAD_STATUS | SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); - vgpu_vreg(vgpu, LCPLL1_CTL) |= + vgpu_vreg_t(vgpu, LCPLL1_CTL) |= LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; - vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; + vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { - vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &= + vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &= ~PORT_CLK_SEL_MASK; - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |= + vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &= + vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &= ~PORT_CLK_SEL_MASK; - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |= + vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; - vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); - vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &= + vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &= ~PORT_CLK_SEL_MASK; - vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |= + vgpu_vreg_t(vgpu, 
PORT_CLK_SEL(PORT_D)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; - vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { if (IS_BROADWELL(dev_priv)) - vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |= + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= GEN8_PORT_DP_A_HOTPLUG; else - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; + vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; - vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; } /* Clear host CRT status, so guest couldn't detect this host CRT. */ if (IS_BROADWELL(dev_priv)) - vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; - vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) @@ -369,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) if (!pipe_is_enabled(vgpu, pipe)) continue; - vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; + vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++; intel_vgpu_trigger_virtual_event(vgpu, event); } if (pipe_is_enabled(vgpu, pipe)) { - vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++; + vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++; intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]); } } diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 42cd09ec63fa..f61337632969 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0) static void reset_gmbus_controller(struct intel_vgpu *vgpu) { - vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY; + vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY; if (!vgpu->display.i2c_edid.edid_available) - vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; + vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE; } @@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, vgpu->display.i2c_edid.state = I2C_GMBUS; vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE; - vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; - vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE; + vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; + vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE; if (intel_vgpu_has_monitor_on_port(vgpu, port) && !intel_vgpu_port_is_dp(vgpu, port)) { vgpu->display.i2c_edid.port = port; vgpu->display.i2c_edid.edid_available = true; - vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER; + vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER; } else - vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; + vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER; return 0; } @@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, * 2) HW_RDY bit asserted */ if (wvalue & GMBUS_SW_CLR_INT) { - vgpu_vreg(vgpu, 
PCH_GMBUS2) &= ~GMBUS_INT; - vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY; + vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT; + vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY; } /* For virtualization, we suppose that HW is always ready, @@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, * visible in gmbus interface) */ i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE; - vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; + vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE; } break; case NIDX_NS_W: @@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, * START (-->INDEX) -->DATA */ i2c_edid->gmbus.phase = GMBUS_DATA_PHASE; - vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; + vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; break; default: gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n"); @@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, u32 reg_data = 0; /* Data can only be recevied if previous settings correct */ - if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) { + if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) { if (byte_left <= 0) { memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); return 0; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6cc99543693f..6b50fe78dc1b 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -147,7 +147,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask; + u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { @@ -209,7 +209,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (pipe >= I915_MAX_PIPES) return -ENODEV; - val = vgpu_vreg(vgpu, DSPCNTR(pipe)); + val = vgpu_vreg_t(vgpu, DSPCNTR(pipe)); plane->enabled = !!(val & DISPLAY_PLANE_ENABLE); if (!plane->enabled) return -ENODEV; @@ -244,7 +244,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; - plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; + plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { gvt_vgpu_err("invalid gma address: %lx\n", (unsigned long)plane->base); @@ -263,14 +263,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); - plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >> + plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >> _PIPE_H_SRCSZ_SHIFT; plane->width += 1; - plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) & + plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT; plane->height += 1; /* raw height is one minus the real value */ - val = vgpu_vreg(vgpu, DSPTILEOFF(pipe)); + val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe)); plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >> _PRI_PLANE_X_OFF_SHIFT; plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >> @@ -344,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, if (pipe >= I915_MAX_PIPES) return -ENODEV; - val = vgpu_vreg(vgpu, CURCNTR(pipe)); + val = vgpu_vreg_t(vgpu, CURCNTR(pipe)); mode = val & CURSOR_MODE; plane->enabled = (mode != CURSOR_MODE_DISABLE); if (!plane->enabled) @@ 
-370,7 +370,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n", alpha_plane, alpha_force); - plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; + plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { gvt_vgpu_err("invalid gma address: %lx\n", (unsigned long)plane->base); @@ -384,7 +384,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, return -EINVAL; } - val = vgpu_vreg(vgpu, CURPOS(pipe)); + val = vgpu_vreg_t(vgpu, CURPOS(pipe)); plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT; plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT; plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; @@ -424,7 +424,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, if (pipe >= I915_MAX_PIPES) return -ENODEV; - val = vgpu_vreg(vgpu, SPRCTL(pipe)); + val = vgpu_vreg_t(vgpu, SPRCTL(pipe)); plane->enabled = !!(val & SPRITE_ENABLE); if (!plane->enabled) return -ENODEV; @@ -475,7 +475,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; - plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; + plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { gvt_vgpu_err("invalid gma address: %lx\n", (unsigned long)plane->base); @@ -489,10 +489,10 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, return -EINVAL; } - plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) & + plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) & _SPRITE_STRIDE_MASK; - val = vgpu_vreg(vgpu, SPRSIZE(pipe)); + val = vgpu_vreg_t(vgpu, SPRSIZE(pipe)); plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >> _SPRITE_SIZE_HEIGHT_SHIFT; plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >> @@ -500,11 +500,11 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->height += 1; /* raw height is one minus the real value */ plane->width += 1; /* raw width is one minus the real value */ - val = vgpu_vreg(vgpu, SPRPOS(pipe)); + val = vgpu_vreg_t(vgpu, SPRPOS(pipe)); plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT; plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT; - val = vgpu_vreg(vgpu, SPROFFSET(pipe)); + val = vgpu_vreg_t(vgpu, SPROFFSET(pipe)); plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >> _SPRITE_OFFSET_START_X_SHIFT; plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 71a0f2b87b3a..8bfac4ed24e6 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2244,7 +2244,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu, int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, int page_table_level) { - u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0])); + u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); struct intel_vgpu_mm *mm; if (WARN_ON((page_table_level != 4) && (page_table_level != 3))) @@ -2279,7 +2279,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, int page_table_level) { - u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0])); + u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0])); struct intel_vgpu_mm *mm; if (WARN_ON((page_table_level != 4) && (page_table_level != 3))) diff --git 
a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 1e9f11c8b7bb..0822d0fd45da 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -412,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu); void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value); -/* Macros for easily accessing vGPU virtual/shadow register */ -#define vgpu_vreg(vgpu, reg) \ - (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_vreg8(vgpu, reg) \ - (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_vreg16(vgpu, reg) \ - (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_vreg64(vgpu, reg) \ - (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_sreg(vgpu, reg) \ - (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_sreg8(vgpu, reg) \ - (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_sreg16(vgpu, reg) \ - (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg))) -#define vgpu_sreg64(vgpu, reg) \ - (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg))) +/* Macros for easily accessing vGPU virtual/shadow register. + Explicitly seperate use for typed MMIO reg or real offset.*/ +#define vgpu_vreg_t(vgpu, reg) \ + (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) +#define vgpu_vreg(vgpu, offset) \ + (*(u32 *)(vgpu->mmio.vreg + (offset))) +#define vgpu_vreg64_t(vgpu, reg) \ + (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg))) +#define vgpu_vreg64(vgpu, offset) \ + (*(u64 *)(vgpu->mmio.vreg + (offset))) +#define vgpu_sreg_t(vgpu, reg) \ + (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg))) +#define vgpu_sreg(vgpu, offset) \ + (*(u32 *)(vgpu->mmio.sreg + (offset))) #define for_each_active_vgpu(gvt, vgpu, id) \ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index a367663a47f6..92d6468daeee 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -343,13 +343,13 @@ static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) { - vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON; - vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE; - vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN; - vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE; + vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON; + vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE; + vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN; + vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE; } else - vgpu_vreg(vgpu, PCH_PP_STATUS) &= + vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~(PP_ON | PP_SEQUENCE_POWER_DOWN | PP_CYCLE_DELAY_ACTIVE); return 0; @@ -503,7 +503,7 @@ static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } else { vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE; if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E))) - vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) + vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) &= ~DP_TP_STATUS_AUTOTRAIN_DONE; } return 0; @@ -521,9 +521,9 @@ static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu, static int fdi_auto_training_started(struct intel_vgpu *vgpu) { - u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E)); + u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E)); u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL); - u32 tx_ctl = vgpu_vreg(vgpu, 
DP_TP_CTL(PORT_E)); + u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E)); if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) && (rx_ctl & FDI_RX_ENABLE) && @@ -564,12 +564,12 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits; /* If imr bit has been masked */ - if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits) + if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits) return 0; - if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits) + if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits) == fdi_tx_check_bits) - && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits) + && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits) == fdi_rx_check_bits)) return 1; else @@ -626,17 +626,17 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, if (ret < 0) return ret; if (ret) - vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK; + vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK; ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2); if (ret < 0) return ret; if (ret) - vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK; + vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK; if (offset == _FDI_RXA_CTL) if (fdi_auto_training_started(vgpu)) - vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |= + vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |= DP_TP_STATUS_AUTOTRAIN_DONE; return 0; } @@ -657,7 +657,7 @@ static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8; if (data == 0x2) { status_reg = DP_TP_STATUS(index); - vgpu_vreg(vgpu, status_reg) |= (1 << 25); + vgpu_vreg_t(vgpu, status_reg) |= (1 << 25); } return 0; } @@ -721,7 +721,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, }; write_vreg(vgpu, offset, p_data, bytes); - vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); return 0; @@ -742,7 +742,7 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, }; write_vreg(vgpu, offset, p_data, bytes); - vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); + vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset); set_bit(flip_event[index], vgpu->irq.flip_done_event[index]); return 0; @@ -1064,9 +1064,9 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu, static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> + if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) { - unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) & + unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset); @@ -1091,13 +1091,13 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, offset) = data; - if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> + if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) { - unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) & + unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; write_virtual_sbi_register(vgpu, sbi_offset, - vgpu_vreg(vgpu, SBI_DATA)); + vgpu_vreg_t(vgpu, SBI_DATA)); } return 0; } @@ -1343,7 +1343,7 @@ static int mailbox_write(struct 
intel_vgpu *vgpu, unsigned int offset, { u32 value = *(u32 *)p_data; u32 cmd = value & 0xff; - u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); + u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA); switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index f7227a3ad469..b18a8bed6c18 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -336,10 +336,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); memcpy(vgpu->mmio.sreg, mmio, info->mmio_size); - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; + vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; /* set the bit 0:2(Core C-State ) to C0 */ - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; + vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; vgpu->mmio.disable_warn_untrack = false; } else { diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 62709ac351cd..71b620875943 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -76,13 +76,6 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data); - -#define INTEL_GVT_MMIO_OFFSET(reg) ({ \ - typeof(reg) __reg = reg; \ - u32 *offset = (u32 *)&__reg; \ - *offset; \ -}) - int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr); void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 94ac93996969..74834395dd89 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -224,7 +224,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else - vgpu_vreg(vgpu, regs[ring_id]) = 0; + vgpu_vreg_t(vgpu, reg) = 0; intel_uncore_forcewake_put(dev_priv, fw); @@ -257,11 +257,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { if (pre) - old_v = vgpu_vreg(pre, offset); + old_v = vgpu_vreg_t(pre, offset); else old_v = gen9_render_mocs.control_table[ring_id][i]; if (next) - new_v = vgpu_vreg(next, offset); + new_v = vgpu_vreg_t(next, offset); else new_v = gen9_render_mocs.control_table[ring_id][i]; @@ -275,11 +275,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, l3_offset.reg = 0xb020; for (i = 0; i < 32; i++) { if (pre) - old_v = vgpu_vreg(pre, l3_offset); + old_v = vgpu_vreg_t(pre, l3_offset); else old_v = gen9_render_mocs.l3cc_table[i]; if (next) - new_v = vgpu_vreg(next, l3_offset); + new_v = vgpu_vreg_t(next, l3_offset); else new_v = gen9_render_mocs.l3cc_table[i]; @@ -316,11 +316,11 @@ static void switch_mmio(struct intel_vgpu *pre, continue; // save if (pre) { - vgpu_vreg(pre, mmio->reg) = I915_READ_FW(mmio->reg); + vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg); if (mmio->mask) - vgpu_vreg(pre, mmio->reg) &= + vgpu_vreg_t(pre, mmio->reg) &= ~(mmio->mask << 16); - old_v = vgpu_vreg(pre, mmio->reg); + old_v = vgpu_vreg_t(pre, mmio->reg); } else old_v = mmio->value = I915_READ_FW(mmio->reg); @@ -340,10 +340,10 @@ static void switch_mmio(struct intel_vgpu *pre, continue; if (mmio->mask) - new_v = vgpu_vreg(next, mmio->reg) | + new_v = vgpu_vreg_t(next, mmio->reg) | 
(mmio->mask << 16); else - new_v = vgpu_vreg(next, mmio->reg); + new_v = vgpu_vreg_t(next, mmio->reg); } else { if (mmio->in_context) continue; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 39926176fbeb..4688619f6a1c 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -38,25 +38,25 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) { /* setup the ballooning information */ - vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC; - vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1; - vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0; - vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0; - vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id; + vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; + vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; + vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0; + vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id; - vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; - vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; - vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = + vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); - vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) = + vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) = vgpu_aperture_sz(vgpu); - vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) = + vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) = vgpu_hidden_gmadr_base(vgpu); - vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) = + vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) = vgpu_hidden_sz(vgpu); - vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); + vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", |