author     Maxime Ripard <maxime@cerno.tech>    2020-08-18 14:14:25 +0200
committer  Maxime Ripard <maxime@cerno.tech>    2020-08-18 14:14:25 +0200
commit     d85ddd1318e66c0c2665dbfcbc21a8b66c9152aa (patch)
tree       e49e401abd2468b398d4bc84c7e05c2c2c3b0966 /drivers/gpu
parent     652bcaec7da0f06f00be578c200e1c57099449d2 (diff)
parent     9123e3a74ec7b934a4a099e98af6a61c2f80bbf5 (diff)
Merge v5.9-rc1 into drm-misc-next
Sam needs 5.9-rc1 to have dev_err_probe in to merge some patches.
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
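
For context, the dev_err_probe() helper that this merge pulls in lets a probe function log an error and return it in a single call, while staying quiet (and only recording a deferral reason) when the error is -EPROBE_DEFER. Below is a minimal sketch of the pattern the bridge-driver hunks in this diff convert to; the demo_probe() function and its driver are illustrative, not taken from any file in this commit.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	/* Request a resource; on failure, log and return in one statement.
	 * dev_err_probe() prints via dev_err() unless the error is
	 * -EPROBE_DEFER, in which case it only records the deferral reason,
	 * and it always returns the error code it was given. */
	clk = devm_clk_get(dev, "xtal");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get xtal clock\n");

	return 0;
}

This collapses the old "if (err != -EPROBE_DEFER) dev_err(...); return err;" sequence that the lvds-codec and sil-sii8620 hunks below remove.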
Diffstat (limited to 'drivers/gpu')
37 files changed, 235 insertions, 255 deletions
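
Several of the HDMI hunks in the diff below (sii902x, exynos_hdmi, tda998x, mtk_hdmi, cdn-dp, sti_hdmi, zx_hdmi) move from the removed .digital_mute callback to .mute_stream plus .no_capture_mute. A minimal sketch of the new callback shape, assuming the hdmi-codec interface as of v5.9-rc1 and an illustrative demo_* driver name rather than one from this diff:

#include <linux/device.h>
#include <sound/hdmi-codec.h>

/* New-style mute hook: it now receives the stream direction, and
 * .no_capture_mute = 1 tells hdmi-codec that only playback is muted,
 * so this callback is never invoked for the capture direction. */
static int demo_hdmi_audio_mute(struct device *dev, void *data,
				bool enable, int direction)
{
	dev_dbg(dev, "%smuting HDMI playback\n", enable ? "" : "un");
	return 0;
}

static const struct hdmi_codec_ops demo_audio_codec_ops = {
	.mute_stream = demo_hdmi_audio_mute,
	.no_capture_mute = 1,
};
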
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index a7a30e39aa1b..aa2b328c6202 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_count = k; /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); write_seqcount_begin(&resv->seq); RCU_INIT_POINTER(resv->fence, new); write_seqcount_end(&resv->seq); - preempt_enable(); /* Drop the references to the removed fences or move them to ef_list */ for (i = j, k = 0; i < old->shared_count; ++i) { diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 24fb1befdfa2..f19d9f7a5db2 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -71,13 +71,9 @@ static int lvds_codec_probe(struct platform_device *pdev) lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev); lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH); - if (IS_ERR(lvds_codec->powerdown_gpio)) { - int err = PTR_ERR(lvds_codec->powerdown_gpio); - - if (err != -EPROBE_DEFER) - dev_err(dev, "powerdown GPIO failure: %d\n", err); - return err; - } + if (IS_ERR(lvds_codec->powerdown_gpio)) + return dev_err_probe(dev, PTR_ERR(lvds_codec->powerdown_gpio), + "powerdown GPIO failure\n"); /* Locate the panel DT node. */ panel_node = of_graph_get_remote_node(dev->of_node, 1, 0); diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 19d8ae59ea03..33fd33f953ec 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -672,8 +672,8 @@ static void sii902x_audio_shutdown(struct device *dev, void *data) clk_disable_unprepare(sii902x->audio.mclk); } -static int sii902x_audio_digital_mute(struct device *dev, - void *data, bool enable) +static int sii902x_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct sii902x *sii902x = dev_get_drvdata(dev); @@ -724,9 +724,10 @@ static int sii902x_audio_get_dai_id(struct snd_soc_component *component, static const struct hdmi_codec_ops sii902x_audio_codec_ops = { .hw_params = sii902x_audio_hw_params, .audio_shutdown = sii902x_audio_shutdown, - .digital_mute = sii902x_audio_digital_mute, + .mute_stream = sii902x_audio_mute, .get_eld = sii902x_audio_get_eld, .get_dai_id = sii902x_audio_get_dai_id, + .no_capture_mute = 1, }; static int sii902x_audio_codec_init(struct sii902x *sii902x, diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 95f3d8cfe9ec..843265d7f1b1 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -986,7 +986,7 @@ static void sii8620_set_auto_zone(struct sii8620 *ctx) static void sii8620_stop_video(struct sii8620 *ctx) { - u8 uninitialized_var(val); + u8 val; sii8620_write_seq_static(ctx, REG_TPI_INTR_EN, 0, @@ -2300,10 +2300,9 @@ static int sii8620_probe(struct i2c_client *client, INIT_LIST_HEAD(&ctx->mt_queue); ctx->clk_xtal = devm_clk_get(dev, "xtal"); - if (IS_ERR(ctx->clk_xtal)) { - dev_err(dev, "failed to get xtal clock from DT\n"); - return PTR_ERR(ctx->clk_xtal); - } + if (IS_ERR(ctx->clk_xtal)) + return dev_err_probe(dev, PTR_ERR(ctx->clk_xtal), + "failed to get xtal clock from DT\n"); if (!client->irq) { dev_err(dev, "no irq provided\n"); @@ -2314,16 +2313,14 @@ static int 
sii8620_probe(struct i2c_client *client, sii8620_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "sii8620", ctx); - if (ret < 0) { - dev_err(dev, "failed to install IRQ handler\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, + "failed to install IRQ handler\n"); ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ctx->gpio_reset)) { - dev_err(dev, "failed to get reset gpio from DT\n"); - return PTR_ERR(ctx->gpio_reset); - } + if (IS_ERR(ctx->gpio_reset)) + return dev_err_probe(dev, PTR_ERR(ctx->gpio_reset), + "failed to get reset gpio from DT\n"); ctx->supplies[0].supply = "cvcc10"; ctx->supplies[1].supply = "iovcc18"; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f6c723904204..6840f0530a38 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3093,7 +3093,7 @@ static int drm_cvt_modes(struct drm_connector *connector, const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { - int uninitialized_var(width), height; + int width, height; cvt = &(timing->data.other_data.data.cvt[i]); diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c index 7ac0fc0a9415..bd481fdd6b87 100644 --- a/drivers/gpu/drm/drm_vblank_work.c +++ b/drivers/gpu/drm/drm_vblank_work.c @@ -248,9 +248,6 @@ EXPORT_SYMBOL(drm_vblank_work_init); int drm_vblank_worker_init(struct drm_vblank_crtc *vblank) { - struct sched_param param = { - .sched_priority = MAX_RT_PRIO - 1, - }; struct kthread_worker *worker; INIT_LIST_HEAD(&vblank->pending_work); @@ -263,5 +260,6 @@ int drm_vblank_worker_init(struct drm_vblank_crtc *vblank) vblank->worker = worker; - return sched_setscheduler(vblank->worker->task, SCHED_FIFO, ¶m); + sched_set_fifo(worker->task); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index db0eab53dcfe..843dfcefc46a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -547,9 +547,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, unsigned long best_freq = 0; u32 min_delta = 0xffffffff; u8 p_min, p_max; - u8 _p, uninitialized_var(best_p); - u16 _m, uninitialized_var(best_m); - u8 _s, uninitialized_var(best_s); + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; p_min = DIV_ROUND_UP(fin, (12 * MHZ)); p_max = fin / (6 * MHZ); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 8c3f5b21eff4..c5ba32fca5f3 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1605,7 +1605,8 @@ static int hdmi_audio_hw_params(struct device *dev, void *data, return 0; } -static int hdmi_audio_digital_mute(struct device *dev, void *data, bool mute) +static int hdmi_audio_mute(struct device *dev, void *data, + bool mute, int direction) { struct hdmi_context *hdata = dev_get_drvdata(dev); @@ -1635,8 +1636,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = hdmi_audio_hw_params, .audio_shutdown = hdmi_audio_shutdown, - .digital_mute = hdmi_audio_digital_mute, + .mute_stream = hdmi_audio_mute, .get_eld = hdmi_audio_get_eld, + .no_capture_mute = 1, }; static int hdmi_register_audio_device(struct hdmi_context *hdata) diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 50fd119a5276..b7ec6c374fbd 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c 
@@ -1133,8 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data) mutex_unlock(&priv->audio_mutex); } -static int tda998x_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int tda998x_audio_mute_stream(struct device *dev, void *data, + bool enable, int direction) { struct tda998x_priv *priv = dev_get_drvdata(dev); @@ -1162,8 +1162,9 @@ static int tda998x_audio_get_eld(struct device *dev, void *data, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = tda998x_audio_hw_params, .audio_shutdown = tda998x_audio_shutdown, - .digital_mute = tda998x_audio_digital_mute, + .mute_stream = tda998x_audio_mute_stream, .get_eld = tda998x_audio_get_eld, + .no_capture_mute = 1, }; static int tda998x_audio_codec_init(struct tda998x_priv *priv, diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 85723fba6002..24c3a0f212c6 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -477,7 +477,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, unsigned int size, unsigned int fb_cpp) { struct intel_fbc *fbc = &dev_priv->fbc; - struct drm_mm_node *uninitialized_var(compressed_llb); + struct drm_mm_node *compressed_llb; int ret; drm_WARN_ON(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index aaed9eb3b56c..bbde3b12c311 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -1929,7 +1929,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, return retval; } - level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100, + level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100, CRC_PMIC_PWM_PERIOD_NS); panel->backlight.level = intel_panel_compute_brightness(connector, level); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index e946032b13e4..2c2bf24140c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) locked = 1; } ret = pin_user_pages_remote - (work->task, mm, + (mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e0280a672f1d..24322ef08aa4 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1106,7 +1106,7 @@ static struct i915_request * __unwind_incomplete_requests(struct intel_engine_cs *engine) { struct i915_request *rq, *rn, *active = NULL; - struct list_head *uninitialized_var(pl); + struct list_head *pl; int prio = I915_PRIORITY_INVALID; lockdep_assert_held(&engine->active.lock); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f5edee17902a..8d5a933e6af6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1993,7 +1993,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, unsigned int slow_timeout_ms, u32 *out_value) { - u32 uninitialized_var(reg_value); + u32 reg_value; #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9a46be05425a..b9810bf156c3 100644 --- 
a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -24,6 +24,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/iommu.h> #include <drm/drm_managed.h> @@ -118,6 +119,9 @@ struct drm_i915_private *mock_gem_device(void) { struct drm_i915_private *i915; struct pci_dev *pdev; +#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) + struct dev_iommu iommu; +#endif int err; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); @@ -136,8 +140,10 @@ struct drm_i915_private *mock_gem_device(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - /* hack to disable iommu for the fake device; force identity mapping */ - pdev->dev.archdata.iommu = (void *)-1; + /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */ + memset(&iommu, 0, sizeof(iommu)); + iommu.priv = (void *)-1; + pdev->dev.iommu = &iommu; #endif pci_set_drvdata(pdev, i915); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 040834bdee9e..3fc5511330b9 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -483,6 +483,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc) cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event); mtk_crtc_ddp_config(crtc, cmdq_handle); + cmdq_pkt_finalize(cmdq_handle); cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle); } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 95591262ac36..f2e9b429960b 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1643,7 +1643,8 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) } static int -mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +mtk_hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1684,9 +1685,10 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .hw_params = mtk_hdmi_audio_hw_params, .audio_startup = mtk_hdmi_audio_startup, .audio_shutdown = mtk_hdmi_audio_shutdown, - .digital_mute = mtk_hdmi_audio_digital_mute, + .mute_stream = mtk_hdmi_audio_mute, .get_eld = mtk_hdmi_audio_get_eld, .hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb, + .no_capture_mute = 1, }; static int mtk_hdmi_register_audio_driver(struct device *dev) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 36d98d4116ca..7d641c7e3514 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -401,7 +401,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) struct msm_kms *kms; struct msm_mdss *mdss; int ret, i; - struct sched_param param; ddev = drm_dev_alloc(drv, dev); if (IS_ERR(ddev)) { @@ -507,12 +506,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ddev->mode_config.funcs = &mode_config_funcs; ddev->mode_config.helper_private = &mode_config_helper_funcs; - /** - * this priority was found during empiric testing to have appropriate - * realtime scheduling to process display updates and interact with - * other real time and normal priority task - */ - param.sched_priority = 16; for (i = 0; i < priv->num_crtcs; i++) { /* initialize event thread */ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; @@ -524,11 
+517,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto err_msm_uninit; } - ret = sched_setscheduler(priv->event_thread[i].worker->task, - SCHED_FIFO, ¶m); - if (ret) - dev_warn(dev, "event_thread set priority failed:%d\n", - ret); + sched_set_fifo(priv->event_thread[i].worker->task); } ret = drm_vblank_init(ddev, priv->num_crtcs); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 98a1739e01e9..4e8112fde3e6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -143,6 +143,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, { struct device *dev = drm->dev->dev; struct page *dpage, *spage; + struct nouveau_svmm *svmm; spage = migrate_pfn_to_page(args->src[0]); if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE)) @@ -157,14 +158,19 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, if (dma_mapping_error(dev, *dma_addr)) goto error_free_page; + svmm = spage->zone_device_data; + mutex_lock(&svmm->mutex); + nouveau_svmm_invalidate(svmm, args->start, args->end); if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) goto error_dma_unmap; + mutex_unlock(&svmm->mutex); args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; return 0; error_dma_unmap: + mutex_unlock(&svmm->mutex); dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); error_free_page: __free_page(dpage); @@ -185,7 +191,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .end = vmf->address + PAGE_SIZE, .src = &src, .dst = &dst, - .src_owner = drm->dev, + .pgmap_owner = drm->dev, + .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE, }; /* @@ -558,7 +565,8 @@ nouveau_dmem_init(struct nouveau_drm *drm) } static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, - unsigned long src, dma_addr_t *dma_addr, u64 *pfn) + struct nouveau_svmm *svmm, unsigned long src, + dma_addr_t *dma_addr, u64 *pfn) { struct device *dev = drm->dev->dev; struct page *dpage, *spage; @@ -588,6 +596,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, goto out_free_page; } + dpage->zone_device_data = svmm; *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM | ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT); if (src & MIGRATE_PFN_WRITE) @@ -611,8 +620,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, unsigned long addr = args->start, nr_dma = 0, i; for (i = 0; addr < args->end; i++) { - args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i], - dma_addrs + nr_dma, pfns + i); + args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm, + args->src[i], dma_addrs + nr_dma, pfns + i); if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma])) nr_dma++; addr += PAGE_SIZE; @@ -643,6 +652,8 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm, struct migrate_vma args = { .vma = vma, .start = start, + .pgmap_owner = drm->dev, + .flags = MIGRATE_VMA_SELECT_SYSTEM, }; unsigned long i; u64 *pfns; diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index d4b4f866ab78..2df1c0460559 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -93,17 +93,6 @@ nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst) return NULL; } -struct nouveau_svmm { - struct mmu_notifier notifier; - struct nouveau_vmm *vmm; - struct { - unsigned long start; - unsigned long limit; 
- } unmanaged; - - struct mutex mutex; -}; - #define SVMM_DBG(s,f,a...) \ NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a) #define SVMM_ERR(s,f,a...) \ @@ -246,7 +235,7 @@ nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst) } /* Invalidate SVMM address-range on GPU. */ -static void +void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) { if (limit > start) { @@ -279,6 +268,14 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn, if (unlikely(!svmm->vmm)) goto out; + /* + * Ignore invalidation callbacks for device private pages since + * the invalidation is handled as part of the migration process. + */ + if (update->event == MMU_NOTIFY_MIGRATE && + update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev) + goto out; + if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) { if (start < svmm->unmanaged.start) { nouveau_svmm_invalidate(svmm, start, @@ -515,53 +512,68 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = { }; static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm, - struct hmm_range *range, u64 *ioctl_addr) + struct hmm_range *range, + struct nouveau_pfnmap_args *args) { - unsigned long i, npages; + struct page *page; /* - * The ioctl_addr prepared here is passed through nvif_object_ioctl() + * The address prepared here is passed through nvif_object_ioctl() * to an eventual DMA map in something like gp100_vmm_pgt_pfn() * * This is all just encoding the internal hmm representation into a * different nouveau internal representation. */ - npages = (range->end - range->start) >> PAGE_SHIFT; - for (i = 0; i < npages; ++i) { - struct page *page; - - if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) { - ioctl_addr[i] = 0; - continue; - } + if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) { + args->p.phys[0] = 0; + return; + } - page = hmm_pfn_to_page(range->hmm_pfns[i]); - if (is_device_private_page(page)) - ioctl_addr[i] = nouveau_dmem_page_addr(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_VRAM; - else - ioctl_addr[i] = page_to_phys(page) | - NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_HOST; - if (range->hmm_pfns[i] & HMM_PFN_WRITE) - ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W; + page = hmm_pfn_to_page(range->hmm_pfns[0]); + /* + * Only map compound pages to the GPU if the CPU is also mapping the + * page as a compound page. Otherwise, the PTE protections might not be + * consistent (e.g., CPU only maps part of a compound page). + * Note that the underlying page might still be larger than the + * CPU mapping (e.g., a PUD sized compound page partially mapped with + * a PMD sized page table entry). 
+ */ + if (hmm_pfn_to_map_order(range->hmm_pfns[0])) { + unsigned long addr = args->p.addr; + + args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) + + PAGE_SHIFT; + args->p.size = 1UL << args->p.page; + args->p.addr &= ~(args->p.size - 1); + page -= (addr - args->p.addr) >> PAGE_SHIFT; } + if (is_device_private_page(page)) + args->p.phys[0] = nouveau_dmem_page_addr(page) | + NVIF_VMM_PFNMAP_V0_V | + NVIF_VMM_PFNMAP_V0_VRAM; + else + args->p.phys[0] = page_to_phys(page) | + NVIF_VMM_PFNMAP_V0_V | + NVIF_VMM_PFNMAP_V0_HOST; + if (range->hmm_pfns[0] & HMM_PFN_WRITE) + args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W; } static int nouveau_range_fault(struct nouveau_svmm *svmm, - struct nouveau_drm *drm, void *data, u32 size, - unsigned long hmm_pfns[], u64 *ioctl_addr, + struct nouveau_drm *drm, + struct nouveau_pfnmap_args *args, u32 size, + unsigned long hmm_flags, struct svm_notifier *notifier) { unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); /* Have HMM fault pages within the fault window to the GPU. */ + unsigned long hmm_pfns[1]; struct hmm_range range = { .notifier = ¬ifier->notifier, .start = notifier->notifier.interval_tree.start, .end = notifier->notifier.interval_tree.last + 1, - .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, + .default_flags = hmm_flags, .hmm_pfns = hmm_pfns, .dev_private_owner = drm->dev, }; @@ -577,11 +589,6 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, ret = hmm_range_fault(&range); mmap_read_unlock(mm); if (ret) { - /* - * FIXME: the input PFN_REQ flags are destroyed on - * -EBUSY, we need to regenerate them, also for the - * other continue below - */ if (ret == -EBUSY) continue; return ret; @@ -596,10 +603,10 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, break; } - nouveau_hmm_convert_pfn(drm, &range, ioctl_addr); + nouveau_hmm_convert_pfn(drm, &range, args); svmm->vmm->vmm.object.client->super = true; - ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL); + ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); @@ -616,17 +623,12 @@ nouveau_svm_fault(struct nvif_notify *notify) struct nvif_object *device = &svm->drm->client.device.object; struct nouveau_svmm *svmm; struct { - struct { - struct nvif_ioctl_v0 i; - struct nvif_ioctl_mthd_v0 m; - struct nvif_vmm_pfnmap_v0 p; - } i; - u64 phys[16]; + struct nouveau_pfnmap_args i; + u64 phys[1]; } args; - unsigned long hmm_pfns[ARRAY_SIZE(args.phys)]; - struct vm_area_struct *vma; + unsigned long hmm_flags; u64 inst, start, limit; - int fi, fn, pi, fill; + int fi, fn; int replay = 0, ret; /* Parse available fault buffer entries into a cache, and update @@ -693,128 +695,83 @@ nouveau_svm_fault(struct nvif_notify *notify) * window into a single update. */ start = buffer->fault[fi]->addr; - limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); + limit = start + PAGE_SIZE; if (start < svmm->unmanaged.limit) limit = min_t(u64, limit, svmm->unmanaged.start); - SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); - mm = svmm->notifier.mm; - if (!mmget_not_zero(mm)) { - nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); - continue; - } - - /* Intersect fault window with the CPU VMA, cancelling - * the fault if the address is invalid. + /* + * Prepare the GPU-side update of all pages within the + * fault window, determining required pages and access + * permissions based on pending faults. 
*/ - mmap_read_lock(mm); - vma = find_vma_intersection(mm, start, limit); - if (!vma) { - SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit); - mmap_read_unlock(mm); - mmput(mm); - nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); - continue; + args.i.p.addr = start; + args.i.p.page = PAGE_SHIFT; + args.i.p.size = PAGE_SIZE; + /* + * Determine required permissions based on GPU fault + * access flags. + * XXX: atomic? + */ + switch (buffer->fault[fi]->access) { + case 0: /* READ. */ + hmm_flags = HMM_PFN_REQ_FAULT; + break; + case 3: /* PREFETCH. */ + hmm_flags = 0; + break; + default: + hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE; + break; } - start = max_t(u64, start, vma->vm_start); - limit = min_t(u64, limit, vma->vm_end); - mmap_read_unlock(mm); - SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); - if (buffer->fault[fi]->addr != start) { - SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr); - mmput(mm); + mm = svmm->notifier.mm; + if (!mmget_not_zero(mm)) { nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); continue; } - /* Prepare the GPU-side update of all pages within the - * fault window, determining required pages and access - * permissions based on pending faults. - */ - args.i.p.page = PAGE_SHIFT; - args.i.p.addr = start; - for (fn = fi, pi = 0;;) { - /* Determine required permissions based on GPU fault - * access flags. - *XXX: atomic? - */ - switch (buffer->fault[fn]->access) { - case 0: /* READ. */ - hmm_pfns[pi++] = HMM_PFN_REQ_FAULT; - break; - case 3: /* PREFETCH. */ - hmm_pfns[pi++] = 0; - break; - default: - hmm_pfns[pi++] = HMM_PFN_REQ_FAULT | - HMM_PFN_REQ_WRITE; - break; - } - args.i.p.size = pi << PAGE_SHIFT; + notifier.svmm = svmm; + ret = mmu_interval_notifier_insert(¬ifier.notifier, mm, + args.i.p.addr, args.i.p.size, + &nouveau_svm_mni_ops); + if (!ret) { + ret = nouveau_range_fault(svmm, svm->drm, &args.i, + sizeof(args), hmm_flags, ¬ifier); + mmu_interval_notifier_remove(¬ifier.notifier); + } + mmput(mm); + limit = args.i.p.addr + args.i.p.size; + for (fn = fi; ++fn < buffer->fault_nr; ) { /* It's okay to skip over duplicate addresses from the * same SVMM as faults are ordered by access type such * that only the first one needs to be handled. * * ie. WRITE faults appear first, thus any handling of * pending READ faults will already be satisfied. + * But if a large page is mapped, make sure subsequent + * fault addresses have sufficient access permission. */ - while (++fn < buffer->fault_nr && - buffer->fault[fn]->svmm == svmm && - buffer->fault[fn ]->addr == - buffer->fault[fn - 1]->addr); - - /* If the next fault is outside the window, or all GPU - * faults have been dealt with, we're done here. - */ - if (fn >= buffer->fault_nr || - buffer->fault[fn]->svmm != svmm || - buffer->fault[fn]->addr >= limit) + if (buffer->fault[fn]->svmm != svmm || + buffer->fault[fn]->addr >= limit || + (buffer->fault[fi]->access == 0 /* READ. */ && + !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) || + (buffer->fault[fi]->access != 0 /* READ. */ && + buffer->fault[fi]->access != 3 /* PREFETCH. */ && + !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W))) break; - - /* Fill in the gap between this fault and the next. 
*/ - fill = (buffer->fault[fn ]->addr - - buffer->fault[fn - 1]->addr) >> PAGE_SHIFT; - while (--fill) - hmm_pfns[pi++] = 0; } - SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)", - args.i.p.addr, - args.i.p.addr + args.i.p.size, fn - fi); - - notifier.svmm = svmm; - ret = mmu_interval_notifier_insert(¬ifier.notifier, - svmm->notifier.mm, - args.i.p.addr, args.i.p.size, - &nouveau_svm_mni_ops); - if (!ret) { - ret = nouveau_range_fault( - svmm, svm->drm, &args, - sizeof(args.i) + pi * sizeof(args.phys[0]), - hmm_pfns, args.phys, ¬ifier); - mmu_interval_notifier_remove(¬ifier.notifier); - } - mmput(mm); + /* If handling failed completely, cancel all faults. */ + if (ret) { + while (fi < fn) { + struct nouveau_svm_fault *fault = + buffer->fault[fi++]; - /* Cancel any faults in the window whose pages didn't manage - * to keep their valid bit, or stay writeable when required. - * - * If handling failed completely, cancel all faults. - */ - while (fi < fn) { - struct nouveau_svm_fault *fault = buffer->fault[fi++]; - pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT; - if (ret || - !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) || - (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) && - fault->access != 0 && fault->access != 3)) { nouveau_svm_fault_cancel_fault(svm, fault); - continue; } + } else replay++; - } } /* Issue fault replay to the GPU. */ diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h index f0fcd1b72e8b..e7d63d7f0c2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.h +++ b/drivers/gpu/drm/nouveau/nouveau_svm.h @@ -1,11 +1,21 @@ #ifndef __NOUVEAU_SVM_H__ #define __NOUVEAU_SVM_H__ #include <nvif/os.h> +#include <linux/mmu_notifier.h> struct drm_device; struct drm_file; struct nouveau_drm; -struct nouveau_svmm; +struct nouveau_svmm { + struct mmu_notifier notifier; + struct nouveau_vmm *vmm; + struct { + unsigned long start; + unsigned long limit; + } unmanaged; + + struct mutex mutex; +}; #if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) void nouveau_svm_init(struct nouveau_drm *); @@ -19,6 +29,7 @@ int nouveau_svmm_join(struct nouveau_svmm *, u64 inst); void nouveau_svmm_part(struct nouveau_svmm *, u64 inst); int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *); +void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit); u64 *nouveau_pfns_alloc(unsigned long npages); void nouveau_pfns_free(u64 *pfns); void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c index 9539e6cda4d9..236db5570771 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c @@ -79,8 +79,12 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, dma_addr_t addr; nvkm_kmap(pt->memory); - while (ptes--) { + for (; ptes; ptes--, map->pfn++) { u64 data = 0; + + if (!(*map->pfn & NVKM_VMM_PFN_V)) + continue; + if (!(*map->pfn & NVKM_VMM_PFN_W)) data |= BIT_ULL(6); /* RO. */ @@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, } VMM_WO064(pt, vmm, ptei++ * 8, data); - map->pfn++; } nvkm_done(pt->memory); } @@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, dma_addr_t addr; nvkm_kmap(pt->memory); - while (ptes--) { + for (; ptes; ptes--, map->pfn++) { u64 data = 0; + if (!(*map->pfn & NVKM_VMM_PFN_V)) + continue; + if (!(*map->pfn & NVKM_VMM_PFN_W)) data |= BIT_ULL(6); /* RO. 
*/ @@ -332,7 +338,6 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, } VMM_WO064(pt, vmm, ptei++ * 16, data); - map->pfn++; } nvkm_done(pt->memory); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 1a49e619aacf..e8f7b11352d2 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, while (len) { size_t pgsize = get_pgsize(iova | paddr, len); - ops->map(ops, iova, paddr, pgsize, prot); + ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL); iova += pgsize; paddr += pgsize; len -= pgsize; diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h index a0ee41632d7e..a7bc31f6d565 100644 --- a/drivers/gpu/drm/qxl/qxl_dev.h +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -131,8 +131,6 @@ enum SpiceCursorType { #pragma pack(push, 1) -#define REDHAT_PCI_VENDOR_ID 0x1b36 - /* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ #define QXL_DEVICE_ID_STABLE 0x0100 diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index c634b95b50f7..a4a45daf93f2 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -817,8 +817,8 @@ out: mutex_unlock(&dp->lock); } -static int cdn_dp_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int cdn_dp_audio_mute_stream(struct device *dev, void *data, + bool enable, int direction) { struct cdn_dp_device *dp = dev_get_drvdata(dev); int ret; @@ -849,8 +849,9 @@ static int cdn_dp_audio_get_eld(struct device *dev, void *data, static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = cdn_dp_audio_hw_params, .audio_shutdown = cdn_dp_audio_shutdown, - .digital_mute = cdn_dp_audio_digital_mute, + .mute_stream = cdn_dp_audio_mute_stream, .get_eld = cdn_dp_audio_get_eld, + .no_capture_mute = 1, }; static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 3feff0c45b3f..542dcf7eddd6 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -517,8 +517,8 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, unsigned long best_freq = 0; unsigned long fvco_min, fvco_max, fin, fout; unsigned int min_prediv, max_prediv; - unsigned int _prediv, uninitialized_var(best_prediv); - unsigned long _fbdiv, uninitialized_var(best_fbdiv); + unsigned int _prediv, best_prediv; + unsigned long _fbdiv, best_fbdiv; unsigned long min_delta = ULONG_MAX; dsi->format = format; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index d6eaa23ad746..96f763d888af 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -762,11 +762,10 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) */ static int drm_sched_main(void *param) { - struct sched_param sparam = {.sched_priority = 1}; struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; int r; - sched_setscheduler(current, SCHED_FIFO, &sparam); + sched_set_fifo_low(current); while (!kthread_should_stop()) { struct drm_sched_entity *entity = NULL; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 5b15c4974e6b..008f07923bbc 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ 
b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1191,7 +1191,8 @@ static int hdmi_audio_hw_params(struct device *dev, return 0; } -static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +static int hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct sti_hdmi *hdmi = dev_get_drvdata(dev); @@ -1219,8 +1220,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size static const struct hdmi_codec_ops audio_codec_ops = { .hw_params = hdmi_audio_hw_params, .audio_shutdown = hdmi_audio_shutdown, - .digital_mute = hdmi_audio_digital_mute, + .mute_stream = hdmi_audio_mute, .get_eld = hdmi_audio_get_eld, + .no_capture_mute = 1, }; static int sti_hdmi_register_audio_driver(struct device *dev, diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index bf060c69850f..75d0dc2f6d28 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -39,8 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) u32 events_read, events_clear = 0; /* read the config space */ - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - events_read, &events_read); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + events_read, &events_read); if (events_read & VIRTIO_GPU_EVENT_DISPLAY) { if (vgdev->has_edid) virtio_gpu_cmd_get_edids(vgdev); @@ -49,8 +49,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) drm_helper_hpd_irq_event(vgdev->ddev); events_clear |= VIRTIO_GPU_EVENT_DISPLAY; } - virtio_cwrite(vgdev->vdev, struct virtio_gpu_config, - events_clear, &events_clear); + virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config, + events_clear, &events_clear); } static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, @@ -169,8 +169,8 @@ int virtio_gpu_init(struct drm_device *dev) } /* get display info */ - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - num_scanouts, &num_scanouts); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_scanouts, &num_scanouts); vgdev->num_scanouts = min_t(uint32_t, num_scanouts, VIRTIO_GPU_MAX_SCANOUTS); if (!vgdev->num_scanouts) { @@ -180,8 +180,8 @@ int virtio_gpu_init(struct drm_device *dev) } DRM_INFO("number of scanouts: %d\n", num_scanouts); - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - num_capsets, &num_capsets); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_capsets, &num_capsets); DRM_INFO("number of cap sets: %d\n", num_capsets); virtio_gpu_modeset_init(vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 1359eb8f1a02..729f98ad7c02 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct virtio_gpu_mem_entry **ents, unsigned int *nents) { - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); struct scatterlist *sg; int si, ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 823223b0bb75..c93c2db35aaf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct 
virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) @@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 3e660fb111b3..013c9e0e412c 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -157,7 +157,8 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u32 width, u32 height, - u32 bpp, u64 size, struct page **pages) + u32 bpp, u64 size, u32 offset, + struct page **pages) { struct xen_drm_front_evtchnl *evtchnl; struct xen_drm_front_dbuf *dbuf; @@ -194,6 +195,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, req->op.dbuf_create.gref_directory = xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); req->op.dbuf_create.buffer_sz = size; + req->op.dbuf_create.data_ofs = offset; req->op.dbuf_create.dbuf_cookie = dbuf_cookie; req->op.dbuf_create.width = width; req->op.dbuf_create.height = height; @@ -400,15 +402,15 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, args->size = args->pitch * args->height; obj = xen_drm_front_gem_create(dev, args->size); - if (IS_ERR_OR_NULL(obj)) { - ret = PTR_ERR_OR_ZERO(obj); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); goto fail; } ret = xen_drm_front_dbuf_create(drm_info->front_info, xen_drm_front_dbuf_to_cookie(obj), args->width, args->height, args->bpp, - args->size, + args->size, 0, xen_drm_front_gem_get_pages(obj)); if (ret) goto fail_backend; diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h index f92c258350ca..54486d89650e 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.h +++ b/drivers/gpu/drm/xen/xen_drm_front.h @@ -145,7 +145,7 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u32 width, u32 height, - u32 bpp, u64 size, struct page **pages); + u32 bpp, u64 size, u32 offset, struct page **pages); int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u64 fb_cookie, u32 width, diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c index 459702fa990e..44f1f70c0aed 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_conn.c +++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c @@ -33,6 +33,7 @@ static const u32 plane_formats[] = { DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555, DRM_FORMAT_ARGB1555, + DRM_FORMAT_YUYV, }; const u32 *xen_drm_front_conn_get_formats(int *format_count) diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..39ff95b75357 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) size = 
round_up(size, PAGE_SIZE); xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return xen_obj; if (drm_info->front_info->cfg.be_alloc) { @@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) */ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); xen_obj->pages = drm_gem_get_pages(&xen_obj->base); - if (IS_ERR_OR_NULL(xen_obj->pages)) { + if (IS_ERR(xen_obj->pages)) { ret = PTR_ERR(xen_obj->pages); xen_obj->pages = NULL; goto fail; @@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, struct xen_gem_object *xen_obj; xen_obj = gem_create(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); return &xen_obj->base; @@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, size = attach->dmabuf->size; xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); ret = gem_alloc_pages_array(xen_obj, size); @@ -210,7 +210,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, ret = xen_drm_front_dbuf_create(drm_info->front_info, xen_drm_front_dbuf_to_cookie(&xen_obj->base), - 0, 0, 0, size, xen_obj->pages); + 0, 0, 0, size, sgt->sgl->offset, + xen_obj->pages); if (ret < 0) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 78096bbcd226..ef11b1e4de39 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp, int ret; fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); - if (IS_ERR_OR_NULL(fb)) + if (IS_ERR(fb)) return fb; gem_obj = fb->obj[0]; diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c index 76a16d997a23..cd79ca0a92a9 100644 --- a/drivers/gpu/drm/zte/zx_hdmi.c +++ b/drivers/gpu/drm/zte/zx_hdmi.c @@ -439,8 +439,8 @@ static int zx_hdmi_audio_hw_params(struct device *dev, return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO); } -static int zx_hdmi_audio_digital_mute(struct device *dev, void *data, - bool enable) +static int zx_hdmi_audio_mute(struct device *dev, void *data, + bool enable, int direction) { struct zx_hdmi *hdmi = dev_get_drvdata(dev); @@ -468,8 +468,9 @@ static const struct hdmi_codec_ops zx_hdmi_codec_ops = { .audio_startup = zx_hdmi_audio_startup, .hw_params = zx_hdmi_audio_hw_params, .audio_shutdown = zx_hdmi_audio_shutdown, - .digital_mute = zx_hdmi_audio_digital_mute, + .mute_stream = zx_hdmi_audio_mute, .get_eld = zx_hdmi_audio_get_eld, + .no_capture_mute = 1, }; static struct hdmi_codec_pdata zx_hdmi_codec_pdata = { |