-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c             |  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c          | 59
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c             |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c             | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c                 |  6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c              |  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  |  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c   | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  |  1
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c                    |  5
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c                    |  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                | 81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c            |  2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c                |  6
14 files changed, 128 insertions, 73 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6896dec97fc7..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
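The hunk above grows the list of hwmon attributes hidden on APUs. In the hwmon sysfs model, an is_visible callback that returns 0 removes the attribute entirely, while a nonzero return becomes its permission bits. A minimal sketch of that pattern; my_data and is_apu are hypothetical, not amdgpu's actual fields:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static umode_t my_hwmon_is_visible(struct kobject *kobj,
				   struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct my_data *data = dev_get_drvdata(dev);

	/* APUs report no package power sensor: hide the attribute */
	if (data->is_apu &&
	    attr == &sensor_dev_attr_power1_average.dev_attr.attr)
		return 0;

	return attr->mode;	/* keep the statically declared permissions */
}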
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
return ERR_PTR(ret);
}
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+ struct dma_fence **fences;
+ unsigned int count;
+ int r;
+
+ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ return 0;
+
+ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ if (r)
+ return r;
+
+ if (count == 0) {
+ /* Now that was unexpected. */
+ } else if (count == 1) {
+ reservation_object_add_excl_fence(obj, fences[0]);
+ dma_fence_put(fences[0]);
+ kfree(fences);
+ } else {
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1), 0,
+ false);
+ if (!array)
+ goto err_fences_put;
+
+ reservation_object_add_excl_fence(obj, &array->base);
+ dma_fence_put(&array->base);
+ }
+
+ return 0;
+
+err_fences_put:
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+}
+
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (attach->dev->driver != adev->dev->driver) {
/*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
*/
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(r < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ r = __reservation_object_make_exclusive(bo->tbo.resv);
+ if (r)
goto error_unreserve;
- }
}
/* pin buffer into GTT */
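The new helper leans on two core dma-fence primitives: dma_fence_context_alloc() reserves a fresh fence context, and dma_fence_array_create() wraps N fences into one composite fence that signals only once all of them have. A compressed sketch of just that composition step, against the same 4.x-era API the patch uses (on success the array takes ownership of the fences[] array; on failure the caller must drop the references itself):

#include <linux/dma-fence-array.h>

static struct dma_fence *fold_fences(struct dma_fence **fences,
				     unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       0,	/* starting seqno */
				       false);	/* signal when ALL signal */
	return array ? &array->base : NULL;
}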
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d2ea5ce2cefb..7c108e687683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
+ unsigned long flags;
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**
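The lock change matters because amdgpu_vm_get_task_info() is also reached from the GPU page-fault interrupt path: taking pasid_lock with plain spin_lock() can deadlock when the interrupt fires on a CPU that already holds the lock in process context. The irqsave variant is the stock remedy; the pattern in isolation:

unsigned long flags;

/* masks local IRQs so the fault handler cannot preempt the holder */
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
/* ... touch state shared with the interrupt handler ... */
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);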
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
+ u32 tmp = 0;
+ if (enable) {
+ tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
+ REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
+
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
+ lower_32_bits(adev->doorbell.base));
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+ upper_32_bits(adev->doorbell.base));
+ }
+
+ WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
- adev->external_rev_id = adev->rev_id + 0x81;
+ adev->external_rev_id = adev->rev_id + 0x79;
else if (adev->pdev->device == 0x15d8)
adev->external_rev_id = adev->rev_id + 0x41;
+ else if (adev->rev_id == 1)
+ adev->external_rev_id = adev->rev_id + 0x20;
else
- adev->external_rev_id = 0x1;
+ adev->external_rev_id = adev->rev_id + 0x01;
if (adev->rev_id >= 0x8) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 5d85ff341385..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
return 0;
}
-#if CONFIG_X86_64
+#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
uint32_t *num_entries,
struct crat_subtype_iolink *sub_type_hdr)
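The one-character fix silences a -Wundef warning: #if tests a macro's value (an undefined name quietly evaluates to 0), while #ifdef only tests whether the name is defined at all. Kconfig bool symbols are defined to 1 when enabled and simply absent otherwise, so #ifdef is the right test. A standalone illustration, compiled with gcc -Wundef:

#include <stdio.h>

#if MY_FEATURE			/* warns: "MY_FEATURE" is not defined */
#define MSG_IF "if: on"
#else
#define MSG_IF "if: off"
#endif

#ifdef MY_FEATURE		/* silent either way */
#define MSG_IFDEF "ifdef: defined"
#else
#define MSG_IFDEF "ifdef: undefined"
#endif

int main(void)
{
	printf("%s / %s\n", MSG_IF, MSG_IFDEF);	/* if: off / ifdef: undefined */
	return 0;
}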
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f4fa40c387d3..0b392bfca284 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
}
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_vrr_capable_property(
&aconnector->base);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..19801bdba0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
- pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+ /*
+  * As a workaround for >4x4K lightup, set dcfclock to the
+  * min_engine_clock value. This is not required for fewer than
+  * five displays, so don't request dcfclk in dc to avoid
+  * impacting power saving.
+  */
+ pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
+ 		pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
break;
case amd_pp_dpp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+ break;
default:
return -EINVAL;
}
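Without the added break, the amd_pp_dpp_clock case assigned pclk_vol_table and then fell straight into default, returning -EINVAL despite the successful lookup. The bug shape in a self-contained program (clock names here are illustrative):

#include <stdio.h>

enum clk { CLK_DCE, CLK_DPP, CLK_BAD };

static int lookup(enum clk c, int *khz)
{
	switch (c) {
	case CLK_DCE:
		*khz = 400000;
		break;
	case CLK_DPP:
		*khz = 300000;
		/* missing break: control falls through into default */
	default:
		return -22;	/* -EINVAL */
	}
	return 0;
}

int main(void)
{
	int khz = 0;
	printf("CLK_DPP -> %d\n", lookup(CLK_DPP, &khz));	/* prints -22 */
	return 0;
}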
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
} else {
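Both radeon hunks share one shape: when the GPU sits directly on a root bus, rdev->pdev->bus->self is NULL, so handing it to pcie_get_speed_cap() would dereference a NULL pointer, and skipping the call would previously have left speed_cap uninitialized. The sentinel-plus-guard idiom in isolation, reusing the patch's names for context:

enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;	/* safe default */

if (!pci_is_root_bus(rdev->pdev->bus))	/* bus->self is NULL on a root bus */
	speed_cap = pcie_get_speed_cap(rdev->pdev->bus->self);

if (speed_cap == PCI_SPEED_UNKNOWN)	/* either path lands here safely */
	pi->sys_pcie_mask = 0;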
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
**************************************************************************/
#include <linux/module.h>
#include <linux/console.h>
+#include <linux/dma-mapping.h>
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
}
/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+ return !dma_is_direct(ops) && ops &&
+ ops->map_page != dma_direct_map_page;
+}
+
+/**
* vmw_dma_select_mode - Determine how DMA mappings should be set up for this
* system.
*
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Keeping DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
- const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_enabled) {
+ if (vmw_force_coherent)
+ dev_priv->map_mode = vmw_dma_alloc_coherent;
+ else if (vmw_assume_iommu(dev_priv->dev))
dev_priv->map_mode = vmw_dma_map_populate;
- goto out_fixup;
- }
-#endif
-
- if (!(vmw_force_iommu || vmw_force_coherent)) {
+ else if (!vmw_force_iommu)
dev_priv->map_mode = vmw_dma_phys;
- DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
- return 0;
- }
-
- dev_priv->map_mode = vmw_dma_map_populate;
-
- if (dma_ops && dma_ops->sync_single_for_cpu)
+ else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
dev_priv->map_mode = vmw_dma_alloc_coherent;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl() == 0)
+ else
dev_priv->map_mode = vmw_dma_map_populate;
-#endif
-#ifdef CONFIG_INTEL_IOMMU
-out_fixup:
-#endif
- if (dev_priv->map_mode == vmw_dma_map_populate &&
- vmw_restrict_iommu)
+ if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
- if (vmw_force_coherent)
- dev_priv->map_mode = vmw_dma_alloc_coherent;
-
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
- /*
- * No coherent page pool
- */
- if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ /* No TTM coherent page pool? FIXME: Ask TTM instead! */
+ if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+ (dev_priv->map_mode == vmw_dma_alloc_coherent))
return -EINVAL;
-#endif
-
-#else /* CONFIG_X86 */
- dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
return 0;
}
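The rewrite trades nested #ifdef blocks for IS_ENABLED(), which keeps both branches visible to the compiler and lets dead-code elimination drop the losing one. The kernel builds IS_ENABLED() from a token-pasting trick in include/linux/kconfig.h; a simplified standalone version (the real macro additionally accepts =m module symbols):

#include <stdio.h>

#define CONFIG_FOO 1	/* comment out to flip the output */

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	if (IS_ENABLED(CONFIG_FOO))	/* ordinary C condition, both arms compiled */
		printf("CONFIG_FOO enabled\n");
	else
		printf("CONFIG_FOO disabled\n");
	return 0;
}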
@@ -625,24 +612,20 @@ out_fixup:
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
* restriction also for 64-bit systems.
*/
-#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ int ret = 0;
- if (intel_iommu_enabled &&
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (dev_priv->map_mode != vmw_dma_phys &&
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
- return 0;
-}
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
- return 0;
+
+ return ret;
}
-#endif
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
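dma_set_mask_and_coherent() sets the streaming and coherent DMA masks together, where the old code updated only the streaming mask. The 44-bit restriction is DMA_BIT_MASK(44); the macro is small enough to sanity-check in isolation, with the definition copied from include/linux/dma-mapping.h:

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* the n == 64 special case avoids a full-width shift, which is UB */
	printf("44-bit mask: 0x%llx\n", DMA_BIT_MASK(44));	/* 0xfffffffffff */
	printf("64-bit mask: 0x%llx\n", DMA_BIT_MASK(64));
	return 0;
}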
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
*p_fence = NULL;
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!du->pref_active) {
+ if (!du->pref_active && new_crtc_state->enable) {
ret = -EINVAL;
goto clean;
}
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
user_fence_rep)
{
struct vmw_fence_obj *fence = NULL;
- uint32_t handle;
- int ret;
+ uint32_t handle = 0;
+ int ret = 0;
if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
out_fence)