Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c        |  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h        |  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c      | 55
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  | 28
5 files changed, 34 insertions(+), 64 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 51241de5e7a7..713848c36349 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
+ if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
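The two checks added above reject a command ring whose [start, start + size) range falls outside the vGPU's visible GGTT before scanning begins, failing with -EINVAL instead of walking unvalidated graphics memory. A minimal sketch of what such a range check amounts to, assuming the vgpu_gmadr_is_valid() helper from gvt.h; the helper name and the wrap-around guard here are illustrative, not the exact body of intel_gvt_ggtt_validate_range():

/* Sketch of a GGTT range check; intel_gvt_ggtt_validate_range() in
 * gvt.h is the real helper, this only illustrates the shape. */
static bool ggtt_range_is_valid(struct intel_vgpu *vgpu,
				unsigned long addr, unsigned long size)
{
	/* An empty ring holds no commands to scan. */
	if (size == 0)
		return false;

	/* Guard against wrap-around before testing the bounds. */
	if (addr + size - 1 < addr)
		return false;

	/* Both ends must sit inside the vGPU-visible GGTT. */
	return vgpu_gmadr_is_valid(vgpu, addr) &&
	       vgpu_gmadr_is_valid(vgpu, addr + size - 1);
}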
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 66374dba3b1a..6166e34d892b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ __free_page(gvt->gtt.scratch_ggtt_page);
return ret;
}
}
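This hunk plugs a leak on the failure path of intel_gvt_init_gtt(): when setup_spt_oos() fails, the scratch page's DMA mapping and the page itself are now released before returning. The surrounding function follows the usual kernel goto-unwind idiom; a self-contained sketch of that pattern, with example_init() and example_setup() as hypothetical stand-ins for the real setup steps:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>	/* PCI_DMA_BIDIRECTIONAL, as used in the hunk */

static int example_setup(void);	/* hypothetical stand-in for setup_spt_oos() */

static int example_init(struct device *dev)
{
	struct page *page;
	dma_addr_t daddr;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	daddr = dma_map_page(dev, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		ret = -ENOMEM;
		goto err_free_page;
	}

	ret = example_setup();
	if (ret)
		goto err_unmap;	/* unwind in reverse order of setup */

	return 0;

err_unmap:
	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
err_free_page:
	__free_page(page);
	return ret;
}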
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 28d817e96e58..3a74e79eac2f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -182,9 +182,6 @@ struct intel_vgpu {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
- struct work_struct unpin_work;
- spinlock_t unpin_lock; /* To protect unpin_list */
- struct list_head unpin_list;
} vdev;
#endif
};
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 75a6e1d8af0d..fd0c85f9ef3c 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -78,7 +78,6 @@ struct gvt_dma {
struct rb_node node;
gfn_t gfn;
unsigned long iova;
- struct list_head list;
};
static inline bool handle_valid(unsigned long handle)
@@ -167,7 +166,6 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new->gfn = gfn;
new->iova = iova;
- INIT_LIST_HEAD(&new->list);
mutex_lock(&vgpu->vdev.cache_lock);
while (*link) {
@@ -199,52 +197,26 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
kfree(entry);
}
-static void intel_vgpu_unpin_work(struct work_struct *work)
+static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.unpin_work);
struct device *dev = mdev_dev(vgpu->vdev.mdev);
struct gvt_dma *this;
- unsigned long gfn;
-
- for (;;) {
- spin_lock(&vgpu->vdev.unpin_lock);
- if (list_empty(&vgpu->vdev.unpin_list)) {
- spin_unlock(&vgpu->vdev.unpin_lock);
- break;
- }
- this = list_first_entry(&vgpu->vdev.unpin_list,
- struct gvt_dma, list);
- list_del(&this->list);
- spin_unlock(&vgpu->vdev.unpin_lock);
-
- gfn = this->gfn;
- vfio_unpin_pages(dev, &gfn, 1);
- kfree(this);
- }
-}
-
-static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-{
- struct gvt_dma *this;
+ unsigned long g1;
+ int rc;
mutex_lock(&vgpu->vdev.cache_lock);
this = __gvt_cache_find(vgpu, gfn);
if (!this) {
mutex_unlock(&vgpu->vdev.cache_lock);
- return false;
+ return;
}
+
+ g1 = gfn;
gvt_dma_unmap_iova(vgpu, this->iova);
- /* remove this from rb tree */
- rb_erase(&this->node, &vgpu->vdev.cache);
+ rc = vfio_unpin_pages(dev, &g1, 1);
+ WARN_ON(rc != 1);
+ __gvt_cache_remove_entry(vgpu, this);
mutex_unlock(&vgpu->vdev.cache_lock);
-
- /* put this to the unpin_list */
- spin_lock(&vgpu->vdev.unpin_lock);
- list_move_tail(&this->list, &vgpu->vdev.unpin_list);
- spin_unlock(&vgpu->vdev.unpin_lock);
-
- return true;
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
@@ -485,9 +457,6 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
}
INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
- INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
- spin_lock_init(&vgpu->vdev.unpin_lock);
- INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
vgpu->vdev.mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
@@ -517,7 +486,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu,
vdev.iommu_notifier);
- bool sched_unmap = false;
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -527,10 +495,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
end_gfn = gfn + unmap->size / PAGE_SIZE;
while (gfn < end_gfn)
- sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-
- if (sched_unmap)
- schedule_work(&vgpu->vdev.unpin_work);
+ gvt_cache_remove(vgpu, gfn++);
}
return NOTIFY_OK;
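Together with the gvt.h hunk above, this removes the deferred unpin_work machinery entirely: since vfio_unpin_pages() is safe to call from the IOMMU notifier path, gvt_cache_remove() now unmaps the IOVA, unpins the page, and erases the rb-tree entry synchronously under cache_lock. A condensed sketch of the resulting flow, simplified from kvmgt.c and not a drop-in replacement:

/* Condensed sketch of the synchronous removal path. */
static void cache_remove_sync(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *entry;
	unsigned long g = gfn;

	mutex_lock(&vgpu->vdev.cache_lock);
	entry = __gvt_cache_find(vgpu, gfn);		/* rb-tree lookup */
	if (entry) {
		gvt_dma_unmap_iova(vgpu, entry->iova);
		/* vfio_unpin_pages() returns the count of pages unpinned. */
		WARN_ON(vfio_unpin_pages(dev, &g, 1) != 1);
		__gvt_cache_remove_entry(vgpu, entry);	/* rb_erase + kfree */
	}
	mutex_unlock(&vgpu->vdev.cache_lock);
}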
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 488fdea348a9..4f7057d62d88 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- /* If the status is -EINPROGRESS means this workload
- * doesn't meet any issue during dispatching so when
- * get the SCHEDULE_OUT set the status to be zero for
- * good. If the status is NOT -EINPROGRESS means there
- * is something wrong happened during dispatching and
- * the status should not be set to zero
- */
- if (workload->status == -EINPROGRESS)
- workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
+ /* If this request caused a GPU hang, req->fence.error will
+  * be set to -EIO. Propagate -EIO into workload->status so
+  * that a hung request does not trigger a context switch
+  * interrupt to the guest.
+  */
+ if (likely(workload->status == -EINPROGRESS)) {
+ if (workload->req->fence.error == -EIO)
+ workload->status = -EIO;
+ else
+ workload->status = 0;
+ }
+
i915_gem_request_put(fetch_and_zero(&workload->req));
if (!workload->status && !vgpu->resetting) {
@@ -464,8 +467,6 @@ struct workload_thread_param {
int ring_id;
};
-static DEFINE_MUTEX(scheduler_mutex);
-
static int workload_thread(void *priv)
{
struct workload_thread_param *p = (struct workload_thread_param *)priv;
@@ -497,8 +498,6 @@ static int workload_thread(void *priv)
if (!workload)
break;
- mutex_lock(&scheduler_mutex);
-
gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
workload->ring_id, workload,
workload->vgpu->id);
@@ -537,9 +536,6 @@ complete:
FORCEWAKE_ALL);
intel_runtime_pm_put(gvt->dev_priv);
-
- mutex_unlock(&scheduler_mutex);
-
}
return 0;
}
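Two simplifications land in scheduler.c. First, workload->status is finalized in complete_current_workload() once the shadow context has gone idle, instead of in the SCHEDULE_OUT notifier: if the request hung the GPU, req->fence.error carries -EIO and that is propagated so no context-switch interrupt is raised toward the guest; otherwise the in-flight -EINPROGRESS marker becomes 0. Second, the global scheduler_mutex is dropped, so per-ring workload threads no longer serialize against each other. The status decision condenses to the following, restating the hunk above:

/* Finalize status once the shadow context is idle: -EINPROGRESS
 * means dispatch saw no error, so the fence error decides. */
if (workload->status == -EINPROGRESS)
	workload->status = (workload->req->fence.error == -EIO) ? -EIO : 0;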