summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorXiaoguang Chen <xiaoguang.chen@intel.com>2016-11-03 18:38:30 +0800
committerZhenyu Wang <zhenyuw@linux.intel.com>2016-11-10 15:42:39 +0800
commitc754936fe66c45d2075970dc1e6ebdfeec4df6f3 (patch)
tree203ad09eb6603e7584836985621ecb8b0e2e6879
parent9baf0920b5d8e4e4ea302cb954168e06cf838d63 (diff)
drm/i915/gvt: use kmap instead of kmap_atomic around guest memory access
kmap_atomic doesn't allow sleep until unmapped. However, it's necessary to allow sleep during reading/writing guest memory, so use kmap instead.

Signed-off-by: Bing Niu <bing.niu@intel.com>
Signed-off-by: Xiaoguang Chen <xiaoguang.chen@intel.com>
Signed-off-by: Jike Song <jike.song@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 843a5de4300d..7d87c43661c5 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- dst = kmap_atomic(page);
+ dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
- kunmap_atomic(dst);
+ kunmap(page);
i++;
}
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
return 0;
}
@@ -318,10 +318,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
- src = kmap_atomic(page);
+ src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
- kunmap_atomic(src);
+ kunmap(page);
i++;
}
@@ -329,7 +329,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap_atomic(page);
+ shadow_ring_context = kmap(page);
#define COPY_REG(name) \
intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -347,7 +347,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
sizeof(*shadow_ring_context),
GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
- kunmap_atomic(shadow_ring_context);
+ kunmap(page);
}
static void complete_current_workload(struct intel_gvt *gvt, int ring_id)