Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 153
1 file changed, 105 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 39f5c658ef5e..c0ae6bbbd9b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* Only handle setting domains to types used by the CPU. */
- if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
- if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ if (read_domains & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
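The open-coded ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT) masks used throughout this file are replaced by the I915_GEM_GPU_DOMAINS shorthand. For reference, a sketch of the domain bits and that shorthand as they appear in the i915 headers (the definitions live outside this file, so treat the exact values as an approximation):

/* Sketch of the domain bits (uapi i915_drm.h) and the GPU-domains
 * shorthand used by this patch; reproduced here only for reference. */
#define I915_GEM_DOMAIN_CPU		0x00000001
#define I915_GEM_DOMAIN_RENDER		0x00000002
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
#define I915_GEM_DOMAIN_COMMAND		0x00000008
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
#define I915_GEM_DOMAIN_VERTEX		0x00000020
#define I915_GEM_DOMAIN_GTT		0x00000040

/* Everything the GPU itself reads or writes, i.e. every domain
 * except CPU and GTT. */
#define I915_GEM_GPU_DOMAINS	(I915_GEM_DOMAIN_RENDER | \
				 I915_GEM_DOMAIN_SAMPLER | \
				 I915_GEM_DOMAIN_COMMAND | \
				 I915_GEM_DOMAIN_INSTRUCTION | \
				 I915_GEM_DOMAIN_VERTEX)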
@@ -1481,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
* Returned sequence numbers are nonzero on success.
*/
static uint32_t
-i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = NULL;
struct drm_i915_gem_request *request;
uint32_t seqno;
int was_empty;
RING_LOCALS;
+ if (file_priv != NULL)
+ i915_file_priv = file_priv->driver_priv;
+
request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
if (request == NULL)
return 0;
@@ -1515,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->emitted_jiffies = jiffies;
was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ if (i915_file_priv) {
+ list_add_tail(&request->client_list,
+ &i915_file_priv->mm.request_list);
+ } else {
+ INIT_LIST_HEAD(&request->client_list);
+ }
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
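The new file_priv argument lets i915_add_request() record which client emitted each request, threading it onto a per-file list alongside the global one. A minimal sketch of the structure fields this hunk relies on; the real declarations are added to i915_drv.h in the same commit, outside this file's diffstat, so field layout here is an assumption:

/* Sketch only: request linked on both the device-wide list and the
 * emitting client's list. */
struct drm_i915_gem_request {
	uint32_t seqno;
	unsigned long emitted_jiffies;
	struct list_head list;		/* dev_priv->mm.request_list */
	struct list_head client_list;	/* i915_file_priv->mm.request_list */
};

struct drm_i915_file_private {
	struct {
		struct list_head request_list;	/* requests emitted by this fd */
	} mm;
};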
@@ -1664,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_request(dev, request);
list_del(&request->list);
+ list_del(&request->client_list);
drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
} else
break;
@@ -1702,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
BUG_ON(seqno == 0);
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- ier = I915_READ(IER);
+ if (IS_IGDNG(dev))
+ ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else
+ ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
@@ -1754,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev,
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) {
+ if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
/*
* read/write caches:
*
@@ -1977,7 +1991,7 @@ i915_gem_evict_something(struct drm_device *dev)
i915_gem_flush(dev,
obj->write_domain,
obj->write_domain);
- i915_add_request(dev, obj->write_domain);
+ i915_add_request(dev, NULL, obj->write_domain);
obj = NULL;
continue;
@@ -1991,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev)
/* If we didn't do any of the above, there's nothing to be done
* and we just can't fit it in.
*/
- return -ENOMEM;
+ return -ENOSPC;
}
return ret;
}
@@ -2006,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret != 0)
break;
}
- if (ret == -ENOMEM)
+ if (ret == -ENOSPC)
return 0;
return ret;
}
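Several hunks in this patch change "address space full" failures from -ENOMEM to -ENOSPC, so callers can distinguish an exhausted GTT (worth evicting and retrying) from a genuine allocation failure. A hypothetical caller illustrating the split; the real retry logic is the pin_tries loop in i915_gem_execbuffer() further down:

/* Hypothetical helper, illustrative only: retry when the GTT is merely
 * full, propagate real out-of-memory errors unchanged. */
static int bind_with_retry(struct drm_gem_object *obj, unsigned alignment)
{
	int ret;

	ret = i915_gem_object_bind_to_gtt(obj, alignment);
	if (ret == -ENOSPC) {
		/* GTT full: make room, then try once more. */
		ret = i915_gem_evict_something(obj->dev);
		if (ret == 0)
			ret = i915_gem_object_bind_to_gtt(obj, alignment);
	}
	return ret;	/* -ENOMEM and other errors pass through */
}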
@@ -2215,7 +2229,7 @@ try_again:
loff_t offset;
if (avail == 0)
- return -ENOMEM;
+ return -ENOSPC;
for (i = dev_priv->fence_reg_start;
i < dev_priv->num_fence_regs; i++) {
@@ -2248,7 +2262,7 @@ try_again:
i915_gem_flush(dev,
I915_GEM_GPU_DOMAINS,
I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev,
+ seqno = i915_add_request(dev, NULL,
I915_GEM_GPU_DOMAINS);
if (seqno == 0)
return -ENOMEM;
@@ -2364,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
spin_unlock(&dev_priv->mm.active_list_lock);
if (lists_empty) {
DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOMEM;
+ return -ENOSPC;
}
ret = i915_gem_evict_something(dev);
@@ -2409,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
return 0;
}
@@ -2452,7 +2466,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, obj->write_domain);
+ seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
i915_gem_object_move_to_active(obj, seqno);
}
@@ -3035,20 +3049,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
int nbox = exec->num_cliprects;
int i = 0, count;
- uint32_t exec_start, exec_len;
+ uint32_t exec_start, exec_len;
RING_LOCALS;
exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
exec_len = (uint32_t) exec->batch_len;
- if ((exec_start | exec_len) & 0x7) {
- DRM_ERROR("alignment\n");
- return -EINVAL;
- }
-
- if (!exec_start)
- return -EINVAL;
-
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
@@ -3089,6 +3095,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
+ * Note that if we were to use the current jiffies each time around the loop,
+ * we wouldn't escape the function with any frames outstanding if the time to
+ * render a frame was over 20ms.
+ *
* This should get us reasonable parallelism between CPU and GPU but also
* relatively low latency when blocking on a particular request to finish.
*/
@@ -3097,15 +3107,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
int ret = 0;
- uint32_t seqno;
+ unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
mutex_lock(&dev->struct_mutex);
- seqno = i915_file_priv->mm.last_gem_throttle_seqno;
- i915_file_priv->mm.last_gem_throttle_seqno =
- i915_file_priv->mm.last_gem_seqno;
- if (seqno)
- ret = i915_wait_request(dev, seqno);
+ while (!list_empty(&i915_file_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&i915_file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
+
+ if (time_after_eq(request->emitted_jiffies, recent_enough))
+ break;
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret != 0)
+ break;
+ }
mutex_unlock(&dev->struct_mutex);
+
return ret;
}
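Putting the hunk above together: the throttle path now walks the client's own request list and waits only for requests emitted more than 20 ms ago, computing the cutoff once so a slow GPU cannot trap the caller in the loop. Reconstructed from the diff, the function now reads roughly as follows (context lines not shown above may differ slightly):

static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		/* Requests newer than the cutoff stay outstanding. */
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/* Waiting retires the request, removing it from the list. */
		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}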
@@ -3182,12 +3202,29 @@ err:
return ret;
}
+static int
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ uint32_t exec_start, exec_len;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7)
+ return -EINVAL;
+
+ if (!exec_start)
+ return -EINVAL;
+
+ return 0;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
@@ -3312,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
break;
/* error other than GTT full, or we've already tried again */
- if (ret != -ENOMEM || pin_tries >= 1) {
+ if (ret != -ENOSPC || pin_tries >= 1) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to pin buffers %d\n", ret);
goto err;
@@ -3331,8 +3368,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = object_list[args->buffer_count-1];
- batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
- batch_obj->pending_write_domain = 0;
+ if (batch_obj->pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ /* Sanity check the batch buffer, prior to moving objects */
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+ ret = i915_gem_check_execbuffer (args, exec_offset);
+ if (ret != 0) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ goto err;
+ }
i915_verify_inactive(dev, __FILE__, __LINE__);
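The batch buffer is always the last object in the execbuffer list, so the new check validates its offset and length before any domains are moved, and a batch listed as a write target is rejected outright. A hypothetical userspace sketch of the layout the kernel now expects; struct and ioctl names follow the original execbuffer interface, but handles, sizes, and include paths are illustrative only:

#include <stdint.h>
#include <xf86drm.h>		/* drmIoctl() */
#include <drm/i915_drm.h>	/* execbuffer structs and ioctl number */

/* Hypothetical helper: submit a batch as the last (here, only) object,
 * with 8-byte aligned start offset and length, never as a write target. */
static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_bytes)
{
	struct drm_i915_gem_exec_object obj = {
		.handle = batch_handle,		/* batch object goes last */
	};
	struct drm_i915_gem_execbuffer execbuf = {
		.buffers_ptr        = (uintptr_t)&obj,
		.buffer_count       = 1,
		.batch_start_offset = 0,		/* 8-byte aligned */
		.batch_len          = batch_bytes,	/* 8-byte aligned */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}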
@@ -3363,7 +3412,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains)
- (void)i915_add_request(dev, dev->flush_domains);
+ (void)i915_add_request(dev, file_priv,
+ dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
@@ -3381,8 +3431,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
}
#endif
- exec_offset = exec_list[args->buffer_count - 1].offset;
-
#if WATCH_EXEC
i915_gem_dump_object(batch_obj,
args->batch_len,
@@ -3412,9 +3460,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
* *some* interrupts representing completion of buffers that we can
* wait on when trying to clear up gtt space).
*/
- seqno = i915_add_request(dev, flush_domains);
+ seqno = i915_add_request(dev, file_priv, flush_domains);
BUG_ON(seqno == 0);
- i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -3520,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0 &&
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
}
@@ -3548,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
*/
if (obj_priv->pin_count == 0) {
if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0)
+ (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
list_move_tail(&obj_priv->list,
&dev_priv->mm.inactive_list);
atomic_dec(&dev->pin_count);
@@ -3653,15 +3698,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
args->handle);
- mutex_unlock(&dev->struct_mutex);
return -EBADF;
}
+ mutex_lock(&dev->struct_mutex);
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs are
* actually unmasked, and our working set ends up being larger than
@@ -3800,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev)
/* Flush the GPU along with all non-CPU write domains
*/
- i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
- ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
if (seqno == 0) {
mutex_unlock(&dev->struct_mutex);
@@ -4352,3 +4395,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
drm_agp_chipset_flush(dev);
return 0;
}
+
+void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ /* Clean up our request list when the client is going away, so that
+ * later retire_requests won't dereference our soon-to-be-gone
+ * file_priv.
+ */
+ mutex_lock(&dev->struct_mutex);
+ while (!list_empty(&i915_file_priv->mm.request_list))
+ list_del_init(i915_file_priv->mm.request_list.next);
+ mutex_unlock(&dev->struct_mutex);
+}
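i915_gem_release() only unlinks the per-client entries; the requests themselves stay on the global list and are freed by the normal retire path. It is expected to be called from the driver's file-close path, roughly as sketched below (the hook lives outside this file's diffstat, so the exact wiring is an assumption):

/* Sketch of the expected caller, normally in i915_dma.c; hook name and
 * surroundings are assumptions since they are outside this diff. */
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	i915_gem_release(dev, file_priv);
	/* ... other per-file teardown ... */
}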