Diffstat (limited to 'drivers/dma-buf')
 drivers/dma-buf/Kconfig                                                  |   5
 drivers/dma-buf/Makefile                                                 |   8
 drivers/dma-buf/dma-buf.c                                                |  28
 drivers/dma-buf/dma-fence-array.c                                        |  32
 drivers/dma-buf/dma-fence-chain.c                                        |  24
 drivers/dma-buf/dma-fence.c                                              |  55
 drivers/dma-buf/dma-resv.c (renamed from drivers/dma-buf/reservation.c)  | 251
 drivers/dma-buf/selftest.c                                               | 167
 drivers/dma-buf/selftest.h                                               |  30
 drivers/dma-buf/selftests.h                                              |  13
 drivers/dma-buf/st-dma-fence.c                                           | 574
 drivers/dma-buf/sw_sync.c                                                |  16
 drivers/dma-buf/sync_file.c                                              |   2
13 files changed, 1047 insertions(+), 158 deletions(-)
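Before the hunks themselves, a note on the biggest change below: drivers/dma-buf/reservation.c becomes dma-resv.c, &struct reservation_object becomes &struct dma_resv, and every reservation_object_* helper is renamed dma_resv_*. As a reading aid, here is a minimal sketch of the renamed API from a driver's point of view. The publish_fence() wrapper is hypothetical, and dma_resv_lock()/dma_resv_unlock() are assumed from the matching <linux/dma-resv.h> rename, which is outside this diff (limited to drivers/dma-buf):

/* Hypothetical driver snippet: publish a fence on a buffer using the
 * renamed API. dma_resv_lock()/dma_resv_unlock() are the ww_mutex
 * wrappers assumed from <linux/dma-resv.h>; error handling is trimmed.
 */
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int publish_fence(struct dma_resv *resv, struct dma_fence *fence,
			 bool is_write)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);	/* was reservation_object_lock() */
	if (ret)
		return ret;

	if (is_write) {
		/* replaces any previous exclusive fence */
		dma_resv_add_excl_fence(resv, fence);
	} else {
		/* make room first, then add a shared (read) fence */
		ret = dma_resv_reserve_shared(resv, 1);
		if (!ret)
			dma_resv_add_shared_fence(resv, fence);
	}

	dma_resv_unlock(resv);
	return ret;
}

Only the spelling changes: the ww_mutex locking rules for reserving and adding fences are exactly those of the old reservation_object_* API.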
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b6a9c2f1bc41..a23b6752d11a 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -39,4 +39,9 @@ config UDMABUF
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
 
+config DMABUF_SELFTESTS
+	tristate "Selftests for the dma-buf interfaces"
+	default n
+	depends on DMA_SHARED_BUFFER
+
 endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index e8c7310cb800..03479da06422 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 reservation.o seqno-fence.o
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
+
+dmabuf_selftests-y := \
+	selftest.o \
+	st-dma-fence.o
+
+obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f45bfb29ef96..433d91d710e4 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count, seq;
@@ -506,13 +506,13 @@ err_alloc_file:
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
@@ -544,8 +544,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 
 	if (!resv) {
-		resv = (struct reservation_object *)&dmabuf[1];
-		reservation_object_init(resv);
+		resv = (struct dma_resv *)&dmabuf[1];
+		dma_resv_init(resv);
 	}
 	dmabuf->resv = resv;
 
@@ -909,11 +909,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
@@ -1154,8 +1154,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned seq;
 	int count = 0, attach_count, shared_count, i;
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 12c6f64c0bc2..d3fbd950be94 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/dma-fence-array.h>
 
+#define PENDING_ERROR 1
+
 static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
 {
 	return "dma_fence_array";
@@ -23,10 +25,29 @@ static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
 	return "unbound";
 }
 
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+					      int error)
+{
+	/*
+	 * Propagate the first error reported by any of our fences, but only
+	 * before we ourselves are signaled.
+	 */
+	if (error)
+		cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+	/* Clear the error flag if not actually set. */
+	cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
 static void irq_dma_fence_array_work(struct irq_work *wrk)
 {
 	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
 
+	dma_fence_array_clear_pending_error(array);
+
 	dma_fence_signal(&array->base);
 	dma_fence_put(&array->base);
 }
@@ -38,6 +59,8 @@ static void dma_fence_array_cb_func(struct dma_fence *f,
 		container_of(cb, struct dma_fence_array_cb, cb);
 	struct dma_fence_array *array = array_cb->array;
 
+	dma_fence_array_set_pending_error(array, f->error);
+
 	if (atomic_dec_and_test(&array->num_pending))
 		irq_work_queue(&array->work);
 	else
@@ -63,9 +86,14 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
 		dma_fence_get(&array->base);
 		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
 					   dma_fence_array_cb_func)) {
+			int error = array->fences[i]->error;
+
+			dma_fence_array_set_pending_error(array, error);
 			dma_fence_put(&array->base);
-			if (atomic_dec_and_test(&array->num_pending))
+			if (atomic_dec_and_test(&array->num_pending)) {
+				dma_fence_array_clear_pending_error(array);
 				return false;
+			}
 		}
 	}
 
@@ -142,6 +170,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
 	array->fences = fences;
 
+	array->base.error = PENDING_ERROR;
+
 	return array;
 }
 EXPORT_SYMBOL(dma_fence_array_create);
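The PENDING_ERROR scheme above works because a dma_fence error of 1 is never a valid (negative) error code: the array starts with base.error = PENDING_ERROR, each completing fence may swap in the first real error via cmpxchg(), and the sentinel is folded back to 0 just before the array signals. A stand-alone user-space sketch of the same first-error-wins idea, using C11 atomics in place of the kernel's cmpxchg() (all names here are invented for illustration):

/* Stand-alone sketch of the first-error-wins pattern used above.
 * PENDING_ERROR doubles as "no error recorded yet"; the compare-exchange
 * only succeeds for the first reporter, later errors are dropped.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PENDING_ERROR 1	/* sentinel: no fence has failed yet */

static _Atomic int error = PENDING_ERROR;

static void set_pending_error(int err)
{
	int expected = PENDING_ERROR;

	if (err)	/* only the first error replaces the sentinel */
		atomic_compare_exchange_strong(&error, &expected, err);
}

static void clear_pending_error(void)
{
	int expected = PENDING_ERROR;

	/* if nothing failed, fold the sentinel back to "success" */
	atomic_compare_exchange_strong(&error, &expected, 0);
}

int main(void)
{
	set_pending_error(-5);	/* first error wins... */
	set_pending_error(-22);	/* ...this one is ignored */
	clear_pending_error();	/* no-op here, an error was recorded */
	printf("recorded error: %d\n", error);	/* prints -5 */
	return 0;
}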
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index b5089f64be2a..44a741677d25 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -178,8 +178,30 @@ static bool dma_fence_chain_signaled(struct dma_fence *fence)
 static void dma_fence_chain_release(struct dma_fence *fence)
 {
 	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+	struct dma_fence *prev;
+
+	/* Manually unlink the chain as much as possible to avoid recursion
+	 * and potential stack overflow.
+	 */
+	while ((prev = rcu_dereference_protected(chain->prev, true))) {
+		struct dma_fence_chain *prev_chain;
+
+		if (kref_read(&prev->refcount) > 1)
+			break;
+
+		prev_chain = to_dma_fence_chain(prev);
+		if (!prev_chain)
+			break;
+
+		/* No need for atomic operations since we hold the last
+		 * reference to prev_chain.
+		 */
+		chain->prev = prev_chain->prev;
+		RCU_INIT_POINTER(prev_chain->prev, NULL);
+		dma_fence_put(prev);
+	}
+	dma_fence_put(prev);
 
-	dma_fence_put(rcu_dereference_protected(chain->prev, true));
 	dma_fence_put(chain->fence);
 	dma_fence_free(fence);
 }
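The dma_fence_chain_release() change above replaces a put that was recursive by refcount — dropping the head's reference could free prev, whose release drops its own prev, and so on down a chain of unbounded depth — with a loop that detaches every link the head exclusively owns. The same iterative-teardown idea as a stand-alone sketch (types invented for illustration):

/* Stand-alone sketch of iterative chain teardown: freeing node by node
 * in a loop instead of letting each destructor free the next keeps stack
 * usage constant no matter how long the chain is.
 */
#include <stdlib.h>

struct node {
	struct node *prev;
};

static void release_chain(struct node *head)
{
	while (head) {
		struct node *prev = head->prev;

		free(head);	/* no recursive call, O(1) stack */
		head = prev;
	}
}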
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 59ac96ec7ba8..2c136aee3e79 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
 
@@ -129,31 +129,27 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
 int dma_fence_signal_locked(struct dma_fence *fence)
 {
 	struct dma_fence_cb *cur, *tmp;
-	int ret = 0;
+	struct list_head cb_list;
 
 	lockdep_assert_held(fence->lock);
 
-	if (WARN_ON(!fence))
+	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				      &fence->flags)))
 		return -EINVAL;
 
-	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-		ret = -EINVAL;
+	/* Stash the cb_list before replacing it with the timestamp */
+	list_replace(&fence->cb_list, &cb_list);
 
-		/*
-		 * we might have raced with the unlocked dma_fence_signal,
-		 * still run through all callbacks
-		 */
-	} else {
-		fence->timestamp = ktime_get();
-		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-		trace_dma_fence_signaled(fence);
-	}
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+	trace_dma_fence_signaled(fence);
 
-	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
-		list_del_init(&cur->node);
+	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
+		INIT_LIST_HEAD(&cur->node);
 		cur->func(fence, cur);
 	}
-	return ret;
+
+	return 0;
 }
 EXPORT_SYMBOL(dma_fence_signal_locked);
@@ -173,28 +169,16 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
 int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
+	int ret;
 
 	if (!fence)
 		return -EINVAL;
 
-	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		return -EINVAL;
-
-	fence->timestamp = ktime_get();
-	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-	trace_dma_fence_signaled(fence);
-
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct dma_fence_cb *cur, *tmp;
+	spin_lock_irqsave(fence->lock, flags);
+	ret = dma_fence_signal_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
 
-		spin_lock_irqsave(fence->lock, flags);
-		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
-			list_del_init(&cur->node);
-			cur->func(fence, cur);
-		}
-		spin_unlock_irqrestore(fence->lock, flags);
-	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dma_fence_signal);
@@ -248,7 +232,8 @@ void dma_fence_release(struct kref *kref)
 
 	trace_dma_fence_destroy(fence);
 
-	if (WARN(!list_empty(&fence->cb_list),
+	if (WARN(!list_empty(&fence->cb_list) &&
+		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
 		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
 		 fence->ops->get_driver_name(fence),
 		 fence->ops->get_timeline_name(fence),
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/dma-resv.c
index 4447e13d1e89..42a8f3f11681 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -32,7 +32,7 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/export.h>
 
 /**
@@ -56,26 +56,103 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
 /**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
+ * dma_resv_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
+ *
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+{
+	struct dma_resv_list *list;
+
+	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+	if (!list)
+		return NULL;
+
+	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+		sizeof(*list->shared);
+
+	return list;
+}
+
+/**
+ * dma_resv_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a dma_resv_list and make sure to drop all references.
+ */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+	unsigned int i;
+
+	if (!list)
+		return;
+
+	for (i = 0; i < list->shared_count; ++i)
+		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+	kfree_rcu(list, rcu);
+}
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+	ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+	__seqcount_init(&obj->seq, reservation_seqcount_string,
+			&reservation_seqcount_class);
+	RCU_INIT_POINTER(obj->fence, NULL);
+	RCU_INIT_POINTER(obj->fence_excl, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+	struct dma_resv_list *fobj;
+	struct dma_fence *excl;
+
+	/*
+	 * This object should be dead and all references must have
+	 * been released to it, so no need to be protected with rcu.
+	 */
+	excl = rcu_dereference_protected(obj->fence_excl, 1);
+	if (excl)
+		dma_fence_put(excl);
+
+	fobj = rcu_dereference_protected(obj->fence, 1);
+	dma_resv_list_free(fobj);
+
+	ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/**
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
  *
- * Should be called before reservation_object_add_shared_fence(). Must
+ * Should be called before dma_resv_add_shared_fence(). Must
  * be called with obj->lock held.
  *
  * RETURNS
  * Zero for success, or -errno
  */
-int reservation_object_reserve_shared(struct reservation_object *obj,
-				      unsigned int num_fences)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 {
-	struct reservation_object_list *old, *new;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k, max;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
@@ -87,7 +164,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		max = 4;
 	}
 
-	new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+	new = dma_resv_list_alloc(max);
 	if (!new)
 		return -ENOMEM;
 
@@ -101,79 +178,76 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(old->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		if (dma_fence_is_signaled(fence))
 			RCU_INIT_POINTER(new->shared[--k], fence);
 		else
 			RCU_INIT_POINTER(new->shared[j++], fence);
 	}
 	new->shared_count = j;
-	new->shared_max = max;
 
-	preempt_disable();
-	write_seqcount_begin(&obj->seq);
 	/*
-	 * RCU_INIT_POINTER can be used here,
-	 * seqcount provides the necessary barriers
+	 * We are not changing the effective set of fences here so can
+	 * merely update the pointer to the new array; both existing
+	 * readers and new readers will see exactly the same set of
+	 * active (unsignaled) shared fences. Individual fences and the
+	 * old array are protected by RCU and so will not vanish under
+	 * the gaze of the rcu_read_lock() readers.
 	 */
-	RCU_INIT_POINTER(obj->fence, new);
-	write_seqcount_end(&obj->seq);
-	preempt_enable();
+	rcu_assign_pointer(obj->fence, new);
 
 	if (!old)
 		return 0;
 
 	/* Drop the references to the signaled fences */
-	for (i = k; i < new->shared_max; ++i) {
+	for (i = k; i < max; ++i) {
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(new->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		dma_fence_put(fence);
 	}
 	kfree_rcu(old, rcu);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_shared);
 
 /**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
 * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
  */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
+	struct dma_fence *old;
 	unsigned int i, count;
 
 	dma_fence_get(fence);
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	fobj = reservation_object_get_list(obj);
+	fobj = dma_resv_get_list(obj);
 	count = fobj->shared_count;
 
 	preempt_disable();
 	write_seqcount_begin(&obj->seq);
 
 	for (i = 0; i < count; ++i) {
-		struct dma_fence *old_fence;
 
-		old_fence = rcu_dereference_protected(fobj->shared[i],
-						      reservation_object_held(obj));
-		if (old_fence->context == fence->context ||
-		    dma_fence_is_signaled(old_fence)) {
-			dma_fence_put(old_fence);
+		old = rcu_dereference_protected(fobj->shared[i],
+						dma_resv_held(obj));
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old))
 			goto replace;
-		}
 	}
 
 	BUG_ON(fobj->shared_count >= fobj->shared_max);
+	old = NULL;
 	count++;
 
 replace:
@@ -183,26 +257,26 @@ replace:
 
 	write_seqcount_end(&obj->seq);
 	preempt_enable();
+
+	dma_fence_put(old);
 }
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
 /**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
 * Add a fence to the exclusive slot. The obj->lock must be held.
  */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = reservation_object_get_excl(obj);
-	struct reservation_object_list *old;
+	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_resv_list *old;
 	u32 i = 0;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 	if (old)
 		i = old->shared_count;
@@ -221,28 +295,26 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 	/* inplace update, no shared fences */
 	while (i--)
 		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+						dma_resv_held(obj)));
 
 	dma_fence_put(old_fence);
 }
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
-* reservation_object_copy_fences - Copy all fences from src to dst.
+* dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct reservation_object_list *src_list, *dst_list;
+	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
-	size_t size;
 	unsigned i;
 
-	reservation_object_assert_held(dst);
+	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
 	src_list = rcu_dereference(src->fence);
@@ -251,10 +323,9 @@ retry:
 	if (src_list) {
 		unsigned shared_count = src_list->shared_count;
 
-		size = offsetof(typeof(*src_list), shared[shared_count]);
 		rcu_read_unlock();
 
-		dst_list = kmalloc(size, GFP_KERNEL);
+		dst_list = dma_resv_list_alloc(shared_count);
 		if (!dst_list)
 			return -ENOMEM;
 
@@ -266,7 +337,6 @@ retry:
 		}
 
 		dst_list->shared_count = 0;
-		dst_list->shared_max = shared_count;
 		for (i = 0; i < src_list->shared_count; ++i) {
 			struct dma_fence *fence;
 
@@ -276,7 +346,7 @@ retry:
 				continue;
 
 			if (!dma_fence_get_rcu(fence)) {
-				kfree(dst_list);
+				dma_resv_list_free(dst_list);
 				src_list = rcu_dereference(src->fence);
 				goto retry;
 			}
@@ -295,8 +365,8 @@ retry:
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	src_list = reservation_object_get_list(dst);
-	old = reservation_object_get_excl(dst);
+	src_list = dma_resv_get_list(dst);
+	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
 	write_seqcount_begin(&dst->seq);
@@ -306,16 +376,15 @@ retry:
 	write_seqcount_end(&dst->seq);
 	preempt_enable();
 
-	if (src_list)
-		kfree_rcu(src_list, rcu);
+	dma_resv_list_free(src_list);
 	dma_fence_put(old);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -327,10 +396,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -338,7 +407,7 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 	int ret = 1;
 
 	do {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		unsigned int i, seq;
 		size_t sz = 0;
 
@@ -385,13 +454,6 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 			if (!dma_fence_get_rcu(shared[i]))
 				break;
 		}
-
-		if (!pfence_excl && fence_excl) {
-			shared[i] = fence_excl;
-			fence_excl = NULL;
-			++i;
-			++shared_count;
-		}
 	}
 
 	if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
@@ -406,6 +468,11 @@ unlock:
 		rcu_read_unlock();
 	} while (ret);
 
+	if (pfence_excl)
+		*pfence_excl = fence_excl;
+	else if (fence_excl)
+		shared[++shared_count] = fence_excl;
+
 	if (!shared_count) {
 		kfree(shared);
 		shared = NULL;
@@ -413,15 +480,12 @@ unlock:
 	*pshared_count = shared_count;
 	*pshared = shared;
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
 
 /**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
@@ -432,9 +496,9 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zer on success.
  */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+			       bool wait_all, bool intr,
+			       unsigned long timeout)
 {
 	struct dma_fence *fence;
 	unsigned seq, shared_count;
@@ -462,8 +526,7 @@ retry:
 	}
 
 	if (wait_all) {
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -506,11 +569,10 @@ unlock_retry:
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
 
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 {
 	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
@@ -527,7 +589,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 }
 
 /**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
  * fences have been signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
  * fence
  *
@@ -536,8 +598,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
  * RETURNS
  * true if all fences signaled, else false
  */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
 	unsigned seq, shared_count;
 	int ret;
@@ -551,8 +612,7 @@ retry:
 	if (test_all) {
 		unsigned i;
 
-		struct reservation_object_list *fobj =
-						rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -560,7 +620,7 @@
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			ret = reservation_object_test_signaled_single(fence);
+			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
 			else if (!ret)
@@ -575,8 +635,7 @@ retry:
 		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
 
 		if (fence_excl) {
-			ret = reservation_object_test_signaled_single(
-								fence_excl);
+			ret = dma_resv_test_signaled_single(fence_excl);
 			if (ret < 0)
 				goto retry;
 
@@ -588,4 +647,4 @@ retry:
 	rcu_read_unlock();
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
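dma_resv_get_fences_rcu(), dma_resv_wait_timeout_rcu() and dma_resv_test_signaled_rcu() above form the lockless read side: they snapshot the fence pointers under rcu_read_lock() and retry on seqcount changes instead of taking obj->lock. A hypothetical caller of dma_resv_get_fences_rcu(), following the convention documented above (the caller receives a reference on every returned fence and owns the returned array):

/* Hypothetical reader: snapshot all fences of a buffer without holding
 * obj->lock, using dma_resv_get_fences_rcu() as changed above.
 */
#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int log_fences(struct dma_resv *resv)
{
	struct dma_fence *excl, **shared;
	unsigned count, i;
	int ret;

	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
	if (ret)
		return ret;	/* -ENOMEM */

	for (i = 0; i < count; i++) {
		pr_info("shared fence %u signaled: %d\n",
			i, dma_fence_is_signaled(shared[i]));
		dma_fence_put(shared[i]);	/* drop our reference */
	}
	kfree(shared);	/* the snapshot array belongs to the caller */

	if (excl) {
		pr_info("exclusive fence signaled: %d\n",
			dma_fence_is_signaled(excl));
		dma_fence_put(excl);
	}

	return 0;
}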
diff --git a/drivers/dma-buf/selftest.c b/drivers/dma-buf/selftest.c
new file mode 100644
index 000000000000..c60b6944b4bd
--- /dev/null
+++ b/drivers/dma-buf/selftest.c
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "selftest.h"
+
+enum {
+#define selftest(n, func) __idx_##n,
+#include "selftests.h"
+#undef selftest
+};
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct selftest {
+	bool enabled;
+	const char *name;
+	int (*func)(void);
+} selftests[] = {
+#include "selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include "selftests.h"
+#undef selftest
+
+int __sanitycheck__(void)
+{
+	pr_debug("Hello World!\n");
+	return 0;
+}
+
+static char *__st_filter;
+
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+	char *filter, *sep, *tok;
+	bool result = true;
+
+	filter = kstrdup(__st_filter, GFP_KERNEL);
+	for (sep = filter; (tok = strsep(&sep, ","));) {
+		bool allow = true;
+		char *sl;
+
+		if (*tok == '!') {
+			allow = false;
+			tok++;
+		}
+
+		if (*tok == '\0')
+			continue;
+
+		sl = strchr(tok, '/');
+		if (sl) {
+			*sl++ = '\0';
+			if (strcmp(tok, caller)) {
+				if (allow)
+					result = false;
+				continue;
+			}
+			tok = sl;
+		}
+
+		if (strcmp(tok, name)) {
+			if (allow)
+				result = false;
+			continue;
+		}
+
+		result = allow;
+		break;
+	}
+	kfree(filter);
+
+	return result;
+}
+
+int
+__subtests(const char *caller, const struct subtest *st, int count, void *data)
+{
+	int err;
+
+	for (; count--; st++) {
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (!apply_subtest_filter(caller, st->name))
+			continue;
+
+		pr_info("dma-buf: Running %s/%s\n", caller, st->name);
+
+		err = st->func(data);
+		if (err && err != -EINTR) {
+			pr_err("dma-buf/%s: %s failed with error %d\n",
+			       caller, st->name, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void set_default_test_all(struct selftest *st, unsigned long count)
+{
+	unsigned long i;
+
+	for (i = 0; i < count; i++)
+		if (st[i].enabled)
+			return;
+
+	for (i = 0; i < count; i++)
+		st[i].enabled = true;
+}
+
+static int run_selftests(struct selftest *st, unsigned long count)
+{
+	int err = 0;
+
+	set_default_test_all(st, count);
+
+	/* Tests are listed in natural order in selftests.h */
+	for (; count--; st++) {
+		if (!st->enabled)
+			continue;
+
+		pr_info("dma-buf: Running %s\n", st->name);
+		err = st->func();
+		if (err)
+			break;
+	}
+
+	if (WARN(err > 0 || err == -ENOTTY,
+		 "%s returned %d, conflicting with selftest's magic values!\n",
+		 st->name, err))
+		err = -1;
+
+	return err;
+}
+
+static int __init st_init(void)
+{
+	return run_selftests(selftests, ARRAY_SIZE(selftests));
+}
+
+static void __exit st_exit(void)
+{
+}
+
+module_param_named(st_filter, __st_filter, charp, 0400);
+module_init(st_init);
+module_exit(st_exit);
+
+MODULE_DESCRIPTION("Self-test harness for dma-buf");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/dma-buf/selftest.h b/drivers/dma-buf/selftest.h
new file mode 100644
index 000000000000..45793aff6142
--- /dev/null
+++ b/drivers/dma-buf/selftest.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __SELFTEST_H__
+#define __SELFTEST_H__
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) int func(void);
+#include "selftests.h"
+#undef selftest
+
+struct subtest {
+	int (*func)(void *data);
+	const char *name;
+};
+
+int __subtests(const char *caller,
+	       const struct subtest *st,
+	       int count,
+	       void *data);
+#define subtests(T, data) \
+	__subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __SELFTEST_H__ */
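selftests.h below is deliberately header-guard-free: selftest.c includes it three times with different definitions of the selftest() macro to generate the test enum, the test table, and one module parameter per test. Adding a suite therefore takes one new selftest() line plus an entry point built from the SUBTEST()/subtests() helpers in selftest.h. A hypothetical example (my_feature and test_nop are invented names):

/* Hypothetical new suite wired into the harness above: one line in
 * selftests.h, e.g. selftest(my_feature, my_feature), plus this entry
 * point. SUBTEST() and subtests() come from selftest.h.
 */
#include "selftest.h"

static int test_nop(void *arg)
{
	return 0;	/* 0 = pass, -EINTR = aborted, other < 0 = failure */
}

int my_feature(void)
{
	static const struct subtest tests[] = {
		SUBTEST(test_nop),
	};

	return subtests(tests, NULL);
}

Once built (CONFIG_DMABUF_SELFTESTS selects the dmabuf_selftests module per the Makefile hunk above), individual suites and subtests are chosen through the generated igt__* module parameters or the st_filter parameter parsed by apply_subtest_filter() — "suite/subtest" tokens, prefixed with '!' to exclude.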
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
new file mode 100644
index 000000000000..5320386f02e5
--- /dev/null
+++ b/drivers/dma-buf/selftests.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/dmabuf_selftest
+ */
+selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
+selftest(dma_fence, dma_fence)
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
new file mode 100644
index 000000000000..e593064341c8
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+	struct dma_fence base;
+	struct spinlock lock;
+} *to_mock_fence(struct dma_fence *f) {
+	return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+	return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+	kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+struct wait_cb {
+	struct dma_fence_cb cb;
+	struct task_struct *task;
+};
+
+static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
+}
+
+static long mock_wait(struct dma_fence *f, bool intr, long timeout)
+{
+	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	struct wait_cb cb = { .task = current };
+
+	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
+		return timeout;
+
+	while (timeout) {
+		set_current_state(state);
+
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+			break;
+
+		if (signal_pending_state(state, current))
+			break;
+
+		timeout = schedule_timeout(timeout);
+	}
+	__set_current_state(TASK_RUNNING);
+
+	if (!dma_fence_remove_callback(f, &cb.cb))
+		return timeout;
+
+	if (signal_pending_state(state, current))
+		return -ERESTARTSYS;
+
+	return -ETIME;
+}
+
+static const struct dma_fence_ops mock_ops = {
+	.get_driver_name = mock_name,
+	.get_timeline_name = mock_name,
+	.wait = mock_wait,
+	.release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+	struct mock_fence *f;
+
+	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+	return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+	struct dma_fence *f;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+	dma_fence_put(f);
+
+	return 0;
+}
+
+static int test_signaling(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_is_signaled(f)) {
+		pr_err("Fence unexpectedly signaled on creation\n");
+		goto err_free;
+	}
+
+	if (dma_fence_signal(f)) {
+		pr_err("Fence reported being already signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_is_signaled(f)) {
+		pr_err("Fence not reporting signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_signal(f)) {
+		pr_err("Fence reported not being already signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+struct simple_cb {
+	struct dma_fence_cb cb;
+	bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int test_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_late_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+
+	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Added callback, but fence was already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (cb.seen) {
+		pr_err("Callback called after failed attachment!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_remove_callback(f, &cb.cb)) {
+		pr_err("Failed to remove callback!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (cb.seen) {
+		pr_err("Callback still signaled after removal!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_late_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	if (dma_fence_remove_callback(f, &cb.cb)) {
+		pr_err("Callback removal succeed after being executed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_status(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_get_status(f)) {
+		pr_err("Fence unexpectedly has signaled status on creation\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (!dma_fence_get_status(f)) {
+		pr_err("Fence not reporting signaled status\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_error(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_set_error(f, -EIO);
+
+	if (dma_fence_get_status(f)) {
+		pr_err("Fence unexpectedly has error status before signal\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+	if (dma_fence_get_status(f) != -EIO) {
+		pr_err("Fence not reporting error status, got %d\n",
+		       dma_fence_get_status(f));
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_put(f);
+	return err;
+}
+
+static int test_wait(void *arg)
+{
+	struct dma_fence *f;
+	int err = -EINVAL;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
+		pr_err("Wait reported complete before being signaled\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f);
+
+	if (dma_fence_wait_timeout(f, false, 0) != 0) {
+		pr_err("Wait reported incomplete after being signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_signal(f);
+	dma_fence_put(f);
+	return err;
err; +} + +struct wait_timer { + struct timer_list timer; + struct dma_fence *f; +}; + +static void wait_timer(struct timer_list *timer) +{ + struct wait_timer *wt = from_timer(wt, timer, timer); + + dma_fence_signal(wt->f); +} + +static int test_wait_timeout(void *arg) +{ + struct wait_timer wt; + int err = -EINVAL; + + timer_setup_on_stack(&wt.timer, wait_timer, 0); + + wt.f = mock_fence(); + if (!wt.f) + return -ENOMEM; + + if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) { + pr_err("Wait reported complete before being signaled\n"); + goto err_free; + } + + mod_timer(&wt.timer, jiffies + 1); + + if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) { + if (timer_pending(&wt.timer)) { + pr_notice("Timer did not fire within the jiffie!\n"); + err = 0; /* not our fault! */ + } else { + pr_err("Wait reported incomplete after timeout\n"); + } + goto err_free; + } + + err = 0; +err_free: + del_timer_sync(&wt.timer); + destroy_timer_on_stack(&wt.timer); + dma_fence_signal(wt.f); + dma_fence_put(wt.f); + return err; +} + +static int test_stub(void *arg) +{ + struct dma_fence *f[64]; + int err = -EINVAL; + int i; + + for (i = 0; i < ARRAY_SIZE(f); i++) { + f[i] = dma_fence_get_stub(); + if (!dma_fence_is_signaled(f[i])) { + pr_err("Obtained unsignaled stub fence!\n"); + goto err; + } + } + + err = 0; +err: + while (i--) + dma_fence_put(f[i]); + return err; +} + +/* Now off to the races! */ + +struct race_thread { + struct dma_fence __rcu **fences; + struct task_struct *task; + bool before; + int id; +}; + +static void __wait_for_callbacks(struct dma_fence *f) +{ + spin_lock_irq(f->lock); + spin_unlock_irq(f->lock); +} + +static int thread_signal_callback(void *arg) +{ + const struct race_thread *t = arg; + unsigned long pass = 0; + unsigned long miss = 0; + int err = 0; + + while (!err && !kthread_should_stop()) { + struct dma_fence *f1, *f2; + struct simple_cb cb; + + f1 = mock_fence(); + if (!f1) { + err = -ENOMEM; + break; + } + + rcu_assign_pointer(t->fences[t->id], f1); + smp_wmb(); + + rcu_read_lock(); + do { + f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); + } while (!f2 && !kthread_should_stop()); + rcu_read_unlock(); + + if (t->before) + dma_fence_signal(f1); + + smp_store_mb(cb.seen, false); + if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback)) + miss++, cb.seen = true; + + if (!t->before) + dma_fence_signal(f1); + + if (!cb.seen) { + dma_fence_wait(f2, false); + __wait_for_callbacks(f2); + } + + if (!READ_ONCE(cb.seen)) { + pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n", + t->id, pass, miss, + t->before ? "before" : "after", + dma_fence_is_signaled(f2) ? 
"yes" : "no"); + err = -EINVAL; + } + + dma_fence_put(f2); + + rcu_assign_pointer(t->fences[t->id], NULL); + smp_wmb(); + + dma_fence_put(f1); + + pass++; + } + + pr_info("%s[%d] completed %lu passes, %lu misses\n", + __func__, t->id, pass, miss); + return err; +} + +static int race_signal_callback(void *arg) +{ + struct dma_fence __rcu *f[2] = {}; + int ret = 0; + int pass; + + for (pass = 0; !ret && pass <= 1; pass++) { + struct race_thread t[2]; + int i; + + for (i = 0; i < ARRAY_SIZE(t); i++) { + t[i].fences = f; + t[i].id = i; + t[i].before = pass; + t[i].task = kthread_run(thread_signal_callback, &t[i], + "dma-fence:%d", i); + get_task_struct(t[i].task); + } + + msleep(50); + + for (i = 0; i < ARRAY_SIZE(t); i++) { + int err; + + err = kthread_stop(t[i].task); + if (err && !ret) + ret = err; + + put_task_struct(t[i].task); + } + } + + return ret; +} + +int dma_fence(void) +{ + static const struct subtest tests[] = { + SUBTEST(sanitycheck), + SUBTEST(test_signaling), + SUBTEST(test_add_callback), + SUBTEST(test_late_add_callback), + SUBTEST(test_rm_callback), + SUBTEST(test_late_rm_callback), + SUBTEST(test_status), + SUBTEST(test_error), + SUBTEST(test_wait), + SUBTEST(test_wait_timeout), + SUBTEST(test_stub), + SUBTEST(race_signal_callback), + }; + int ret; + + pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence)); + + slab_fences = KMEM_CACHE(mock_fence, + SLAB_TYPESAFE_BY_RCU | + SLAB_HWCACHE_ALIGN); + if (!slab_fences) + return -ENOMEM; + + ret = subtests(tests, NULL); + + kmem_cache_destroy(slab_fences); + + return ret; +} diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 051f6c2873c7..6713cfb1995c 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); struct sync_timeline *parent = dma_fence_parent(fence); + unsigned long flags; + spin_lock_irqsave(fence->lock, flags); if (!list_empty(&pt->link)) { - unsigned long flags; - - spin_lock_irqsave(fence->lock, flags); - if (!list_empty(&pt->link)) { - list_del(&pt->link); - rb_erase(&pt->node, &parent->pt_tree); - } - spin_unlock_irqrestore(fence->lock, flags); + list_del(&pt->link); + rb_erase(&pt->node, &parent->pt_tree); } + spin_unlock_irqrestore(fence->lock, flags); sync_timeline_put(parent); dma_fence_free(fence); @@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj, p = &parent->rb_left; } else { if (dma_fence_get_rcu(&other->base)) { - dma_fence_put(&pt->base); + sync_timeline_put(obj); + kfree(pt); pt = other; goto unlock; } diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index ee4d1a96d779..25c5c071645b 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -419,7 +419,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, * info->num_fences. */ if (!info.num_fences) { - info.status = dma_fence_is_signaled(sync_file->fence); + info.status = dma_fence_get_status(sync_file->fence); goto no_fences; } else { info.status = 1; |