Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/dma-buf.c          |  44
-rw-r--r--  drivers/dma-buf/dma-fence-array.c  |  28
-rw-r--r--  drivers/dma-buf/dma-fence-unwrap.c | 126
-rw-r--r--  drivers/dma-buf/udmabuf.c          |  43
4 files changed, 137 insertions(+), 104 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5ad0e9e2e1b9..5baa83b85515 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -60,7 +60,7 @@ static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}
-static void __dma_buf_debugfs_list_del(struct file *file)
+static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
}
#endif
@@ -703,7 +703,7 @@ err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
/**
* dma_buf_fd - returns a file descriptor for the given struct dma_buf
@@ -727,7 +727,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags)
return fd;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
/**
* dma_buf_get - returns the struct dma_buf related to an fd
@@ -753,7 +753,7 @@ struct dma_buf *dma_buf_get(int fd)
return file->private_data;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");
/**
* dma_buf_put - decreases refcount of the buffer
@@ -772,7 +772,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
fput(dmabuf->file);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");
static void mangle_sg_table(struct sg_table *sg_table)
{
@@ -978,7 +978,7 @@ err_unlock:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
/**
* dma_buf_attach - Wrapper for dma_buf_dynamic_attach
@@ -993,7 +993,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
{
return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
@@ -1037,7 +1037,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
kfree(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
/**
* dma_buf_pin - Lock down the DMA-buf
@@ -1067,7 +1067,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
/**
* dma_buf_unpin - Unpin a DMA-buf
@@ -1088,7 +1088,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
if (dmabuf->ops->unpin)
dmabuf->ops->unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
@@ -1176,7 +1176,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
/**
* dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
@@ -1204,7 +1204,7 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
return sg_table;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
@@ -1236,7 +1236,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
/**
* dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
@@ -1261,7 +1261,7 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
dma_buf_unmap_attachment(attach, sg_table, direction);
dma_resv_unlock(attach->dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
@@ -1281,7 +1281,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf)
if (attach->importer_ops)
attach->importer_ops->move_notify(attach);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
/**
* DOC: cpu access
@@ -1429,7 +1429,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
@@ -1457,7 +1457,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
/**
@@ -1499,7 +1499,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return dmabuf->ops->mmap(dmabuf, vma);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
/**
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
@@ -1552,7 +1552,7 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
return 0;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
/**
* dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
@@ -1579,7 +1579,7 @@ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
return ret;
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
@@ -1603,7 +1603,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
iosys_map_clear(&dmabuf->vmap_ptr);
}
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
/**
* dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
@@ -1619,7 +1619,7 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
dma_buf_vunmap(dmabuf, map);
dma_resv_unlock(dmabuf->resv);
}
-EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
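
The dma-buf.c hunks above are a mechanical conversion of EXPORT_SYMBOL_NS_GPL() from the token form of the symbol namespace to the string-literal form. For illustration, a minimal sketch of the importing side, assuming the matching MODULE_IMPORT_NS() string-literal conversion from the same series; the module and helper names are hypothetical:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/module.h>

/* Import the "DMA_BUF" namespace as a string literal, matching the
 * EXPORT_SYMBOL_NS_GPL(..., "DMA_BUF") form used above. */
MODULE_IMPORT_NS("DMA_BUF");

/* Hypothetical helper: look up and immediately release a dma-buf. */
static int demo_peek_dmabuf(int fd)
{
	struct dma_buf *buf = dma_buf_get(fd);	/* exported in "DMA_BUF" */

	if (IS_ERR(buf))
		return PTR_ERR(buf);
	dma_buf_put(buf);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DMA_BUF namespace import sketch");

Without the import, modpost rejects the reference to the namespaced symbol at build time, which is the point of the namespace in the first place.
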
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
index 8a08ffde31e7..6657d4b30af9 100644
--- a/drivers/dma-buf/dma-fence-array.c
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
+ int num_pending;
+ unsigned int i;
- if (atomic_read(&array->num_pending) > 0)
+ /*
+ * We need to read num_pending before checking the ENABLE_SIGNAL bit
+ * to avoid racing with the enable_signaling() implementation, which
+ * might decrement the counter and cause a partial check.
+ * atomic_read_acquire() pairs with atomic_dec_and_test() in
+ * dma_fence_array_enable_signaling().
+ *
+ * The !--num_pending check is here to account for the any_signaled
+ * case: if we race with enable_signaling(), the num_pending <= 0 check
+ * in the signaling-enabled branch might be outdated (num_pending might
+ * have been decremented), but that's fine. The caller will get the
+ * right value when testing again later.
+ */
+ num_pending = atomic_read_acquire(&array->num_pending);
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
+ if (num_pending <= 0)
+ goto signal;
return false;
+ }
+
+ for (i = 0; i < array->num_fences; ++i) {
+ if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
+ goto signal;
+ }
+ return false;
+signal:
dma_fence_array_clear_pending_error(array);
return true;
}
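
A simplified, self-contained sketch (illustrative names, not the driver code) of the ordering the new comment relies on: the reader loads the counter with acquire semantics before testing the flag, pairing with the fully ordered atomic_dec_and_test() on the writer side.

#include <linux/atomic.h>
#include <linux/bitops.h>

#define DEMO_DONE_BIT 0

/* Writer: the final atomic_dec_and_test() is fully ordered, so the
 * decrement is visible no later than the flag update that follows. */
static void demo_complete_one(atomic_t *pending, unsigned long *flags)
{
	if (atomic_dec_and_test(pending))
		set_bit(DEMO_DONE_BIT, flags);
}

/* Reader: load the counter first, with acquire semantics, then test
 * the flag.  Without the acquire, the two loads could be observed in
 * either order, allowing an inconsistent counter/flag pair -- the
 * "partial check" the comment above warns about. */
static bool demo_is_done(atomic_t *pending, unsigned long *flags)
{
	int num_pending = atomic_read_acquire(pending);

	if (test_bit(DEMO_DONE_BIT, flags))
		return num_pending <= 0;
	return false;
}
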
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 628af51c81af..6345062731f1 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -12,6 +12,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
+#include <linux/sort.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+static int fence_cmp(const void *_a, const void *_b)
+{
+ struct dma_fence *a = *(struct dma_fence **)_a;
+ struct dma_fence *b = *(struct dma_fence **)_b;
+
+ if (a->context < b->context)
+ return -1;
+ else if (a->context > b->context)
+ return 1;
+
+ if (dma_fence_is_later(b, a))
+ return 1;
+ else if (dma_fence_is_later(a, b))
+ return -1;
+
+ return 0;
+}
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
ktime_t timestamp;
- unsigned int i;
- size_t count;
+ int i, j, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (!array)
return NULL;
- /*
- * This trashes the input fence array and uses it as position for the
- * following merge loop. This works because the dma_fence_merge()
- * wrapper macro is creating this temporary array on the stack together
- * with the iterators.
- */
- for (i = 0; i < num_fences; ++i)
- fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
-
count = 0;
- do {
- unsigned int sel;
-
-restart:
- tmp = NULL;
- for (i = 0; i < num_fences; ++i) {
- struct dma_fence *next;
-
- while (fences[i] && dma_fence_is_signaled(fences[i]))
- fences[i] = dma_fence_unwrap_next(&iter[i]);
-
- next = fences[i];
- if (!next)
- continue;
-
- /*
- * We can't guarantee that input fences are ordered by
- * context, but it is still quite likely when this
- * function is used multiple times. So attempt to order
- * the fences by context as we pass over them and merge
- * fences with the same context.
- */
- if (!tmp || tmp->context > next->context) {
- tmp = next;
- sel = i;
-
- } else if (tmp->context < next->context) {
- continue;
-
- } else if (dma_fence_is_later(tmp, next)) {
- fences[i] = dma_fence_unwrap_next(&iter[i]);
- goto restart;
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ if (!dma_fence_is_signaled(tmp)) {
+ array[count++] = dma_fence_get(tmp);
} else {
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- goto restart;
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
}
}
+ }
- if (tmp) {
- array[count++] = dma_fence_get(tmp);
- fences[sel] = dma_fence_unwrap_next(&iter[sel]);
- }
- } while (tmp);
+ if (count == 0 || count == 1)
+ goto return_fastpath;
- if (count == 0) {
- tmp = dma_fence_allocate_private_stub(ktime_get());
- goto return_tmp;
- }
+ sort(array, count, sizeof(*array), fence_cmp, NULL);
- if (count == 1) {
- tmp = array[0];
- goto return_tmp;
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < count; i++) {
+ if (array[i]->context == array[j]->context)
+ dma_fence_put(array[i]);
+ else
+ array[++j] = array[i];
}
-
- result = dma_fence_array_create(count, array,
- dma_fence_context_alloc(1),
- 1, false);
- if (!result) {
- tmp = NULL;
- goto return_tmp;
+ count = ++j;
+
+ if (count > 1) {
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ for (i = 0; i < count; i++)
+ dma_fence_put(array[i]);
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
}
- return &result->base;
+
+return_fastpath:
+ if (count == 0)
+ tmp = dma_fence_allocate_private_stub(timestamp);
+ else
+ tmp = array[0];
return_tmp:
kfree(array);
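
The rewritten merge collects every unsignaled fence, sorts the array, then compacts it so only the most recent fence per context survives. A minimal standalone sketch of that sort-and-compact idiom, using a hypothetical struct item with explicit context/seqno fields in place of struct dma_fence:

#include <linux/sort.h>
#include <linux/types.h>

struct item {
	u64 context;
	u64 seqno;
};

/* Sort by context; within a context, most recent (highest seqno)
 * first, mirroring fence_cmp()'s use of dma_fence_is_later(). */
static int item_cmp(const void *_a, const void *_b)
{
	const struct item *a = _a, *b = _b;

	if (a->context != b->context)
		return a->context < b->context ? -1 : 1;
	if (a->seqno != b->seqno)
		return a->seqno > b->seqno ? -1 : 1;
	return 0;
}

/* Keep only the first (i.e. most recent) entry of each context run;
 * returns the compacted count. */
static int demo_compact(struct item *arr, int count)
{
	int i, j = 0;

	sort(arr, count, sizeof(*arr), item_cmp, NULL);
	for (i = 1; i < count; i++)
		if (arr[i].context != arr[j].context)
			arr[++j] = arr[i];
	return count ? j + 1 : 0;
}

This replaces the old O(n^2) restart-on-collision merge loop with an O(n log n) sort plus a single linear pass; the real code additionally drops a reference on each discarded fence.
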
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 8ce1f074c2d3..cc7398cc17d6 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -297,7 +297,7 @@ static const struct dma_buf_ops udmabuf_ops = {
};
#define SEALS_WANTED (F_SEAL_SHRINK)
-#define SEALS_DENIED (F_SEAL_WRITE)
+#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
static int check_memfd_seals(struct file *memfd)
{
@@ -317,12 +317,10 @@ static int check_memfd_seals(struct file *memfd)
return 0;
}
-static int export_udmabuf(struct udmabuf *ubuf,
- struct miscdevice *device,
- u32 flags)
+static struct dma_buf *export_udmabuf(struct udmabuf *ubuf,
+ struct miscdevice *device)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *buf;
ubuf->device = device;
exp_info.ops = &udmabuf_ops;
@@ -330,11 +328,7 @@ static int export_udmabuf(struct udmabuf *ubuf,
exp_info.priv = ubuf;
exp_info.flags = O_RDWR;
- buf = dma_buf_export(&exp_info);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- return dma_buf_fd(buf, flags);
+ return dma_buf_export(&exp_info);
}
static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
@@ -391,6 +385,7 @@ static long udmabuf_create(struct miscdevice *device,
struct folio **folios = NULL;
pgoff_t pgcnt = 0, pglimit;
struct udmabuf *ubuf;
+ struct dma_buf *dmabuf;
long ret = -EINVAL;
u32 i, flags;
@@ -436,23 +431,39 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
}
+ /*
+ * Take the inode lock to protect against concurrent
+ * memfd_add_seals(), which takes this lock in write mode.
+ */
+ inode_lock_shared(file_inode(memfd));
ret = check_memfd_seals(memfd);
- if (ret < 0) {
- fput(memfd);
- goto err;
- }
+ if (ret)
+ goto out_unlock;
ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset,
list[i].size, folios);
+out_unlock:
+ inode_unlock_shared(file_inode(memfd));
fput(memfd);
if (ret)
goto err;
}
flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0;
- ret = export_udmabuf(ubuf, device, flags);
- if (ret < 0)
+ dmabuf = export_udmabuf(ubuf, device);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
goto err;
+ }
+ /*
+ * Ownership of ubuf is held by the dmabuf from here on.
+ * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the
+ * dmabuf and the ubuf (through udmabuf_ops.release).
+ */
+
+ ret = dma_buf_fd(dmabuf, flags);
+ if (ret < 0)
+ dma_buf_put(dmabuf);
kvfree(folios);
return ret;
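
The refactor splits buffer export from fd installation so that, once dma_buf_export() succeeds, all cleanup funnels through the dma-buf's release path. A hedged sketch of the same handoff for a hypothetical exporter (struct my_buffer, demo_dmabuf_ops, and demo_release() are illustrative, and a real exporter must also supply map_dma_buf/unmap_dma_buf):

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/slab.h>

struct my_buffer {
	size_t size;
	/* backing storage elided */
};

static void demo_release(struct dma_buf *dmabuf)
{
	kfree(dmabuf->priv);	/* my_buffer is owned by the dmabuf now */
}

static const struct dma_buf_ops demo_dmabuf_ops = {
	.release = demo_release,
	/* .map_dma_buf / .unmap_dma_buf etc. elided for brevity */
};

static int demo_export_fd(struct my_buffer *priv, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &demo_dmabuf_ops;
	exp_info.size = priv->size;
	exp_info.priv = priv;
	exp_info.flags = O_RDWR;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);	/* priv still owned by the caller */

	/* From here the dmabuf owns priv via demo_release(). */
	fd = dma_buf_fd(dmabuf, flags);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drops the dmabuf and priv together */

	return fd;
}

This is the same shape udmabuf_create() now has: a failing dma_buf_fd() needs only dma_buf_put(), because the release callback tears down the private state.
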