author:    Christoph Hellwig <hch@lst.de>        2019-06-26 14:27:14 +0200
committer: Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 14:32:44 -0300
commit:    24917f6b1041a73993178920656e13364f847995
tree:      3257cca350460fae4d73b136b5de7c1c91d4915a /kernel
parent:    514caf23a70fd697fa2ece238b2cd8dcc73fb16f
memremap: provide an optional internal refcount in struct dev_pagemap
Provide an internal refcounting logic if no ->ref field is provided in the
pagemap passed into devm_memremap_pages so that callers don't have to
reinvent it poorly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
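[Editorial note: the sketch below is not part of the patch. It illustrates the
new default path under stated assumptions: a hypothetical caller initializes
only the mandatory res and type fields and leaves ->ref and ->ops NULL, so
devm_memremap_pages() falls back to pgmap->internal_ref. The example_* names
are invented, and MEMORY_DEVICE_DEVDAX is assumed as a pgmap type that needs
no ops at this point in the tree.]

	#include <linux/memremap.h>
	#include <linux/err.h>

	/*
	 * Hypothetical caller relying on the internal refcount added by
	 * this patch: with pgmap->ref and pgmap->ops left NULL,
	 * devm_memremap_pages() initializes pgmap->internal_ref and tears
	 * it down automatically in devm_memremap_pages_release().
	 */
	static void *example_map_pages(struct device *dev,
				       struct dev_pagemap *pgmap,
				       struct resource *res)
	{
		pgmap->res = *res;
		pgmap->type = MEMORY_DEVICE_DEVDAX;
		/* no pgmap->ref, no pgmap->ops: internal refcount is used */
		return devm_memremap_pages(dev, pgmap);
	}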
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/memremap.c	64
1 file changed, 51 insertions(+), 13 deletions(-)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index eee490e7d7e1..bea6f887adad 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -29,7 +29,7 @@ static void devmap_managed_enable_put(void *data)
 
 static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 {
-	if (!pgmap->ops->page_free) {
+	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
 		return -EINVAL;
 	}
@@ -75,6 +75,24 @@ static unsigned long pfn_next(unsigned long pfn)
 #define for_each_device_pfn(pfn, map) \
 	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 
+static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->kill)
+		pgmap->ops->kill(pgmap);
+	else
+		percpu_ref_kill(pgmap->ref);
+}
+
+static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+{
+	if (pgmap->ops && pgmap->ops->cleanup) {
+		pgmap->ops->cleanup(pgmap);
+	} else {
+		wait_for_completion(&pgmap->done);
+		percpu_ref_exit(pgmap->ref);
+	}
+}
+
 static void devm_memremap_pages_release(void *data)
 {
 	struct dev_pagemap *pgmap = data;
@@ -84,10 +102,10 @@ static void devm_memremap_pages_release(void *data)
 	unsigned long pfn;
 	int nid;
 
-	pgmap->ops->kill(pgmap);
+	dev_pagemap_kill(pgmap);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_cleanup(pgmap);
 
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
@@ -114,20 +132,29 @@ static void devm_memremap_pages_release(void *data)
 		"%s: failed to free all reserved pages\n", __func__);
 }
 
+static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+{
+	struct dev_pagemap *pgmap =
+		container_of(ref, struct dev_pagemap, internal_ref);
+
+	complete(&pgmap->done);
+}
+
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
  * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
- * 1/ At a minimum the res, ref and type and ops members of @pgmap must be
- *    initialized by the caller before passing it to this function
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
  *
  * 2/ The altmap field may optionally be initialized, in which case
  *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
  *
- * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
- *    at devm_memremap_pages_release() time, or if this routine fails.
+ * 3/ The ref field may optionally be provided, in which case pgmap->ref
+ *    must be 'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
@@ -175,10 +202,21 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		break;
 	}
 
-	if (!pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
-	    !pgmap->ops->cleanup) {
-		WARN(1, "Missing reference count teardown definition\n");
-		return ERR_PTR(-EINVAL);
+	if (!pgmap->ref) {
+		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+			return ERR_PTR(-EINVAL);
+
+		init_completion(&pgmap->done);
+		error = percpu_ref_init(&pgmap->internal_ref,
+				dev_pagemap_percpu_release, 0, GFP_KERNEL);
+		if (error)
+			return ERR_PTR(error);
+		pgmap->ref = &pgmap->internal_ref;
+	} else {
+		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+			WARN(1, "Missing reference count teardown definition\n");
+			return ERR_PTR(-EINVAL);
+		}
 	}
 
 	if (need_devmap_managed) {
@@ -296,8 +334,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
 	pgmap_array_delete(res);
  err_array:
-	pgmap->ops->kill(pgmap);
-	pgmap->ops->cleanup(pgmap);
+	dev_pagemap_kill(pgmap);
+	dev_pagemap_cleanup(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
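
[Editorial note: for contrast, a hedged sketch of the explicit path that this
patch leaves intact. A caller that supplies its own percpu_ref must now also
supply both the kill and cleanup ops, or devm_memremap_pages() fails with
-EINVAL. Everything named example_* below is invented for illustration and
mirrors what the internal refcount now does on the caller's behalf.]

	#include <linux/memremap.h>
	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>
	#include <linux/err.h>

	/* Illustrative caller-owned refcount state (not from the patch). */
	struct example_state {
		struct percpu_ref ref;
		struct completion ref_done;
		struct dev_pagemap pgmap;
	};

	static void example_ref_release(struct percpu_ref *ref)
	{
		struct example_state *st =
			container_of(ref, struct example_state, ref);

		complete(&st->ref_done);
	}

	static void example_kill(struct dev_pagemap *pgmap)
	{
		percpu_ref_kill(pgmap->ref);
	}

	static void example_cleanup(struct dev_pagemap *pgmap)
	{
		struct example_state *st =
			container_of(pgmap, struct example_state, pgmap);

		wait_for_completion(&st->ref_done);
		percpu_ref_exit(&st->ref);
	}

	static const struct dev_pagemap_ops example_pgmap_ops = {
		.kill		= example_kill,
		.cleanup	= example_cleanup,
	};

	static void *example_map_pages_explicit(struct device *dev,
						struct example_state *st,
						struct resource *res)
	{
		int error;

		init_completion(&st->ref_done);
		error = percpu_ref_init(&st->ref, example_ref_release, 0,
					GFP_KERNEL);
		if (error)
			return ERR_PTR(error);

		st->pgmap.res = *res;
		st->pgmap.type = MEMORY_DEVICE_DEVDAX;
		st->pgmap.ref = &st->ref;	/* must be 'live' on entry */
		st->pgmap.ops = &example_pgmap_ops;
		return devm_memremap_pages(dev, &st->pgmap);
	}

Design note: the kill/cleanup pair above is exactly the boilerplate the patch
says callers "don't have to reinvent" anymore; keeping your own ref only makes
sense when teardown must be coordinated with other driver state.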