author    Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 11:51:51 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 11:51:51 -0700
commit    ff806d034ef8e9a95ff0b0532104dd65332e446b (patch)
tree      6170ccd68e32ac47666879c35e9a2af922bbf446 /drivers/base
parent    312c76f1a3989b8d0c0c13fee765bb2c41f2d114 (diff)
parent    f70e3c4f8b6ab61f713e040822ec51f5de498146 (diff)
Merge branch 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping into next

Pull CMA and DMA-mapping fixes from Marek Szyprowski:
 "A few fixes for dma-mapping and CMA subsystems"

* 'for-v3.16' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  CMA: correct unlock target
  drivers/base/dma-contiguous.c: erratum of dev_get_cma_area
  arm: dma-mapping: add checking cma area initialized
  arm: dma-iommu: Clean up redundant variable
  cma: Remove potential deadlock situation
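In short, the deadlock fix splits the old single-lock scheme in two: a new per-area cma->lock protects only the allocation bitmap, while the global cma_mutex is narrowed to serializing alloc_contig_range(). A range is first reserved in the bitmap under cma->lock, migration then runs under cma_mutex with no bitmap lock held, and a failed migration rolls the reservation back. The sketch below distills that pattern; cma_alloc_one is a hypothetical helper condensed from the patched dma_alloc_from_contiguous(), with the retry loop omitted:

    #include <linux/bitmap.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/mutex.h>

    /* Abbreviated view of struct cma as of this merge. */
    struct cma {
    	unsigned long	base_pfn;
    	unsigned long	count;
    	unsigned long	*bitmap;
    	struct mutex	lock;		/* protects bitmap only */
    };

    static DEFINE_MUTEX(cma_mutex);	/* serializes alloc_contig_range() only */

    static struct page *cma_alloc_one(struct cma *cma, unsigned long pageno,
    				  int count)
    {
    	unsigned long pfn = cma->base_pfn + pageno;
    	int ret;

    	/* Reserve the range in the bitmap; cheap, held briefly. */
    	mutex_lock(&cma->lock);
    	bitmap_set(cma->bitmap, pageno, count);
    	mutex_unlock(&cma->lock);

    	/* Migrate pages out of the range without holding cma->lock. */
    	mutex_lock(&cma_mutex);
    	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
    	mutex_unlock(&cma_mutex);

    	if (ret) {
    		/* Migration failed: unmark the reservation and give up. */
    		mutex_lock(&cma->lock);
    		bitmap_clear(cma->bitmap, pageno, count);
    		mutex_unlock(&cma->lock);
    		return NULL;
    	}
    	return pfn_to_page(pfn);
    }

Because the range is marked in the bitmap before cma->lock is dropped, concurrent allocators cannot pick the same pages, so the bitmap lock never has to be held across the blocking migration path.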
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/dma-contiguous.c  34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e57..c34ec3364243 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	struct mutex	lock;
 };
 
 struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
+	mutex_init(&cma->lock);
 	return 0;
 }
 
@@ -261,6 +263,13 @@ err:
 	return ret;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -269,7 +278,7 @@ err:
  *
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 
 	mask = (1 << align) - 1;
 
-	mutex_lock(&cma_mutex);
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma->lock);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + pageno;
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
 		start = pageno + mask + 1;
 	}
 
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
 }