author:    Russell King <rmk@dyn-67.arm.linux.org.uk>    2005-11-25 15:52:51 +0000
committer: Russell King <rmk+kernel@arm.linux.org.uk>    2005-11-25 15:52:51 +0000
commit:    5edf71ae129167ac276ebac18b25ccc7bec6ac3e (patch)
tree:      619daac39d08dbf66fb19698205b263e21bba3d0 /arch/arm/mm
parent:    3c0bdac3875505516eda1c6b6e68dd84eff3b231 (diff)
[ARM] Do not call flush_tlb_kernel_range() with IRQs disabled.
We must not call TLB maintenance operations with interrupts disabled, otherwise we risk a lockup in the SMP IPI code.

This means that consistent_free() cannot be called from a context with IRQs disabled. In addition, we must not hold the lock in consistent_free() when we call flush_tlb_kernel_range().

However, we must continue to prevent consistent_alloc() from re-using the memory region until we have finished tearing down the mapping and dealing with the TLB. Therefore, leave the vm_region entry in the list, but mark it inactive before dropping the lock and starting the tear-down process. After the mapping has been torn down, re-acquire the lock and remove the entry from the list.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
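In outline, the locking pattern described above reduces to the sketch below. This is illustrative only, not the kernel source itself: example_free() is a hypothetical helper standing in for the relevant part of dma_free_coherent(), the page and PTE teardown is elided, and consistent_lock, consistent_head, struct vm_region and vm_region_find() are the definitions from arch/arm/mm/consistent.c shown in the diff further down.

    /*
     * Sketch of the free path after this patch. Assumes it is built in the
     * context of arch/arm/mm/consistent.c, where consistent_lock,
     * consistent_head, struct vm_region and vm_region_find() already exist.
     */
    static void example_free(unsigned long addr)	/* hypothetical name */
    {
    	struct vm_region *c;
    	unsigned long flags;

    	WARN_ON(irqs_disabled());	/* the TLB flush below needs IRQs on */

    	spin_lock_irqsave(&consistent_lock, flags);
    	c = vm_region_find(&consistent_head, addr);
    	if (!c) {
    		spin_unlock_irqrestore(&consistent_lock, flags);
    		return;
    	}

    	/*
    	 * Mark the region inactive so consistent_alloc() cannot re-use it:
    	 * vm_region_find() now skips entries with vm_active == 0.
    	 */
    	c->vm_active = 0;
    	spin_unlock_irqrestore(&consistent_lock, flags);

    	/* ... tear down the page mappings here ... */

    	/*
    	 * TLB maintenance is done with IRQs enabled and no spinlock held,
    	 * so the SMP IPI behind flush_tlb_kernel_range() can be serviced.
    	 */
    	flush_tlb_kernel_range(c->vm_start, c->vm_end);

    	/* Only now unlink and free the bookkeeping entry. */
    	spin_lock_irqsave(&consistent_lock, flags);
    	list_del(&c->vm_list);
    	spin_unlock_irqrestore(&consistent_lock, flags);
    	kfree(c);
    }

The key point is that the entry stays on the list, merely marked inactive, across the unlocked window, so consistent_alloc() cannot hand out the same virtual range while the mapping is being torn down and the TLB flushed.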
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/consistent.c | 13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 47b0b767f080..dbfe9e891f01 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -66,6 +66,7 @@ struct vm_region {
unsigned long vm_start;
unsigned long vm_end;
struct page *vm_pages;
+ int vm_active;
};
static struct vm_region consistent_head = {
@@ -104,6 +105,7 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
list_add_tail(&new->vm_list, &c->vm_list);
new->vm_start = addr;
new->vm_end = addr + size;
+ new->vm_active = 1;
spin_unlock_irqrestore(&consistent_lock, flags);
return new;
@@ -120,7 +122,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
struct vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
- if (c->vm_start == addr)
+ if (c->vm_active && c->vm_start == addr)
goto out;
}
c = NULL;
@@ -319,6 +321,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
/*
* free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
@@ -326,14 +329,18 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
unsigned long flags, addr;
pte_t *ptep;
+ WARN_ON(irqs_disabled());
+
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
-
c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
if (!c)
goto no_area;
+ c->vm_active = 0;
+ spin_unlock_irqrestore(&consistent_lock, flags);
+
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
@@ -372,8 +379,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
flush_tlb_kernel_range(c->vm_start, c->vm_end);
+ spin_lock_irqsave(&consistent_lock, flags);
list_del(&c->vm_list);
-
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);