Diffstat (limited to 'sound/core')
-rw-r--r-- | sound/core/memalloc.c       | 391
-rw-r--r-- | sound/core/memalloc_local.h |  18
-rw-r--r-- | sound/core/pcm_local.h      |   5
-rw-r--r-- | sound/core/pcm_memory.c     |  21
-rw-r--r-- | sound/core/pcm_native.c     |  20
-rw-r--r-- | sound/core/sgbuf.c          |  90
6 files changed, 322 insertions(+), 223 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 966bef5acc75..ad68bcdf82cf 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -15,99 +15,27 @@
 #include <asm/set_memory.h>
 #endif
 #include <sound/memalloc.h>
+#include "memalloc_local.h"
 
-/*
- *
- * Bus-specific memory allocators
- *
- */
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
 
-#ifdef CONFIG_HAS_DMA
-/* allocate the coherent DMA pages */
-static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
-{
-	gfp_t gfp_flags;
-
-	gfp_flags = GFP_KERNEL
-		| __GFP_COMP	/* compound page lets parts be mapped */
-		| __GFP_NORETRY /* don't trigger OOM-killer */
-		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
-	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
-					gfp_flags);
-#ifdef CONFIG_X86
-	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-		set_memory_wc((unsigned long)dmab->area,
-			      PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-}
-
-/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
+/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
+static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
+					  gfp_t default_gfp)
 {
-#ifdef CONFIG_X86
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
-		set_memory_wb((unsigned long)dmab->area,
-			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
-	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+	if (!dmab->dev.dev)
+		return default_gfp;
+	else
+		return (__force gfp_t)(unsigned long)dmab->dev.dev;
 }
 
-#ifdef CONFIG_GENERIC_ALLOCATOR
-/**
- * snd_malloc_dev_iram - allocate memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- * @size: number of bytes to allocate from the iram
- *
- * This function requires iram phandle provided via of_node
- */
-static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
+static int __snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 {
-	struct device *dev = dmab->dev.dev;
-	struct gen_pool *pool = NULL;
-
-	dmab->area = NULL;
-	dmab->addr = 0;
-
-	if (dev->of_node)
-		pool = of_gen_pool_get(dev->of_node, "iram", 0);
-
-	if (!pool)
-		return;
-
-	/* Assign the pool into private_data field */
-	dmab->private_data = pool;
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
 
-	dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
-					      PAGE_SIZE);
-}
-
-/**
- * snd_free_dev_iram - free allocated specific memory from on-chip internal ram
- * @dmab: buffer allocation record to store the allocated data
- */
-static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
-{
-	struct gen_pool *pool = dmab->private_data;
-
-	if (pool && dmab->area)
-		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
-}
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-#endif /* CONFIG_HAS_DMA */
-
-/*
- *
- * ALSA generic memory management
- *
- */
-
-static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
-					  gfp_t default_gfp)
-{
-	if (!dev)
-		return default_gfp;
-	else
-		return (__force gfp_t)(unsigned long)dev;
+	if (WARN_ON_ONCE(!ops || !ops->alloc))
+		return -EINVAL;
+	return ops->alloc(dmab, size);
 }
 
 /**
@@ -126,7 +54,7 @@ static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
 int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 			struct snd_dma_buffer *dmab)
 {
-	gfp_t gfp;
+	int err;
 
 	if (WARN_ON(!size))
 		return -ENXIO;
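Note: snd_mem_get_gfp_flags() above decodes GFP flags that CONTINUOUS/VMALLOC callers smuggle in through the (otherwise unused) device pointer. A minimal sketch of the round-trip, mirroring the historical snd_dma_continuous_data()-style cast on the caller side; the continuous_data() macro here is shown only for illustration and is not part of this patch:

    /* caller side: no real struct device, so encode the gfp mask instead */
    #define continuous_data(gfp) ((struct device *)(__force unsigned long)(gfp))

    err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
    			      continuous_data(GFP_KERNEL), size, &dmab);
    /* allocator side: snd_mem_get_gfp_flags(&dmab, GFP_KERNEL) casts the
     * dev pointer back to gfp_t, or returns the default when dev is NULL */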
@@ -140,43 +68,10 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->area = NULL;
 	dmab->addr = 0;
 	dmab->private_data = NULL;
-	switch (type) {
-	case SNDRV_DMA_TYPE_CONTINUOUS:
-		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
-		dmab->area = alloc_pages_exact(size, gfp);
-		break;
-	case SNDRV_DMA_TYPE_VMALLOC:
-		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
-		dmab->area = __vmalloc(size, gfp);
-		break;
-#ifdef CONFIG_HAS_DMA
-#ifdef CONFIG_GENERIC_ALLOCATOR
-	case SNDRV_DMA_TYPE_DEV_IRAM:
-		snd_malloc_dev_iram(dmab, size);
-		if (dmab->area)
-			break;
-		/* Internal memory might have limited size and no enough space,
-		 * so if we fail to malloc, try to fetch memory traditionally.
-		 */
-		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
-		fallthrough;
-#endif /* CONFIG_GENERIC_ALLOCATOR */
-	case SNDRV_DMA_TYPE_DEV:
-	case SNDRV_DMA_TYPE_DEV_UC:
-		snd_malloc_dev_pages(dmab, size);
-		break;
-#endif
-#ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
-		break;
-#endif
-	default:
-		pr_err("snd-malloc: invalid device type %d\n", type);
-		return -ENXIO;
-	}
-	if (! dmab->area)
+	err = __snd_dma_alloc_pages(dmab, size);
+	if (err < 0)
+		return err;
+	if (!dmab->area)
 		return -ENOMEM;
 	dmab->bytes = size;
 	return 0;
@@ -217,7 +112,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
 }
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 
-
 /**
  * snd_dma_free_pages - release the allocated buffer
  * @dmab: the buffer allocation record to release
@@ -226,32 +120,235 @@ EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
  */
 void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 {
-	switch (dmab->dev.type) {
-	case SNDRV_DMA_TYPE_CONTINUOUS:
-		free_pages_exact(dmab->area, dmab->bytes);
-		break;
-	case SNDRV_DMA_TYPE_VMALLOC:
-		vfree(dmab->area);
-		break;
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->free)
+		ops->free(dmab);
+}
+EXPORT_SYMBOL(snd_dma_free_pages);
+
+/**
+ * snd_sgbuf_get_addr - return the physical address at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_addr)
+		return ops->get_addr(dmab, offset);
+	else
+		return dmab->addr + offset;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_addr);
+
+/**
+ * snd_sgbuf_get_page - return the physical page at the corresponding offset
+ * @dmab: buffer allocation information
+ * @offset: offset in the ring buffer
+ */
+struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_page)
+		return ops->get_page(dmab, offset);
+	else
+		return virt_to_page(dmab->area + offset);
+}
+EXPORT_SYMBOL(snd_sgbuf_get_page);
+
+/**
+ * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
+ *	on sg-buffer
+ * @dmab: buffer allocation information
+ * @ofs: offset in the ring buffer
+ * @size: the requested size
+ */
+unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
+				      unsigned int ofs, unsigned int size)
+{
+	const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
+
+	if (ops && ops->get_chunk_size)
+		return ops->get_chunk_size(dmab, ofs, size);
+	else
+		return size;
+}
+EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
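Note: the three exported snd_sgbuf_* accessors are the public face of the new ops table; each falls back to flat-buffer behavior when the type installs no callback. A hedged sketch of the intended driver-side pattern, walking a possibly scattered buffer in hardware-addressable chunks (program_dma_entry() is a made-up hardware hook, not part of this patch):

    static void program_buffer(struct snd_dma_buffer *dmab, unsigned int bytes)
    {
    	unsigned int ofs = 0, chunk;

    	while (ofs < bytes) {
    		/* largest physically contiguous run starting at ofs */
    		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, bytes - ofs);
    		program_dma_entry(snd_sgbuf_get_addr(dmab, ofs), chunk);
    		ofs += chunk;
    	}
    }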
+
+/*
+ * Continuous pages allocator
+ */
+static int snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
+
+	dmab->area = alloc_pages_exact(size, gfp);
+	return 0;
+}
+
+static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
+{
+	free_pages_exact(dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_continuous_ops = {
+	.alloc = snd_dma_continuous_alloc,
+	.free = snd_dma_continuous_free,
+};
+
+/*
+ * VMALLOC allocator
+ */
+static int snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL | __GFP_HIGHMEM);
+
+	dmab->area = __vmalloc(size, gfp);
+	return 0;
+}
+
+static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
+{
+	vfree(dmab->area);
+}
+
+static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
+					   size_t offset)
+{
+	return page_to_phys(vmalloc_to_page(dmab->area + offset)) +
+		offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
+					     size_t offset)
+{
+	return vmalloc_to_page(dmab->area + offset);
+}
+
+static unsigned int
+snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
+			       unsigned int ofs, unsigned int size)
+{
+	ofs %= PAGE_SIZE;
+	size += ofs;
+	if (size > PAGE_SIZE)
+		size = PAGE_SIZE;
+	return size - ofs;
+}
+
+static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
+	.alloc = snd_dma_vmalloc_alloc,
+	.free = snd_dma_vmalloc_free,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
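Note: snd_dma_vmalloc_get_chunk_size() never reports a run that crosses a page boundary, since virtually adjacent vmalloc pages are in general not physically adjacent. The same arithmetic, mirrored in plain C so it can be checked in user space (PAGE_SIZE assumed to be 4096 here):

    #include <assert.h>

    #define PAGE_SIZE 4096u

    static unsigned int vmalloc_chunk(unsigned int ofs, unsigned int size)
    {
    	ofs %= PAGE_SIZE;	/* position within the current page */
    	size += ofs;		/* requested end, relative to page start */
    	if (size > PAGE_SIZE)	/* clip the run at the page boundary */
    		size = PAGE_SIZE;
    	return size - ofs;	/* bytes available up to the page end */
    }

    int main(void)
    {
    	assert(vmalloc_chunk(4000, 1000) == 96);  /* clipped at page end */
    	assert(vmalloc_chunk(8192, 100) == 100);  /* fits inside one page */
    	return 0;
    }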
+
 #ifdef CONFIG_HAS_DMA
+/*
+ * IRAM allocator
+ */
 #ifdef CONFIG_GENERIC_ALLOCATOR
-	case SNDRV_DMA_TYPE_DEV_IRAM:
-		snd_free_dev_iram(dmab);
-		break;
+static int snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	struct device *dev = dmab->dev.dev;
+	struct gen_pool *pool;
+
+	if (dev->of_node) {
+		pool = of_gen_pool_get(dev->of_node, "iram", 0);
+		/* Assign the pool into private_data field */
+		dmab->private_data = pool;
+
+		dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
+						      PAGE_SIZE);
+		if (dmab->area)
+			return 0;
+	}
+
+	/* Internal memory might have limited size and no enough space,
+	 * so if we fail to malloc, try to fetch memory traditionally.
+	 */
+	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+	return __snd_dma_alloc_pages(dmab, size);
+}
+
+static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
+{
+	struct gen_pool *pool = dmab->private_data;
+
+	if (pool && dmab->area)
+		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
+}
+
+static const struct snd_malloc_ops snd_dma_iram_ops = {
+	.alloc = snd_dma_iram_alloc,
+	.free = snd_dma_iram_free,
+};
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-	case SNDRV_DMA_TYPE_DEV:
-	case SNDRV_DMA_TYPE_DEV_UC:
-		snd_free_dev_pages(dmab);
-		break;
+
+/*
+ * Coherent device pages allocator
+ */
+static int snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	gfp_t gfp_flags;
+
+	gfp_flags = GFP_KERNEL
+		| __GFP_COMP	/* compound page lets parts be mapped */
+		| __GFP_NORETRY /* don't trigger OOM-killer */
+		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
+	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+					gfp_flags);
+#ifdef CONFIG_X86
+	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+		set_memory_wc((unsigned long)dmab->area,
+			      PAGE_ALIGN(size) >> PAGE_SHIFT);
 #endif
+	return 0;
+}
+
+static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
+{
+#ifdef CONFIG_X86
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+		set_memory_wb((unsigned long)dmab->area,
+			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+}
+
+static const struct snd_malloc_ops snd_dma_dev_ops = {
+	.alloc = snd_dma_dev_alloc,
+	.free = snd_dma_dev_free,
+};
+#endif /* CONFIG_HAS_DMA */
+
+/*
+ * Entry points
+ */
+static const struct snd_malloc_ops *dma_ops[] = {
+	[SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
+	[SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
+#ifdef CONFIG_HAS_DMA
+	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
+	[SNDRV_DMA_TYPE_DEV_UC] = &snd_dma_dev_ops,
+#ifdef CONFIG_GENERIC_ALLOCATOR
+	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
+#endif /* CONFIG_GENERIC_ALLOCATOR */
+#endif /* CONFIG_HAS_DMA */
 #ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		snd_free_sgbuf_pages(dmab);
-		break;
+	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+	[SNDRV_DMA_TYPE_DEV_UC_SG] = &snd_dma_sg_ops,
 #endif
-	default:
-		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
-	}
+};
+
+static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
+{
+	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
+			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
+		return NULL;
+	return dma_ops[dmab->dev.type];
 }
-EXPORT_SYMBOL(snd_dma_free_pages);
diff --git a/sound/core/memalloc_local.h b/sound/core/memalloc_local.h
new file mode 100644
index 000000000000..fe55416253bf
--- /dev/null
+++ b/sound/core/memalloc_local.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __MEMALLOC_LOCAL_H
+#define __MEMALLOC_LOCAL_H
+
+struct snd_malloc_ops {
+	int (*alloc)(struct snd_dma_buffer *dmab, size_t size);
+	void (*free)(struct snd_dma_buffer *dmab);
+	dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
+	struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
+	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
+				       unsigned int ofs, unsigned int size);
+};
+
+#ifdef CONFIG_SND_DMA_SGBUF
+extern const struct snd_malloc_ops snd_dma_sg_ops;
+#endif
+
+#endif /* __MEMALLOC_LOCAL_H */
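Note: with dma_ops[] in place, every public entry point reduces to a bounds-checked table lookup plus an indirect call, and an unknown type now trips WARN_ON_ONCE() and -EINVAL instead of the old pr_err()/-ENXIO. The caller-side contract, sketched with a placeholder device pointer and buffer size:

    struct snd_dma_buffer dmab;
    int err;

    err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card_dev /* placeholder */,
    			      64 * 1024, &dmab);
    if (err < 0)
    	return err;	/* -EINVAL for an unknown type, -ENOMEM when exhausted */
    /* ... dmab.area is the CPU mapping, dmab.addr the DMA address ... */
    snd_dma_free_pages(&dmab);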
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index e3b3558aeab6..fe9689b8a6a6 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -65,11 +65,6 @@ void __snd_pcm_xrun(struct snd_pcm_substream *substream);
 void snd_pcm_group_init(struct snd_pcm_group *group);
 void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq);
 
-#ifdef CONFIG_SND_DMA_SGBUF
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
-				    unsigned long offset);
-#endif
-
 #define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
 
 /* loop over all PCM substreams */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 542a75babdee..d7621ed105bd 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -337,27 +337,6 @@ void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
 }
 EXPORT_SYMBOL(snd_pcm_set_managed_buffer_all);
 
-#ifdef CONFIG_SND_DMA_SGBUF
-/*
- * snd_pcm_sgbuf_ops_page - get the page struct at the given offset
- * @substream: the pcm substream instance
- * @offset: the buffer offset
- *
- * Used as the page callback of PCM ops.
- *
- * Return: The page struct at the given buffer offset. %NULL on failure.
- */
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigned long offset)
-{
-	struct snd_sg_buf *sgbuf = snd_pcm_substream_sgbuf(substream);
-
-	unsigned int idx = offset >> PAGE_SHIFT;
-	if (idx >= (unsigned int)sgbuf->pages)
-		return NULL;
-	return sgbuf->page_table[idx];
-}
-#endif /* CONFIG_SND_DMA_SGBUF */
-
 /**
  * snd_pcm_lib_malloc_pages - allocate the DMA buffer
  * @substream: the substream to allocate the DMA buffer to
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index eb468573f070..48d63dbb17ba 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3644,24 +3644,6 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
 }
 #endif /* coherent mmap */
 
-static inline struct page *
-snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
-{
-	void *vaddr = substream->runtime->dma_area + ofs;
-
-	switch (substream->dma_buffer.dev.type) {
-#ifdef CONFIG_SND_DMA_SGBUF
-	case SNDRV_DMA_TYPE_DEV_SG:
-	case SNDRV_DMA_TYPE_DEV_UC_SG:
-		return snd_pcm_sgbuf_ops_page(substream, ofs);
-#endif /* CONFIG_SND_DMA_SGBUF */
-	case SNDRV_DMA_TYPE_VMALLOC:
-		return vmalloc_to_page(vaddr);
-	default:
-		return virt_to_page(vaddr);
-	}
-}
-
 /*
  * fault callback for mmapping a RAM page
  */
@@ -3683,7 +3665,7 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf)
 	if (substream->ops->page)
 		page = substream->ops->page(substream, offset);
 	else
-		page = snd_pcm_default_page_ops(substream, offset);
+		page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset);
 	if (!page)
 		return VM_FAULT_SIGBUS;
 	get_page(page);
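Note: the removed snd_pcm_default_page_ops() switch is fully subsumed by snd_sgbuf_get_page(), whose per-type ops cover the SG and vmalloc cases and whose virt_to_page() fallback covers the rest. One practical consequence, sketched with placeholder callback names that are not from this patch: a driver using SG buffers no longer needs to provide a .page callback at all.

    static const struct snd_pcm_ops my_pcm_ops = {
    	.open      = my_open,		/* placeholder callbacks */
    	.hw_params = my_hw_params,
    	.trigger   = my_trigger,
    	.pointer   = my_pointer,
    	/* no .page: snd_pcm_mmap_data_fault() falls back to
    	 * snd_sgbuf_get_page() on the substream's DMA buffer */
    };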
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 29ddb76187e5..232cf3f1bcb3 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -10,20 +10,34 @@
 #include <linux/vmalloc.h>
 #include <linux/export.h>
 #include <sound/memalloc.h>
-
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+	void *buf;
+	dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+	int size;	/* allocated byte size */
+	int pages;	/* allocated pages */
+	int tblsize;	/* allocated table size */
+	struct snd_sg_page *table;	/* address table */
+	struct page **page_table;	/* page table (for vmap/vunmap) */
+	struct device *dev;
+};
 
 /* table entries are align to 32 */
 #define SGBUF_TBL_ALIGN		32
 #define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
 
-int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
 {
 	struct snd_sg_buf *sgbuf = dmab->private_data;
 	struct snd_dma_buffer tmpb;
 	int i;
 
-	if (! sgbuf)
-		return -EINVAL;
+	if (!sgbuf)
+		return;
 
 	vunmap(dmab->area);
 	dmab->area = NULL;
@@ -45,15 +59,11 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 	kfree(sgbuf->page_table);
 	kfree(sgbuf);
 	dmab->private_data = NULL;
-
-	return 0;
 }
 
 #define MAX_ALLOC_PAGES		32
 
-void *snd_malloc_sgbuf_pages(struct device *device,
-			     size_t size, struct snd_dma_buffer *dmab,
-			     size_t *res_size)
+static int snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	struct snd_sg_buf *sgbuf;
 	unsigned int i, pages, chunk, maxpages;
@@ -63,18 +73,16 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 	int type = SNDRV_DMA_TYPE_DEV;
 	pgprot_t prot = PAGE_KERNEL;
 
-	dmab->area = NULL;
-	dmab->addr = 0;
 	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
-	if (! sgbuf)
-		return NULL;
+	if (!sgbuf)
+		return -ENOMEM;
 	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
 		type = SNDRV_DMA_TYPE_DEV_UC;
 #ifdef pgprot_noncached
 		prot = pgprot_noncached(PAGE_KERNEL);
 #endif
 	}
-	sgbuf->dev = device;
+	sgbuf->dev = dmab->dev.dev;
 	pages = snd_sgbuf_aligned_pages(size);
 	sgbuf->tblsize = sgbuf_align_table(pages);
 	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
@@ -94,12 +102,10 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 		if (chunk > maxpages)
 			chunk = maxpages;
 		chunk <<= PAGE_SHIFT;
-		if (snd_dma_alloc_pages_fallback(type, device,
+		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
 						 chunk, &tmpb) < 0) {
 			if (!sgbuf->pages)
 				goto _failed;
-			if (!res_size)
-				goto _failed;
 			size = sgbuf->pages * PAGE_SIZE;
 			break;
 		}
@@ -124,27 +130,42 @@ void *snd_malloc_sgbuf_pages(struct device *device,
 	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
 	if (! dmab->area)
 		goto _failed;
-	if (res_size)
-		*res_size = sgbuf->size;
-	return dmab->area;
+	return 0;
 
  _failed:
-	snd_free_sgbuf_pages(dmab); /* free the table */
-	return NULL;
+	snd_dma_sg_free(dmab); /* free the table */
+	return -ENOMEM;
 }
 
-/*
- * compute the max chunk size with continuous pages on sg-buffer
- */
-unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
-				      unsigned int ofs, unsigned int size)
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+				      size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	dma_addr_t addr;
+
+	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+	return addr + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+					size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	unsigned int idx = offset >> PAGE_SHIFT;
+
+	if (idx >= (unsigned int)sgbuf->pages)
+		return NULL;
+	return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+					      unsigned int ofs,
+					      unsigned int size)
 {
 	struct snd_sg_buf *sg = dmab->private_data;
 	unsigned int start, end, pg;
 
-	if (!sg)
-		return size;
-
 	start = ofs >> PAGE_SHIFT;
 	end = (ofs + size - 1) >> PAGE_SHIFT;
 	/* check page continuity */
@@ -160,4 +181,11 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
 	/* ok, all on continuous pages */
 	return size;
 }
-EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+	.alloc = snd_dma_sg_alloc,
+	.free = snd_dma_sg_free,
+	.get_addr = snd_dma_sg_get_addr,
+	.get_page = snd_dma_sg_get_page,
+	.get_chunk_size = snd_dma_sg_get_chunk_size,
+};
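Note: with snd_dma_sg_ops wired into the table, an SG-backed buffer is allocated and released through exactly the same entry points as every other type; only the type constant differs. An end-to-end sketch with a placeholder device and size — dmab.area is the vmap()ed, virtually contiguous view, while hardware addresses must be fetched chunk by chunk:

    struct snd_dma_buffer dmab;
    dma_addr_t addr;
    unsigned int contig;

    if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev /* placeholder */,
    			    512 * 1024, &dmab) < 0)
    	return -ENOMEM;

    addr   = snd_sgbuf_get_addr(&dmab, 0);	/* DMA address at offset 0 */
    contig = snd_sgbuf_get_chunk_size(&dmab, 0, 512 * 1024);
    /* ... program the hardware one contiguous chunk at a time ... */

    snd_dma_free_pages(&dmab);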