author    | Mikael Starvik <mikael.starvik@axis.com> | 2005-07-27 11:44:40 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org>    | 2005-07-27 16:26:00 -0700
commit    | 59c61138a556cf89692e0d5bd2c9de5df54b824f
tree      | d068341a2b6384a5b8be6b86b85a2b1073f43a19 /include/asm-cris/dma-mapping.h
parent    | 4f18cfbf0990bfc2e8e7706eeb9e5bef898ae923
[PATCH] CRIS update: pci
Patches to make it possible to add PCI support.
Signed-off-by: Mikael Starvik <starvik@axis.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-cris/dma-mapping.h')
-rw-r--r-- | include/asm-cris/dma-mapping.h | 170
1 file changed, 112 insertions(+), 58 deletions(-)
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index 0d770f60127a..0b5c3fdaefe1 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -1,125 +1,179 @@
+/* DMA mapping. Nothing tricky here, just virt_to_phys */
+
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include "scatterlist.h"
+#include <linux/mm.h>
+#include <linux/kernel.h>
 
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	BUG();
-	return 0;
-}
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
 
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	BUG();
-	return 1;
-}
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+#ifdef CONFIG_PCI
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, int flag);
 
+void dma_free_coherent(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle);
+#else
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                   int flag)
+		   int flag)
 {
-        BUG();
-        return NULL;
+	BUG();
+	return NULL;
 }
 
 static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-                  dma_addr_t dma_handle)
+		  dma_addr_t dma_handle)
 {
-        BUG();
+	BUG();
 }
-
+#endif
 static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+dma_map_single(struct device *dev, void *ptr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return virt_to_phys(ptr);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction)
+{
+	printk("Map sg\n");
+	return nents;
 }
 
 static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size,
-	     enum dma_data_direction direction)
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction)
 {
-	BUG();
-	return 0;
+	BUG_ON(direction == DMA_NONE);
+	return page_to_phys(page) + offset;
 }
 
 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	BUG();
-	return 1;
-}
 
 static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	     enum dma_data_direction direction)
 {
-	BUG();
+	BUG_ON(direction == DMA_NONE);
 }
 
 static inline void
-dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction)
 {
-	BUG();
 }
 
 static inline void
-dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
-	    enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+			   enum dma_data_direction direction)
 {
-	BUG();
 }
 
-/* Now for the API extensions over the pci_ one */
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+}
 
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+}
 
-static inline int
-dma_get_cache_alignment(void)
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction)
 {
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
 }
 
 static inline void
-dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction direction)
 {
-	BUG();
 }
 
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if(mask < 0x00ffffff)
+		return 0;
+
+	return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+	if(!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
+	*dev->dma_mask = mask;
+
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+	return (1 << L1_CACHE_SHIFT_MAX);
+}
+
+#define dma_is_consistent(d)	(1)
+
 static inline void
 dma_cache_sync(void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG();
 }
 
-#endif
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+
+#endif
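
For context, here is a minimal sketch (not part of the patch) of how a driver might exercise this header. It follows the 2.6-era conventions the header defines: dma_alloc_coherent() takes a plain int flag and dma_mapping_error() takes only the bus address. The device pointer, buffer size, and error codes below are assumptions made purely for illustration; on CRIS, dma_map_single() simply returns virt_to_phys(), and dma_alloc_coherent() has a real backing implementation only when CONFIG_PCI is set.

/*
 * Illustrative sketch only -- not part of the patch.  "my_dev" and
 * MY_BUF_SIZE are hypothetical names used for the example.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define MY_BUF_SIZE 4096	/* hypothetical buffer size */

static int my_setup_dma(struct device *my_dev)
{
	void *desc, *data;
	dma_addr_t desc_bus, data_bus;

	/* dma_set_mask() fails with -EIO when dma_supported() rejects
	 * the mask; this header refuses masks narrower than 24 bits. */
	if (dma_set_mask(my_dev, 0xffffffffULL))
		return -EIO;

	/* Coherent allocation: backed by a real implementation only
	 * when CONFIG_PCI is enabled, otherwise it still BUG()s. */
	desc = dma_alloc_coherent(my_dev, MY_BUF_SIZE, &desc_bus, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* Streaming mapping of an ordinary kernel buffer; on CRIS this
	 * reduces to virt_to_phys(). */
	data = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
	if (!data) {
		dma_free_coherent(my_dev, MY_BUF_SIZE, desc, desc_bus);
		return -ENOMEM;
	}
	data_bus = dma_map_single(my_dev, data, MY_BUF_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(data_bus)) {	/* always 0 in this header */
		kfree(data);
		dma_free_coherent(my_dev, MY_BUF_SIZE, desc, desc_bus);
		return -EIO;
	}

	/* ... hand desc_bus/data_bus to the device, then tear down ... */
	dma_unmap_single(my_dev, data_bus, MY_BUF_SIZE, DMA_TO_DEVICE);
	kfree(data);
	dma_free_coherent(my_dev, MY_BUF_SIZE, desc, desc_bus);
	return 0;
}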