author	Akinobu Mita	2015-06-25 15:00:48 -0700
committer	Linus Torvalds	2015-06-25 17:00:37 -0700
commit	20342f1db5f4ee7ec916dfe248340c78d65a3230 (patch)
tree	477e961a744c30ef7c155533e3e3bae447e027a2 /arch/avr32
parent	0989e1f98e3ae30deebf732d0861ca47bb55e25b (diff)
avr32: use for_each_sg()
This replaces the plain loop over the sglist array with the for_each_sg() macro, which consists of sg_next() function calls.

Since avr32 doesn't select ARCH_HAS_SG_CHAIN, it is not strictly necessary to use for_each_sg() to loop over each sg element, but doing so helps find problems with drivers that do not properly initialize their sg tables when CONFIG_DEBUG_SG is enabled.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Acked-by: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
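For reference, for_each_sg() is defined in include/linux/scatterlist.h roughly as follows (a sketch for kernels of this era, not part of this patch):

	#define for_each_sg(sglist, sg, nr, __i)				\
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

On configurations without ARCH_HAS_SG_CHAIN, sg_next() simply advances to the next array entry, so the loop behaves like the old sg[i] indexing. With CONFIG_DEBUG_SG enabled, sg_next() additionally sanity-checks each entry's debug marker, which is what flags drivers that never called sg_init_table().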
Diffstat (limited to 'arch/avr32')
-rw-r--r--	arch/avr32/include/asm/dma-mapping.h	19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index b3d18f9f3e8d..ae7ac9205d20 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -209,17 +209,18 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  * the same here.
  */
 static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	   enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nents; i++) {
+	for_each_sg(sglist, sg, nents, i) {
 		char *virt;
 
-		sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
-		virt = sg_virt(&sg[i]);
-		dma_cache_sync(dev, virt, sg[i].length, direction);
+		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+		virt = sg_virt(sg);
+		dma_cache_sync(dev, virt, sg->length, direction);
 	}
 
 	return nents;
@@ -321,14 +322,14 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 }
 
 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
 		       int nents, enum dma_data_direction direction)
 {
 	int i;
+	struct scatterlist *sg;
 
-	for (i = 0; i < nents; i++) {
-		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
-	}
+	for_each_sg(sglist, sg, nents, i)
+		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }
 
 /* Now for the API extensions over the pci_ one */
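The CONFIG_DEBUG_SG benefit mentioned in the commit message shows up on the caller side. Below is a hypothetical driver-style sketch (not from this patch; example_map_pages() and its parameters are made up for illustration) of how an sg table is expected to be initialized before dma_map_sg() walks it with for_each_sg():

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int example_map_pages(struct device *dev, struct page **pages,
				     unsigned int npages)
	{
		struct scatterlist table[4];	/* assumes npages <= 4 for this sketch */
		struct scatterlist *sg;
		int i, nents;

		/*
		 * Marks the last entry and, with CONFIG_DEBUG_SG, stamps each
		 * entry with a debug marker; skipping this is what the check
		 * in sg_next() would catch.
		 */
		sg_init_table(table, npages);
		for_each_sg(table, sg, npages, i)
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);

		nents = dma_map_sg(dev, table, npages, DMA_TO_DEVICE);
		if (nents == 0)
			return -ENOMEM;

		/* ... program the device using sg_dma_address()/sg_dma_len() ... */

		dma_unmap_sg(dev, table, npages, DMA_TO_DEVICE);
		return 0;
	}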