dma-mapping: add __dma_from_device_group_begin()/end()
When a structure contains a buffer that DMA writes to alongside fields
that the CPU writes to, cache line sharing between the DMA buffer and
CPU-written fields can cause data corruption on non-cache-coherent
platforms.
Add __dma_from_device_group_begin()/end() annotations to ensure proper
alignment to prevent this:
struct my_device {
spinlock_t lock1;
__dma_from_device_group_begin(dma);
char dma_buffer1[16];
char dma_buffer2[16];
__dma_from_device_group_end(dma);
spinlock_t lock2;
};
Message-ID: <19163086d5e4704c316f18f6da06bc1c72968904.1767601130.git.mst@redhat.com>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Petr Tesarik <ptesarik@suse.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent
f6a15d8549
commit
ca085faabb
|
|
@@ -7,6 +7,7 @@
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bug.h>
+#include <linux/cache.h>
 
 /**
  * List of possible attributes associated with a DMA mapping. The semantics
|
|
@@ -703,6 +704,18 @@ static inline int dma_get_cache_alignment(void)
 }
 #endif
 
+#ifdef ARCH_HAS_DMA_MINALIGN
+#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
+#else
+#define ____dma_from_device_aligned
+#endif
+/* Mark start of DMA buffer */
+#define __dma_from_device_group_begin(GROUP) \
+	__cacheline_group_begin(GROUP) ____dma_from_device_aligned
+/* Mark end of DMA buffer */
+#define __dma_from_device_group_end(GROUP) \
+	__cacheline_group_end(GROUP) ____dma_from_device_aligned
+
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
 					 dma_addr_t *dma_handle, gfp_t gfp)
 {
|
|
|||
Loading…
Reference in New Issue