/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>
#include <asm/pgtable.h>

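/*
 * dev_is_dma_coherent() reports whether the device is coherent with the
 * CPU caches.  Architectures that select any of the cache maintenance
 * hooks below track coherence per device; everywhere else all devices
 * are assumed coherent.
 */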
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

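/*
 * Optional hint that coherent allocations for this device may be left
 * cacheable.  The underlying struct device field only exists in trees
 * that select CONFIG_DMA_COHERENT_HINT_CACHED; elsewhere the hint is
 * always false.
 */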
#ifdef CONFIG_DMA_COHERENT_HINT_CACHED
static inline bool dev_is_dma_coherent_hint_cached(struct device *dev)
{
	return dev->dma_coherent_hint_cached;
}
#else
static inline bool dev_is_dma_coherent_hint_cached(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_COHERENT_HINT_CACHED */

/*
 * Check if an allocation needs to be marked uncached to be coherent.
 * The DMA_ATTR_FORCE_NON_COHERENT / DMA_ATTR_FORCE_COHERENT attributes
 * take precedence over the device's own coherence setting.
 */
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
		return true;
	if (attrs & DMA_ATTR_FORCE_COHERENT)
		return false;
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return false;
	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		return false;
	return true;
}

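/*
 * Allocator hooks implemented by architectures that need more than the
 * plain dma-direct allocator, e.g. to remap the buffer uncached.
 * arch_dma_coherent_to_pfn() translates a kernel address returned by
 * arch_dma_alloc() back to a pfn for mmap support.
 */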
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

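/*
 * dma_pgprot() computes the page protection used when DMA memory is
 * remapped in the kernel or mmap()ed to user space, based on the
 * device's coherence and attributes such as DMA_ATTR_WRITE_COMBINE.
 */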
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

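/*
 * Backend for dma_cache_sync() on memory that was allocated with
 * DMA_ATTR_NON_CONSISTENT; a no-op on architectures that do not select
 * CONFIG_DMA_NONCOHERENT_CACHE_SYNC.
 */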
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

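/*
 * Cache maintenance hooks called from the dma-direct map, unmap and
 * sync paths.  arch_sync_dma_for_device() runs before the device
 * accesses the buffer (typically a writeback and/or invalidate), and
 * arch_sync_dma_for_cpu() afterwards so the CPU sees what the device
 * wrote.  A minimal sketch of an arch implementation, assuming
 * illustrative cache_wb_range()/cache_inv_range() helpers:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			cache_inv_range(paddr, paddr + size);
 *		else
 *			cache_wb_range(paddr, paddr + size);
 *	}
 */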
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

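/*
 * Global variant for the few architectures that have to flush or
 * synchronize all caches when a DMA transfer completes, instead of
 * (or in addition to) the per-range hook above.
 */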
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

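/*
 * Called on freshly allocated pages before they are handed out as
 * coherent memory, so that any stale cache lines covering them are
 * written back and invalidated first.
 */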
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

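/*
 * Translate between the cached kernel mapping of a buffer and the
 * architecture's uncached alias of the same memory (for example a MIPS
 * KSEG1 address); only architectures that have such an alias provide
 * real implementations.
 */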
void *uncached_kernel_address(void *addr);
void *cached_kernel_address(void *addr);

#endif /* _LINUX_DMA_NONCOHERENT_H */