/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: first pfn past the end of the dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
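
/*
 * A hedged sketch of how the bookkeeping fits together: a driver that sets
 * aside the head of its range for memmap storage might describe it like
 * this (the values and the variable name are made up for illustration;
 * fields not shown start out zero):
 *
 *	struct vmem_altmap altmap = {
 *		.base_pfn = PHYS_PFN(0x100000000ULL),
 *		.reserve  = 2,
 *		.free     = 1024,
 *	};
 *
 * vmemmap_populate() then carves struct page storage out of the @free
 * pages, advancing @alloc (and @align, for alignment padding) as it goes,
 * while the @reserve pages stay untouched for the driver.
 */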

/*
 * Specialize ZONE_DEVICE memory into multiple types, each having a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In support of coordinating page
 * pinning vs other operations, MEMORY_DEVICE_FS_DAX arranges for a
 * wakeup event whenever a page is unpinned and becomes idle. This
 * wakeup is used to coordinate physical address space management (ex:
 * fs truncate/hole punch) vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_DEVDAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In contrast to
 * MEMORY_DEVICE_FS_DAX, this memory is accessed via a device-dax
 * character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
 * transactions.
 */
enum memory_type {
	/* 0 is reserved to catch uninitialized type fields */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_DEVDAX,
	MEMORY_DEVICE_PCI_P2PDMA,
};
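
/*
 * Illustrative only: a driver exposing CPU-unaddressable device memory
 * tags its dev_pagemap (defined below) with the matching type before
 * registering it:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->ops = &foo_pgmap_ops;
 *
 * where foo_pgmap_ops is a hypothetical method table supplying the
 * migrate_to_ram() callback that private memory needs (see the sketch
 * after struct dev_pagemap_ops below).
 */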

struct dev_pagemap_ops {
	/*
	 * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
	 * reach 0 refcount unless there is a refcount bug. This allows the
	 * device driver to implement its own memory management.)
	 */
	void (*page_free)(struct page *page);

	/*
	 * Transition the refcount in struct dev_pagemap to the dead state.
	 */
	void (*kill)(struct dev_pagemap *pgmap);

	/*
	 * Wait for refcount in struct dev_pagemap to be idle and reap it.
	 */
	void (*cleanup)(struct dev_pagemap *pgmap);

	/*
	 * Used for private (un-addressable) device memory only. Must migrate
	 * the page back to a CPU accessible page.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};
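
/*
 * A minimal sketch of a driver-side method table; the foo_* names are
 * hypothetical and the callback bodies are elided, shown only to
 * illustrate how the ops hook up:
 *
 *	static vm_fault_t foo_migrate_to_ram(struct vm_fault *vmf)
 *	{
 *		...migrate the device-private page backing vmf back to
 *		   system RAM, then return 0 or VM_FAULT_SIGBUS...
 *	}
 *
 *	static void foo_page_free(struct page *page)
 *	{
 *		...reclaim the device block backing @page...
 *	}
 *
 *	static const struct dev_pagemap_ops foo_pgmap_ops = {
 *		.page_free	= foo_page_free,
 *		.migrate_to_ram	= foo_migrate_to_ram,
 *	};
 */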

#define PGMAP_ALTMAP_VALID	(1 << 0)

/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @internal_ref: internal reference if @ref is not provided by the caller
 * @done: completion for @internal_ref
 * @type: memory type: see enum memory_type above
 * @flags: PGMAP_* flags to specify detailed behavior
 * @ops: method table
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct resource res;
	struct percpu_ref *ref;
	struct percpu_ref internal_ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	const struct dev_pagemap_ops *ops;
};

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
	if (pgmap->flags & PGMAP_ALTMAP_VALID)
		return &pgmap->altmap;
	return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
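
/*
 * A hedged usage sketch of devm_memremap_pages(): the foo_probe() shape is
 * hypothetical, how @res is obtained depends on the memory type (a PCI BAR
 * is used here for the P2PDMA case), and error handling is abbreviated.
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		struct dev_pagemap *pgmap;
 *		void *addr;
 *
 *		pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
 *		if (!pgmap)
 *			return -ENOMEM;
 *
 *		pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 *		pgmap->res.start = pci_resource_start(pdev, 0);
 *		pgmap->res.end = pci_resource_end(pdev, 0);
 *		pgmap->res.flags = pci_resource_flags(pdev, 0);
 *
 *		addr = devm_memremap_pages(&pdev->dev, pgmap);
 *		return PTR_ERR_OR_ZERO(addr);
 *	}
 *
 * With no @ref supplied, the core falls back to @internal_ref and
 * sequences the kill/cleanup of the mapping itself.
 */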
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	return NULL;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
}
#endif /* CONFIG_ZONE_DEVICE */

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
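
/*
 * Illustrative pairing of get_dev_pagemap() with put_dev_pagemap(); the
 * caller context and the source of @pfn are hypothetical.
 *
 *	struct dev_pagemap *pgmap;
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);
 *	if (pgmap) {
 *		...pfn belongs to a ZONE_DEVICE mapping; use it...
 *		put_dev_pagemap(pgmap);
 *	}
 */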
#endif /* _LINUX_MEMREMAP_H_ */