blob: 72a7f03a59f4c44f0aef6f50f740f6b12df977ad [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Matthew Wilcoxc94c2ac2015-09-08 14:58:40 -07002#ifndef _LINUX_DAX_H
3#define _LINUX_DAX_H
4
5#include <linux/fs.h>
6#include <linux/mm.h>
Jan Kara4f622932016-05-12 18:29:17 +02007#include <linux/radix-tree.h>
Matthew Wilcoxc94c2ac2015-09-08 14:58:40 -07008#include <asm/pgtable.h>
9
Pankaj Guptafefc1d92019-07-05 19:33:24 +053010/* Flag for synchronous flush */
11#define DAXDEV_F_SYNC (1UL << 0)
12
Matthew Wilcox27359fd2018-11-30 11:05:06 -050013typedef unsigned long dax_entry_t;
14
Christoph Hellwiga254e562016-09-19 11:24:49 +100015struct iomap_ops;
Dan Williams6568b082017-01-24 18:44:18 -080016struct dax_device;
/*
 * Driver-supplied operations backing a dax_device (passed to alloc_dax()).
 */
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/*
	 * Validate whether this device is usable as an fsdax backing
	 * device.
	 */
	bool (*dax_supported)(struct dax_device *, struct block_device *, int,
			sector_t, sector_t);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
	/* copy_to_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};
Christoph Hellwiga254e562016-09-19 11:24:49 +100038
Dan Williams6e0c90d2017-06-26 21:28:41 -070039extern struct attribute_group dax_attribute_group;
40
Dan Williamsef510422017-05-08 10:55:27 -070041#if IS_ENABLED(CONFIG_DAX)
42struct dax_device *dax_get_by_host(const char *host);
Dan Williams976431b2018-03-29 17:22:13 -070043struct dax_device *alloc_dax(void *private, const char *host,
Pankaj Guptafefc1d92019-07-05 19:33:24 +053044 const struct dax_operations *ops, unsigned long flags);
Dan Williamsef510422017-05-08 10:55:27 -070045void put_dax(struct dax_device *dax_dev);
Dan Williams976431b2018-03-29 17:22:13 -070046void kill_dax(struct dax_device *dax_dev);
47void dax_write_cache(struct dax_device *dax_dev, bool wc);
48bool dax_write_cache_enabled(struct dax_device *dax_dev);
Pankaj Guptafefc1d92019-07-05 19:33:24 +053049bool __dax_synchronous(struct dax_device *dax_dev);
/* Does the device persist writes without an explicit flush? (DAXDEV_F_SYNC) */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
54void __set_dax_synchronous(struct dax_device *dax_dev);
/* Mark the device as synchronous (see DAXDEV_F_SYNC). */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
Jan Karaef6458f2020-09-21 11:33:23 +020059bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
60 int blocksize, sector_t start, sector_t len);
Pankaj Gupta32de1482019-07-05 19:33:26 +053061/*
62 * Check if given mapping is supported by the file / underlying device.
63 */
64static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
65 struct dax_device *dax_dev)
66{
67 if (!(vma->vm_flags & VM_SYNC))
68 return true;
69 if (!IS_DAX(file_inode(vma->vm_file)))
70 return false;
71 return dax_synchronous(dax_dev);
72}
Dan Williamsef510422017-05-08 10:55:27 -070073#else
/* CONFIG_DAX=n stub: no dax_device registry exists, lookup always fails. */
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
/* CONFIG_DAX=n stub: dax_device allocation is unavailable. */
static inline struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops, unsigned long flags)
{
	/*
	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
	 * NULL is an error or expected.
	 */
	return NULL;
}
/* CONFIG_DAX=n stub: no dax_device instances exist, nothing to release. */
static inline void put_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: nothing to tear down. */
static inline void kill_dax(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: no write cache to configure. */
static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
}
/* CONFIG_DAX=n stub: report no write cache. */
static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
	return false;
}
/*
 * CONFIG_DAX=n stub: with no real dax_device the question is moot;
 * report synchronous so callers need no special casing here.
 */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
/* CONFIG_DAX=n stub: no device state to update. */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
/* CONFIG_DAX=n stub: DAX can never be supported. */
static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}
Pankaj Gupta32de1482019-07-05 19:33:26 +0530113static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
114 struct dax_device *dax_dev)
115{
116 return !(vma->vm_flags & VM_SYNC);
117}
Dan Williamsef510422017-05-08 10:55:27 -0700118#endif
119
Dan Williamsf44c7762018-03-07 15:26:44 -0800120struct writeback_control;
Dan Williamsf5705aa8c2017-05-13 16:31:05 -0700121int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
122#if IS_ENABLED(CONFIG_FS_DAX)
Dave Jiang80660f22018-05-30 13:03:46 -0700123bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
/* Can @bdev host filesystem DAX with the given @blocksize? */
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	return __bdev_dax_supported(bdev, blocksize);
}
128
Dan Williams7bf7eac2019-05-16 13:26:29 -0700129bool __generic_fsdax_supported(struct dax_device *dax_dev,
130 struct block_device *bdev, int blocksize, sector_t start,
131 sector_t sectors);
/*
 * Generic helper for a driver's dax_operations.dax_supported: validate
 * the device/range combination for fsdax use.
 */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
139
/* fs-dax flavored lookup; forwards to dax_get_by_host(). */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}
144
/* Drop a reference obtained via fs_dax_get_by_host()/fs_dax_get_by_bdev(). */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}
149
Dan Williams78f35472017-08-30 09:16:38 -0700150struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
Dan Williamsf44c7762018-03-07 15:26:44 -0800151int dax_writeback_mapping_range(struct address_space *mapping,
152 struct block_device *bdev, struct writeback_control *wbc);
Dan Williams5fac7402018-03-09 17:44:31 -0800153
154struct page *dax_layout_busy_page(struct address_space *mapping);
Matthew Wilcox27359fd2018-11-30 11:05:06 -0500155dax_entry_t dax_lock_page(struct page *page);
156void dax_unlock_page(struct page *page, dax_entry_t cookie);
Dan Williamsf5705aa8c2017-05-13 16:31:05 -0700157#else
/* CONFIG_FS_DAX=n stub: filesystem DAX is never supported. */
static inline bool bdev_dax_supported(struct block_device *bdev,
		int blocksize)
{
	return false;
}
163
/* CONFIG_FS_DAX=n stub: nothing can be fsdax-capable. */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}
170
/* CONFIG_FS_DAX=n stub: lookup always fails. */
static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}
175
/* CONFIG_FS_DAX=n stub: no reference was ever taken. */
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
Dan Williams78f35472017-08-30 09:16:38 -0700179
/* CONFIG_FS_DAX=n stub: no dax_device is associated with any bdev. */
static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
	return NULL;
}
Dan Williamsf44c7762018-03-07 15:26:44 -0800184
/* CONFIG_FS_DAX=n stub: no DAX pages exist, so none can be busy. */
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return NULL;
}
189
/* CONFIG_FS_DAX=n stub: DAX writeback is not available. */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
Dan Williamsc2a7d2a2018-07-13 21:50:16 -0700195
Matthew Wilcox27359fd2018-11-30 11:05:06 -0500196static inline dax_entry_t dax_lock_page(struct page *page)
Dan Williamsc2a7d2a2018-07-13 21:50:16 -0700197{
198 if (IS_DAX(page->mapping->host))
Matthew Wilcox27359fd2018-11-30 11:05:06 -0500199 return ~0UL;
200 return 0;
Dan Williamsc2a7d2a2018-07-13 21:50:16 -0700201}
202
/* CONFIG_FS_DAX=n stub: dax_lock_page() took no lock, nothing to drop. */
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
Dan Williamsf5705aa8c2017-05-13 16:31:05 -0700206#endif
207
Jan Karad6712ee2020-09-20 08:54:42 -0700208#if IS_ENABLED(CONFIG_DAX)
Dan Williams7b6be842017-04-11 09:49:49 -0700209int dax_read_lock(void);
210void dax_read_unlock(int id);
Jan Karad6712ee2020-09-20 08:54:42 -0700211#else
/* CONFIG_DAX=n stub: no srcu-style section needed; dummy id for unlock. */
static inline int dax_read_lock(void)
{
	return 0;
}
216
/* CONFIG_DAX=n stub: matching no-op for dax_read_lock(). */
static inline void dax_read_unlock(int id)
{
}
220#endif /* CONFIG_DAX */
Dan Williamsc1d6e822017-01-24 23:02:09 -0800221bool dax_alive(struct dax_device *dax_dev);
Dan Williamsc1d6e822017-01-24 23:02:09 -0800222void *dax_get_private(struct dax_device *dax_dev);
Dan Williamsb0686262017-01-26 20:37:35 -0800223long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
224 void **kaddr, pfn_t *pfn);
Dan Williams7e026c82017-05-29 12:57:56 -0700225size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
226 size_t bytes, struct iov_iter *i);
Dan Williamsb3a9a0c2018-05-02 06:46:33 -0700227size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
228 size_t bytes, struct iov_iter *i);
Mikulas Patockac3ca0152017-08-31 21:47:43 -0400229void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
Dan Williams7b6be842017-04-11 09:49:49 -0700230
Ross Zwisler11c59c92016-11-08 11:32:46 +1100231ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800232 const struct iomap_ops *ops);
Souptick Joarderf77bc3a2018-06-27 23:26:17 -0700233vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
Jan Karac0b24622018-01-07 16:38:43 -0500234 pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
Souptick Joarderab77dab2018-06-07 17:04:29 -0700235vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
236 enum page_entry_size pe_size, pfn_t pfn);
Jan Karaac401cc2016-05-12 18:29:18 +0200237int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
Jan Karac6dcf522016-08-10 17:22:44 +0200238int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
239 pgoff_t index);
Dan Williamsd1a5f2b42016-01-28 20:25:31 -0800240
241#ifdef CONFIG_FS_DAX
Dan Williamscccbce62017-01-27 13:31:42 -0800242int __dax_zero_page_range(struct block_device *bdev,
243 struct dax_device *dax_dev, sector_t sector,
Christoph Hellwig679c8bd2016-05-09 10:47:04 +0200244 unsigned int offset, unsigned int length);
Dan Williamsd1a5f2b42016-01-28 20:25:31 -0800245#else
/* CONFIG_FS_DAX=n stub: zeroing through DAX mappings is unavailable. */
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
Dan Williamsd1a5f2b42016-01-28 20:25:31 -0800252#endif
253
Ross Zwislerf9fe48b2016-01-22 15:10:40 -0800254static inline bool dax_mapping(struct address_space *mapping)
255{
256 return mapping->host && IS_DAX(mapping->host);
257}
Ross Zwisler7f6d5b52016-02-26 15:19:55 -0800258
Matthew Wilcoxc94c2ac2015-09-08 14:58:40 -0700259#endif