blob: bdd628c8182fded6bdf7b44719594f704e426af0 [file] [log] [blame]
Christoph Hellwigfadccd82019-02-18 09:37:13 +01001/* SPDX-License-Identifier: GPL-2.0 */
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05002/*
3 * Definitions for the NVM Express interface
Matthew Wilcox8757ad62014-04-11 10:37:39 -04004 * Copyright (c) 2011-2014, Intel Corporation.
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05005 */
6
7#ifndef _LINUX_NVME_H
8#define _LINUX_NVME_H
9
Christoph Hellwig28046822022-12-21 10:30:45 +010010#include <linux/bits.h>
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020011#include <linux/types.h>
Christoph Hellwig8e412262017-05-17 09:54:27 +020012#include <linux/uuid.h>
Christoph Hellwigeb793e22016-06-13 16:45:25 +020013
/* NQN names in commands fields specified one size */
#define NVMF_NQN_FIELD_LEN	256

/* However the max length of a qualified name is another size */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

/* Well-known NQN of the NVMe-oF discovery subsystem */
#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

/* IANA-assigned port for NVMe/RDMA */
#define NVME_RDMA_IP_PORT	4420

/* Broadcast namespace ID: command applies to all namespaces */
#define NVME_NSID_ALL		0xffffffff
29
/* Subsystem type reported in the Discovery Log Page SUBTYPE field */
enum nvme_subsys_type {
	NVME_NQN_DISC	= 1,		/* Discovery type target subsystem */
	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
};
34
/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4	= 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6	= 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB	= 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC	= 4,	/* Fibre Channel */
};
43
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA	= 1,	/* RDMA */
	NVMF_TRTYPE_FC		= 2,	/* Fibre Channel */
	NVMF_TRTYPE_TCP		= 3,	/* TCP/IP */
	NVMF_TRTYPE_LOOP	= 254,	/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};
52
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED	= 0,		/* Not specified */
	NVMF_TREQ_REQUIRED	= 1,		/* Required */
	NVMF_TREQ_NOT_REQUIRED	= 2,		/* Not Required */
#define NVME_TREQ_SECURE_CHANNEL_MASK \
	(NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)

	NVMF_TREQ_DISABLE_SQFLOW = (1 << 2),	/* Supports SQ flow control disable */
};
63
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
};
71
/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
};
82
/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
};
89
#define NVME_AQ_DEPTH		32
#define NVME_NR_AEN_COMMANDS	1
/* blk-mq tag space left after reserving slots for AEN commands */
#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/*
 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
 * NVM-Express 1.2 specification, section 4.1.2.
 */
#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020099
/* Controller register offsets (NVMe spec, section "Controller Registers") */
enum {
	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
	NVME_REG_VS	= 0x0008,	/* Version */
	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
	NVME_REG_CMBLOC	= 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
	NVME_REG_BPINFO	= 0x0040,	/* Boot Partition Information */
	NVME_REG_BPRSEL	= 0x0044,	/* Boot Partition Read Select */
	NVME_REG_BPMBL	= 0x0048,	/* Boot Partition Memory Buffer
					 * Location
					 */
	NVME_REG_PMRCAP	= 0x0e00,	/* Persistent Memory Capabilities */
	NVME_REG_PMRCTL	= 0x0e04,	/* Persistent Memory Region Control */
	NVME_REG_PMRSTS	= 0x0e08,	/* Persistent Memory Region Status */
	NVME_REG_PMREBS	= 0x0e0c,	/* Persistent Memory Region Elasticity
					 * Buffer Size
					 */
	NVME_REG_PMRSWTP = 0x0e10,	/* Persistent Memory Region Sustained
					 * Write Throughput
					 */
	NVME_REG_DBS	= 0x1000,	/* SQ 0 Tail Doorbell */
};
129
/* Field extractors for the 64-bit CAP register */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

/* Field extractors for the CMBLOC register */
#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600139
/* CMBSZ register: capability bits plus size/size-units fields */
enum {
	NVME_CMBSZ_SQS		= 1 << 0,
	NVME_CMBSZ_CQS		= 1 << 1,
	NVME_CMBSZ_LISTS	= 1 << 2,
	NVME_CMBSZ_RDS		= 1 << 3,
	NVME_CMBSZ_WDS		= 1 << 4,

	NVME_CMBSZ_SZ_SHIFT	= 12,
	NVME_CMBSZ_SZ_MASK	= 0xfffff,

	NVME_CMBSZ_SZU_SHIFT	= 8,
	NVME_CMBSZ_SZU_MASK	= 0xf,
};
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600153
/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_ADM_SQES		6
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4
161
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500162enum {
163 NVME_CC_ENABLE = 1 << 0,
164 NVME_CC_CSS_NVM = 0 << 4,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300165 NVME_CC_EN_SHIFT = 0,
166 NVME_CC_CSS_SHIFT = 4,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500167 NVME_CC_MPS_SHIFT = 7,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300168 NVME_CC_AMS_SHIFT = 11,
169 NVME_CC_SHN_SHIFT = 14,
170 NVME_CC_IOSQES_SHIFT = 16,
171 NVME_CC_IOCQES_SHIFT = 20,
Max Gurtovoy60b43f62017-08-13 19:21:07 +0300172 NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
173 NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
174 NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
Max Gurtovoyad4e05b2017-08-13 19:21:06 +0300175 NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
176 NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
177 NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
178 NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
179 NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
180 NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500181 NVME_CSTS_RDY = 1 << 0,
182 NVME_CSTS_CFS = 1 << 1,
Keith Buschdfbac8c2015-08-10 15:20:40 -0600183 NVME_CSTS_NSSRO = 1 << 4,
Arnav Dawnb6dccf72017-07-12 16:10:40 +0530184 NVME_CSTS_PP = 1 << 5,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500185 NVME_CSTS_SHST_NORMAL = 0 << 2,
186 NVME_CSTS_SHST_OCCUR = 1 << 2,
187 NVME_CSTS_SHST_CMPLT = 2 << 2,
Keith Busch1894d8f2013-07-15 15:02:22 -0600188 NVME_CSTS_SHST_MASK = 3 << 2,
Matthew Wilcoxb60503b2011-01-20 12:50:14 -0500189};
190
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200191struct nvme_id_power_state {
192 __le16 max_power; /* centiwatts */
193 __u8 rsvd2;
194 __u8 flags;
195 __le32 entry_lat; /* microseconds */
196 __le32 exit_lat; /* microseconds */
197 __u8 read_tput;
198 __u8 read_lat;
199 __u8 write_tput;
200 __u8 write_lat;
201 __le16 idle_power;
202 __u8 idle_scale;
203 __u8 rsvd19;
204 __le16 active_power;
205 __u8 active_work_scale;
206 __u8 rsvd23[9];
207};
208
/* Flags for nvme_id_power_state.flags */
enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};

/* CTRATT (Controller Attributes) bits used by the fabrics/keep-alive code */
enum nvme_ctrl_attr {
	NVME_CTRL_ATTR_HID_128_BIT	= (1 << 0),
	NVME_CTRL_ATTR_TBKAS		= (1 << 6),
};
218
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200219struct nvme_id_ctrl {
220 __le16 vid;
221 __le16 ssvid;
222 char sn[20];
223 char mn[40];
224 char fr[8];
225 __u8 rab;
226 __u8 ieee[3];
Christoph Hellwiga446c082016-09-30 13:51:06 +0200227 __u8 cmic;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200228 __u8 mdts;
Christoph Hellwig08c69642015-10-02 15:27:16 +0200229 __le16 cntlid;
230 __le32 ver;
Christoph Hellwig14e974a2016-06-06 23:20:43 +0200231 __le32 rtd3r;
232 __le32 rtd3e;
233 __le32 oaes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200234 __le32 ctratt;
Keith Busch49cd84b2018-11-27 09:40:57 -0700235 __u8 rsvd100[28];
236 __le16 crdt1;
237 __le16 crdt2;
238 __le16 crdt3;
239 __u8 rsvd134[122];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200240 __le16 oacs;
241 __u8 acl;
242 __u8 aerl;
243 __u8 frmw;
244 __u8 lpa;
245 __u8 elpe;
246 __u8 npss;
247 __u8 avscc;
248 __u8 apsta;
249 __le16 wctemp;
250 __le16 cctemp;
Christoph Hellwiga446c082016-09-30 13:51:06 +0200251 __le16 mtfa;
252 __le32 hmpre;
253 __le32 hmmin;
254 __u8 tnvmcap[16];
255 __u8 unvmcap[16];
256 __le32 rpmbs;
Guan Junxiong435e8092017-06-13 09:26:15 +0800257 __le16 edstt;
258 __u8 dsto;
259 __u8 fwug;
Sagi Grimberg7b89eae2016-06-13 16:45:27 +0200260 __le16 kas;
Guan Junxiong435e8092017-06-13 09:26:15 +0800261 __le16 hctma;
262 __le16 mntmt;
263 __le16 mxtmt;
264 __le32 sanicap;
Christoph Hellwig044a9df2017-09-11 12:09:28 -0400265 __le32 hmminds;
266 __le16 hmmaxd;
Christoph Hellwig1a376212018-05-13 18:53:57 +0200267 __u8 rsvd338[4];
268 __u8 anatt;
269 __u8 anacap;
270 __le32 anagrpmax;
271 __le32 nanagrpid;
272 __u8 rsvd352[160];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200273 __u8 sqes;
274 __u8 cqes;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200275 __le16 maxcmd;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200276 __le32 nn;
277 __le16 oncs;
278 __le16 fuses;
279 __u8 fna;
280 __u8 vwc;
281 __le16 awun;
282 __le16 awupf;
283 __u8 nvscc;
Chaitanya Kulkarni93045d52018-08-07 23:01:05 -0700284 __u8 nwpc;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200285 __le16 acwu;
286 __u8 rsvd534[2];
287 __le32 sgls;
Christoph Hellwig1a376212018-05-13 18:53:57 +0200288 __le32 mnan;
289 __u8 rsvd544[224];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200290 char subnqn[256];
291 __u8 rsvd1024[768];
292 __le32 ioccsz;
293 __le32 iorcsz;
294 __le16 icdoff;
295 __u8 ctrattr;
296 __u8 msdbd;
297 __u8 rsvd1804[244];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200298 struct nvme_id_power_state psd[32];
299 __u8 vs[1024];
300};
301
/* Identify Controller capability bits: ONCS, VWC, OACS, LPA and CTRATT */
enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
	NVME_CTRL_ONCS_TIMESTAMP		= 1 << 6,
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP			= 1 << 0,
	NVME_CTRL_OACS_DIRECTIVES		= 1 << 5,
	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 8,
	NVME_CTRL_LPA_CMD_EFFECTS_LOG		= 1 << 1,
	NVME_CTRL_CTRATT_128_ID			= 1 << 0,
	NVME_CTRL_CTRATT_NON_OP_PSP		= 1 << 1,
	NVME_CTRL_CTRATT_NVM_SETS		= 1 << 2,
	NVME_CTRL_CTRATT_READ_RECV_LVLS		= 1 << 3,
	NVME_CTRL_CTRATT_ENDURANCE_GROUPS	= 1 << 4,
	NVME_CTRL_CTRATT_PREDICTABLE_LAT	= 1 << 5,
	NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY	= 1 << 7,
	NVME_CTRL_CTRATT_UUID_LIST		= 1 << 9,
};
322
323struct nvme_lbaf {
324 __le16 ms;
325 __u8 ds;
326 __u8 rp;
327};
328
329struct nvme_id_ns {
330 __le64 nsze;
331 __le64 ncap;
332 __le64 nuse;
333 __u8 nsfeat;
334 __u8 nlbaf;
335 __u8 flbas;
336 __u8 mc;
337 __u8 dpc;
338 __u8 dps;
339 __u8 nmic;
340 __u8 rescap;
341 __u8 fpi;
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700342 __u8 dlfeat;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200343 __le16 nawun;
344 __le16 nawupf;
345 __le16 nacwu;
346 __le16 nabsn;
347 __le16 nabo;
348 __le16 nabspf;
Scott Bauer6b8190d2017-06-15 10:44:30 -0600349 __le16 noiob;
Christoph Hellwiga446c082016-09-30 13:51:06 +0200350 __u8 nvmcap[16];
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700351 __le16 npwg;
352 __le16 npwa;
353 __le16 npdg;
354 __le16 npda;
355 __le16 nows;
356 __u8 rsvd74[18];
Christoph Hellwig1a376212018-05-13 18:53:57 +0200357 __le32 anagrpid;
Chaitanya Kulkarni93045d52018-08-07 23:01:05 -0700358 __u8 rsvd96[3];
359 __u8 nsattr;
Bart Van Assche6605bdd2019-06-28 09:53:29 -0700360 __le16 nvmsetid;
361 __le16 endgid;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200362 __u8 nguid[16];
363 __u8 eui64[8];
364 struct nvme_lbaf lbaf[16];
365 __u8 rsvd192[192];
366 __u8 vs[3712];
367};
368
/* CNS (Controller or Namespace Structure) values for the Identify command */
enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
	NVME_ID_CNS_NS_DESC_LIST	= 0x03,
	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
	NVME_ID_CNS_NS_PRESENT		= 0x11,
	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
	NVME_ID_CNS_CTRL_LIST		= 0x13,
	NVME_ID_CNS_SCNDRY_CTRL_LIST	= 0x15,
	NVME_ID_CNS_NS_GRANULARITY	= 0x16,
	NVME_ID_CNS_UUID_LIST		= 0x17,
};
382
/* Directive types and directive send/receive operation codes */
enum {
	NVME_DIR_IDENTIFY		= 0x00,
	NVME_DIR_STREAMS		= 0x01,
	NVME_DIR_SND_ID_OP_ENABLE	= 0x01,
	NVME_DIR_SND_ST_OP_REL_ID	= 0x01,
	NVME_DIR_SND_ST_OP_REL_RSC	= 0x02,
	NVME_DIR_RCV_ID_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_STATUS	= 0x02,
	NVME_DIR_RCV_ST_OP_RESOURCE	= 0x03,
	NVME_DIR_ENDIR			= 0x01,
};
395
/* Namespace feature flags, FLBAS fields, and protection-information bits */
enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
	NVME_NS_DPC_PI_LAST	= 1 << 4,
	NVME_NS_DPC_PI_FIRST	= 1 << 3,
	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
	NVME_NS_DPS_PI_FIRST	= 1 << 3,
	NVME_NS_DPS_PI_MASK	= 0x7,
	NVME_NS_DPS_PI_TYPE1	= 1,
	NVME_NS_DPS_PI_TYPE2	= 2,
	NVME_NS_DPS_PI_TYPE3	= 3,
};
415
Johannes Thumshirnaf8b86e2017-06-07 11:45:30 +0200416struct nvme_ns_id_desc {
417 __u8 nidt;
418 __u8 nidl;
419 __le16 reserved;
420};
421
422#define NVME_NIDT_EUI64_LEN 8
423#define NVME_NIDT_NGUID_LEN 16
424#define NVME_NIDT_UUID_LEN 16
425
426enum {
427 NVME_NIDT_EUI64 = 0x01,
428 NVME_NIDT_NGUID = 0x02,
429 NVME_NIDT_UUID = 0x03,
430};
431
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200432struct nvme_smart_log {
433 __u8 critical_warning;
434 __u8 temperature[2];
435 __u8 avail_spare;
436 __u8 spare_thresh;
437 __u8 percent_used;
Revanth Rajashekar24561f52019-10-14 11:16:07 -0600438 __u8 endu_grp_crit_warn_sumry;
439 __u8 rsvd7[25];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200440 __u8 data_units_read[16];
441 __u8 data_units_written[16];
442 __u8 host_reads[16];
443 __u8 host_writes[16];
444 __u8 ctrl_busy_time[16];
445 __u8 power_cycles[16];
446 __u8 power_on_hours[16];
447 __u8 unsafe_shutdowns[16];
448 __u8 media_errors[16];
449 __u8 num_err_log_entries[16];
450 __le32 warning_temp_time;
451 __le32 critical_comp_time;
452 __le16 temp_sensor[8];
Revanth Rajashekar24561f52019-10-14 11:16:07 -0600453 __le32 thm_temp1_trans_count;
454 __le32 thm_temp2_trans_count;
455 __le32 thm_temp1_total_time;
456 __le32 thm_temp2_total_time;
457 __u8 rsvd232[280];
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200458};
459
Arnav Dawnb6dccf72017-07-12 16:10:40 +0530460struct nvme_fw_slot_info_log {
461 __u8 afi;
462 __u8 rsvd1[7];
463 __le64 frs[7];
464 __u8 rsvd64[448];
465};
466
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200467enum {
Keith Busch84fef622017-11-07 10:28:32 -0700468 NVME_CMD_EFFECTS_CSUPP = 1 << 0,
469 NVME_CMD_EFFECTS_LBCC = 1 << 1,
470 NVME_CMD_EFFECTS_NCC = 1 << 2,
471 NVME_CMD_EFFECTS_NIC = 1 << 3,
472 NVME_CMD_EFFECTS_CCC = 1 << 4,
Christoph Hellwig28046822022-12-21 10:30:45 +0100473 NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
Revanth Rajashekar24561f52019-10-14 11:16:07 -0600474 NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
Keith Busch84fef622017-11-07 10:28:32 -0700475};
476
477struct nvme_effects_log {
478 __le32 acs[256];
479 __le32 iocs[256];
480 __u8 resv[2048];
481};
482
/* Asymmetric Namespace Access states (ANA group descriptor state field) */
enum nvme_ana_state {
	NVME_ANA_OPTIMIZED		= 0x01,
	NVME_ANA_NONOPTIMIZED		= 0x02,
	NVME_ANA_INACCESSIBLE		= 0x03,
	NVME_ANA_PERSISTENT_LOSS	= 0x04,
	NVME_ANA_CHANGE			= 0x0f,
};
490
491struct nvme_ana_group_desc {
492 __le32 grpid;
493 __le32 nnsids;
494 __le64 chgcnt;
495 __u8 state;
Hannes Reinecke8b92d0e2018-08-08 08:35:29 +0200496 __u8 rsvd17[15];
Christoph Hellwig1a376212018-05-13 18:53:57 +0200497 __le32 nsids[];
498};
499
500/* flag for the log specific field of the ANA log */
501#define NVME_ANA_LOG_RGO (1 << 0)
502
503struct nvme_ana_rsp_hdr {
504 __le64 chgcnt;
505 __le16 ngrps;
506 __le16 rsvd10[3];
507};
508
/* Critical warning bits in the SMART log */
enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};

/* Asynchronous event types (completion dword 0, bits 2:0) */
enum {
	NVME_AER_ERROR			= 0,
	NVME_AER_SMART			= 1,
	NVME_AER_NOTICE			= 2,
	NVME_AER_CSS			= 6,
	NVME_AER_VS			= 7,
};

/* Error-type AER event information values */
enum {
	NVME_AER_ERROR_PERSIST_INT_ERR	= 0x03,
};

/* Notice-type AER event information values */
enum {
	NVME_AER_NOTICE_NS_CHANGED	= 0x00,
	NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
	NVME_AER_NOTICE_ANA		= 0x03,
	NVME_AER_NOTICE_DISC_CHANGED	= 0xf0,
};

/* Bit positions in the Asynchronous Event Configuration feature */
enum {
	NVME_AEN_BIT_NS_ATTR		= 8,
	NVME_AEN_BIT_FW_ACT		= 9,
	NVME_AEN_BIT_ANA_CHANGE		= 11,
	NVME_AEN_BIT_DISC_CHANGE	= 31,
};

/* Corresponding AEN configuration masks */
enum {
	NVME_AEN_CFG_NS_ATTR		= 1 << NVME_AEN_BIT_NS_ATTR,
	NVME_AEN_CFG_FW_ACT		= 1 << NVME_AEN_BIT_FW_ACT,
	NVME_AEN_CFG_ANA_CHANGE		= 1 << NVME_AEN_BIT_ANA_CHANGE,
	NVME_AEN_CFG_DISC_CHANGE	= 1 << NVME_AEN_BIT_DISC_CHANGE,
};
549
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200550struct nvme_lba_range_type {
551 __u8 type;
552 __u8 attributes;
553 __u8 rsvd2[14];
554 __u64 slba;
555 __u64 nlb;
556 __u8 guid[16];
557 __u8 rsvd48[16];
558};
559
/* LBA range types and attribute flags */
enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};
569
570struct nvme_reservation_status {
571 __le32 gen;
572 __u8 rtype;
573 __u8 regctl[2];
574 __u8 resv5[2];
575 __u8 ptpls;
576 __u8 resv10[13];
577 struct {
578 __le16 cntlid;
579 __u8 rcsts;
580 __u8 resv3[5];
581 __le64 hostid;
582 __le64 rkey;
583 } regctl_ds[];
584};
585
/* Asynchronous event result types reported by the target side */
enum nvme_async_event_type {
	NVME_AER_TYPE_ERROR	= 0,
	NVME_AER_TYPE_SMART	= 1,
	NVME_AER_TYPE_NOTICE	= 2,
};
591
/* I/O commands */

/* NVM command set opcodes */
enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_write_zeroes	= 0x08,
	nvme_cmd_dsm		= 0x09,
	nvme_cmd_verify		= 0x0c,
	nvme_cmd_resv_register	= 0x0d,
	nvme_cmd_resv_report	= 0x0e,
	nvme_cmd_resv_acquire	= 0x11,
	nvme_cmd_resv_release	= 0x15,
};
608
/* Tracepoint helpers: map I/O opcode values to their symbolic names.
 * __print_symbolic() is provided by the kernel tracing infrastructure.
 */
#define nvme_opcode_name(opcode)	{ opcode, #opcode }
#define show_nvm_opcode_name(val)				\
	__print_symbolic(val,					\
		nvme_opcode_name(nvme_cmd_flush),		\
		nvme_opcode_name(nvme_cmd_write),		\
		nvme_opcode_name(nvme_cmd_read),		\
		nvme_opcode_name(nvme_cmd_write_uncor),		\
		nvme_opcode_name(nvme_cmd_compare),		\
		nvme_opcode_name(nvme_cmd_write_zeroes),	\
		nvme_opcode_name(nvme_cmd_dsm),			\
		nvme_opcode_name(nvme_cmd_resv_register),	\
		nvme_opcode_name(nvme_cmd_resv_report),		\
		nvme_opcode_name(nvme_cmd_resv_acquire),	\
		nvme_opcode_name(nvme_cmd_resv_release))

624
/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS		= 0x00,
	NVME_SGL_FMT_OFFSET		= 0x01,
	NVME_SGL_FMT_TRANSPORT_A	= 0x0A,
	NVME_SGL_FMT_INVALIDATE		= 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:     data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:      sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
 *
 * Transport-specific SGL types:
 *   @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,
	NVME_SGL_FMT_SEG_DESC		= 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
	NVME_TRANSPORT_SGL_DATA_DESC	= 0x05,
};
662
663struct nvme_sgl_desc {
664 __le64 addr;
665 __le32 length;
666 __u8 rsvd[3];
667 __u8 type;
668};
669
670struct nvme_keyed_sgl_desc {
671 __le64 addr;
672 __u8 length[3];
673 __u8 key[4];
674 __u8 type;
675};
676
677union nvme_data_ptr {
678 struct {
679 __le64 prp1;
680 __le64 prp2;
681 };
682 struct nvme_sgl_desc sgl;
683 struct nvme_keyed_sgl_desc ksgl;
684};
685
/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:   Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND:  Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF:	Use SGLS for this transfer,
 *	If used, MPTR contains addr of single physical buffer (byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG:	Use SGLS for this transfer,
 *	If used, MPTR contains an address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST	= (1 << 0),
	NVME_CMD_FUSE_SECOND	= (1 << 1),

	NVME_CMD_SGL_METABUF	= (1 << 6),
	NVME_CMD_SGL_METASEG	= (1 << 7),
	NVME_CMD_SGL_ALL	= NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};
708
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200709struct nvme_common_command {
710 __u8 opcode;
711 __u8 flags;
712 __u16 command_id;
713 __le32 nsid;
714 __le32 cdw2[2];
715 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200716 union nvme_data_ptr dptr;
Chaitanya Kulkarnib7c8f362018-12-12 15:11:37 -0800717 __le32 cdw10;
718 __le32 cdw11;
719 __le32 cdw12;
720 __le32 cdw13;
721 __le32 cdw14;
722 __le32 cdw15;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200723};
724
725struct nvme_rw_command {
726 __u8 opcode;
727 __u8 flags;
728 __u16 command_id;
729 __le32 nsid;
730 __u64 rsvd2;
731 __le64 metadata;
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200732 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200733 __le64 slba;
734 __le16 length;
735 __le16 control;
736 __le32 dsmgmt;
737 __le32 reftag;
738 __le16 apptag;
739 __le16 appmask;
740};
741
/* Read/Write command control bits, DSM hints and PRINFO flags */
enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
	NVME_RW_PRINFO_PRACT		= 1 << 13,
	NVME_RW_DTYPE_STREAMS		= 1 << 4,
};
766
767struct nvme_dsm_cmd {
768 __u8 opcode;
769 __u8 flags;
770 __u16 command_id;
771 __le32 nsid;
772 __u64 rsvd2[2];
Christoph Hellwigeb793e22016-06-13 16:45:25 +0200773 union nvme_data_ptr dptr;
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200774 __le32 nr;
775 __le32 attributes;
776 __u32 rsvd12[4];
777};
778
/* Dataset Management attribute bits (integral read/write, deallocate) */
enum {
	NVME_DSMGMT_IDR	= 1 << 0,
	NVME_DSMGMT_IDW	= 1 << 1,
	NVME_DSMGMT_AD	= 1 << 2,
};
784
Christoph Hellwigb35ba012017-02-08 14:46:50 +0100785#define NVME_DSM_MAX_RANGES 256
786
Christoph Hellwig9d99a8d2015-10-02 15:25:49 +0200787struct nvme_dsm_range {
788 __le32 cattr;
789 __le32 nlb;
790 __le64 slba;
791};
792
Chaitanya Kulkarni3b7c33b2016-11-30 12:29:00 -0800793struct nvme_write_zeroes_cmd {
794 __u8 opcode;
795 __u8 flags;
796 __u16 command_id;
797 __le32 nsid;
798 __u64 rsvd2;
799 __le64 metadata;
800 union nvme_data_ptr dptr;
801 __le64 slba;
802 __le16 length;
803 __le16 control;
804 __le32 dsmgmt;
805 __le32 reftag;
806 __le16 apptag;
807 __le16 appmask;
808};
809
Andy Lutomirskic5552fd2017-02-07 10:08:45 -0800810/* Features */
811
812struct nvme_feat_auto_pst {
813 __le64 entries[32];
814};
815
Christoph Hellwig39673e12017-01-09 15:36:28 +0100816enum {
817 NVME_HOST_MEM_ENABLE = (1 << 0),
818 NVME_HOST_MEM_RETURN = (1 << 1),
819};
820
Keith Busch49cd84b2018-11-27 09:40:57 -0700821struct nvme_feat_host_behavior {
822 __u8 acre;
823 __u8 resv1[511];
824};
825
826enum {
827 NVME_ENABLE_ACRE = 1,
828};
829
/* Admin commands */

/* Admin command set opcodes */
enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_dev_self_test	= 0x14,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_directive_send	= 0x19,
	nvme_admin_directive_recv	= 0x1a,
	nvme_admin_virtual_mgmt		= 0x1c,
	nvme_admin_nvme_mi_send		= 0x1d,
	nvme_admin_nvme_mi_recv		= 0x1e,
	nvme_admin_dbbuf		= 0x7C,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
	nvme_admin_sanitize_nvm		= 0x84,
	nvme_admin_get_lba_status	= 0x86,
};
861
/* Tracepoint helpers: map admin opcode values to their symbolic names.
 * __print_symbolic() is provided by the kernel tracing infrastructure.
 */
#define nvme_admin_opcode_name(opcode)	{ opcode, #opcode }
#define show_admin_opcode_name(val)				\
	__print_symbolic(val,					\
		nvme_admin_opcode_name(nvme_admin_delete_sq),	\
		nvme_admin_opcode_name(nvme_admin_create_sq),	\
		nvme_admin_opcode_name(nvme_admin_get_log_page), \
		nvme_admin_opcode_name(nvme_admin_delete_cq),	\
		nvme_admin_opcode_name(nvme_admin_create_cq),	\
		nvme_admin_opcode_name(nvme_admin_identify),	\
		nvme_admin_opcode_name(nvme_admin_abort_cmd),	\
		nvme_admin_opcode_name(nvme_admin_set_features), \
		nvme_admin_opcode_name(nvme_admin_get_features), \
		nvme_admin_opcode_name(nvme_admin_async_event),	\
		nvme_admin_opcode_name(nvme_admin_ns_mgmt),	\
		nvme_admin_opcode_name(nvme_admin_activate_fw),	\
		nvme_admin_opcode_name(nvme_admin_download_fw),	\
		nvme_admin_opcode_name(nvme_admin_ns_attach),	\
		nvme_admin_opcode_name(nvme_admin_keep_alive),	\
		nvme_admin_opcode_name(nvme_admin_directive_send), \
		nvme_admin_opcode_name(nvme_admin_directive_recv), \
		nvme_admin_opcode_name(nvme_admin_dbbuf),	\
		nvme_admin_opcode_name(nvme_admin_format_nvm),	\
		nvme_admin_opcode_name(nvme_admin_security_send), \
		nvme_admin_opcode_name(nvme_admin_security_recv), \
		nvme_admin_opcode_name(nvme_admin_sanitize_nvm), \
		nvme_admin_opcode_name(nvme_admin_get_lba_status))

enum {
	/* Create SQ/CQ: queue memory is physically contiguous */
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	/* Create CQ: generate interrupts for completions on this queue */
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	/* Create SQ: queue priority (used by weighted round robin arbitration) */
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	/* Feature identifiers for Set/Get Features (cdw10 FID) */
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_TIMESTAMP	= 0x0e,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_HCTM		= 0x10,
	NVME_FEAT_NOPSC		= 0x11,
	NVME_FEAT_RRL		= 0x12,
	NVME_FEAT_PLM_CONFIG	= 0x13,
	NVME_FEAT_PLM_WINDOW	= 0x14,
	NVME_FEAT_HOST_BEHAVIOR	= 0x16,
	NVME_FEAT_SANITIZE	= 0x17,
	/* 0x80 and up: feature identifiers with optional/vendor scope */
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_FEAT_WRITE_PROTECT	= 0x84,
	/* Log page identifiers for Get Log Page (cdw10 LID) */
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_CHANGED_NS	= 0x04,
	NVME_LOG_CMD_EFFECTS	= 0x05,
	NVME_LOG_DEVICE_SELF_TEST = 0x06,
	NVME_LOG_TELEMETRY_HOST	= 0x07,
	NVME_LOG_TELEMETRY_CTRL	= 0x08,
	NVME_LOG_ENDURANCE_GROUP = 0x09,
	NVME_LOG_ANA		= 0x0c,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	/* Firmware Commit actions (cdw10 CA field, bits 05:03) */
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};
939
/*
 * NVMe Namespace Write Protect State
 *
 * Values for the NVME_FEAT_WRITE_PROTECT feature, ordered from
 * unprotected to permanently protected.
 */
enum {
	NVME_NS_NO_WRITE_PROTECT = 0,
	NVME_NS_WRITE_PROTECT,
	NVME_NS_WRITE_PROTECT_POWER_CYCLE,
	NVME_NS_WRITE_PROTECT_PERMANENT,
};
947
Christoph Hellwigb3984e02018-05-25 17:18:33 +0200948#define NVME_MAX_CHANGED_NAMESPACES 1024
949
/* Identify admin command (opcode 0x06) */
struct nvme_identify {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			cns;	/* selects which data structure to return */
	__u8			rsvd3;
	__le16			ctrlid;	/* controller id, for CNS values that take one */
	__u32			rsvd11[5];
};
962
Johannes Thumshirn0add5e82017-06-07 11:45:29 +0200963#define NVME_IDENTIFY_DATA_SIZE 4096
964
/* Set Features / Get Features admin commands (opcodes 0x09 / 0x0a) */
struct nvme_features {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			fid;		/* feature identifier, NVME_FEAT_* */
	__le32			dword11;	/* feature specific value */
	__le32			dword12;
	__le32			dword13;
	__le32			dword14;
	__le32			dword15;
};
979
/*
 * Host Memory Buffer descriptor: one contiguous chunk of host memory
 * donated to the controller (see NVME_FEAT_HOST_MEM_BUF).
 */
struct nvme_host_mem_buf_desc {
	__le64			addr;	/* host address of the chunk */
	__le32			size;	/* chunk size — units per NVMe spec HMB definition, verify */
	__u32			rsvd;
};
985
/* Create I/O Completion Queue admin command (opcode 0x05) */
struct nvme_create_cq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;		/* address of the queue memory */
	__u64			rsvd8;
	__le16			cqid;		/* id of the queue being created */
	__le16			qsize;		/* queue size (zero's based per spec) */
	__le16			cq_flags;	/* NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED */
	__le16			irq_vector;	/* interrupt vector for this queue */
	__u32			rsvd12[4];
};
999
/* Create I/O Submission Queue admin command (opcode 0x01) */
struct nvme_create_sq {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;		/* address of the queue memory */
	__u64			rsvd8;
	__le16			sqid;		/* id of the queue being created */
	__le16			qsize;		/* queue size (zero's based per spec) */
	__le16			sq_flags;	/* NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_* */
	__le16			cqid;		/* completion queue to post completions to */
	__u32			rsvd12[4];
};
1013
/* Delete I/O SQ / Delete I/O CQ admin commands (opcodes 0x00 / 0x04) */
struct nvme_delete_queue {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			qid;	/* id of the queue to delete */
	__u16			rsvd10;
	__u32			rsvd11[5];
};
1023
/* Abort admin command (opcode 0x08) */
struct nvme_abort_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[9];
	__le16			sqid;	/* SQ the command to abort was submitted on */
	__u16			cid;	/* command id of the command to abort */
	__u32			rsvd11[5];
};
1033
/* Firmware Image Download admin command (opcode 0x11) */
struct nvme_download_firmware {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	union nvme_data_ptr	dptr;
	__le32			numd;	/* number of dwords to transfer */
	__le32			offset;	/* dword offset within the firmware image */
	__u32			rsvd12[4];
};
1044
/* Format NVM admin command (opcode 0x80) */
struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;	/* LBA format, metadata and secure erase settings */
	__u32			rsvd11[5];
};
1054
/* Get Log Page admin command (opcode 0x02) */
struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;	/* log page identifier, NVME_LOG_* */
	__u8			lsp; /* upper 4 bits reserved */
	__le16			numdl;	/* number of dwords, lower 16 bits */
	__le16			numdu;	/* number of dwords, upper 16 bits */
	__u16			rsvd11;
	union {
		struct {
			__le32 lpol;	/* log page offset, lower 32 bits */
			__le32 lpou;	/* log page offset, upper 32 bits */
		};
		__le64 lpo;		/* the same offset as one 64-bit field */
	};
	__u32			rsvd14[2];
};
1076
/* Directive Send / Directive Receive admin commands (opcodes 0x19 / 0x1a) */
struct nvme_directive_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			numd;	/* number of dwords to transfer */
	__u8			doper;	/* directive operation */
	__u8			dtype;	/* directive type */
	__le16			dspec;	/* directive specific field */
	__u8			endir;	/* enable directive (identify/send) */
	__u8			tdtype;	/* target directive type */
	__u16			rsvd15;

	__u32			rsvd16[3];
};
1094
/*
 * Fabrics subcommands.
 *
 * All NVMe over Fabrics commands share this single opcode; the actual
 * operation is selected by the capsule's fctype field.
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command		= 0x7f,
};
1101
/* Fabrics capsule types, carried in the fctype field */
enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};
1107
/* Trace helpers: map fabrics capsule types to their symbolic names */
#define nvme_fabrics_type_name(type)   { type, #type }
#define show_fabrics_type_name(type)					\
	__print_symbolic(type,						\
		nvme_fabrics_type_name(nvme_fabrics_type_property_set),	\
		nvme_fabrics_type_name(nvme_fabrics_type_connect),	\
		nvme_fabrics_type_name(nvme_fabrics_type_property_get))

/*
 * If not fabrics command, fctype will be ignored.
 * Admin-queue commands (qid == 0) and I/O commands use different
 * opcode spaces, hence the qid-based dispatch.
 */
#define show_opcode_name(qid, opcode, fctype)			\
	((opcode) == nvme_fabrics_command ?			\
	 show_fabrics_type_name(fctype) :			\
	 ((qid) ?						\
	  show_nvm_opcode_name(opcode) :			\
	  show_admin_opcode_name(opcode)))
1124
/*
 * Layout shared by all fabrics command capsules: the real operation is
 * selected by fctype, not by the opcode.
 */
struct nvmf_common_command {
	__u8	opcode;		/* always nvme_fabrics_command (0x7f) */
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;		/* capsule type, enum nvmf_capsule_command */
	__u8	resv2[35];
	__u8	ts[24];		/* transport specific area */
};
1133
/*
 * The legal cntlid range a NVMe Target will provide.
 * Note that cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff	/* ask the target to allocate one */

/* Maximum number of discovery log entries handled at once */
#define MAX_DISC_LOGS	255
1145
/* Discovery log page entry (1024 bytes per the fabrics spec) */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;		/* transport type, NVMF_TRTYPE_* */
	__u8		adrfam;		/* address family, NVMF_ADDR_FAMILY_* */
	__u8		subtype;	/* subsystem type, NVME_NQN_* */
	__u8		treq;		/* transport requirements */
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;		/* admin max submission queue size */
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];	/* transport service id (e.g. port) */
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	/* transport specific address subtype */
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;		/* RDMA QP service type */
			__u8	prtype;		/* RDMA provider type */
			__u8	cms;		/* RDMA connection management service */
			__u8	resv3[5];
			__u16	pkey;		/* partition key */
			__u8	resv10[246];
		} rdma;
	} tsas;
};
1172
1173/* Discovery log page header */
1174struct nvmf_disc_rsp_page_hdr {
1175 __le64 genctr;
1176 __le64 numrec;
1177 __le16 recfmt;
1178 __u8 resv14[1006];
1179 struct nvmf_disc_rsp_page_entry entries[0];
1180};
1181
/* Connect command attribute (cattr) flags */
enum {
	NVME_CONNECT_DISABLE_SQFLOW	= (1 << 2),	/* disable SQ flow control */
};
1185
/* Fabrics Connect capsule: establishes an admin or I/O queue */
struct nvmf_connect_command {
	__u8		opcode;		/* nvme_fabrics_command */
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_connect */
	__u8		resv2[19];
	union nvme_data_ptr dptr;	/* points to struct nvmf_connect_data */
	__le16		recfmt;		/* connect record format */
	__le16		qid;		/* queue id; 0 means the admin queue */
	__le16		sqsize;		/* submission queue size */
	__u8		cattr;		/* connect attributes, NVME_CONNECT_* */
	__u8		resv3;
	__le32		kato;		/* keep alive timeout (KATO) */
	__u8		resv4[12];
};
1201
/* Data payload transferred with the fabrics Connect capsule */
struct nvmf_connect_data {
	uuid_t		hostid;		/* host identifier */
	__le16		cntlid;		/* requested controller id (see NVME_CNTLID_*) */
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];	/* target subsystem NQN */
	char		hostnqn[NVMF_NQN_FIELD_LEN];	/* connecting host's NQN */
	char		resv5[256];
};
1210
/* Fabrics Property Set capsule: write a controller property */
struct nvmf_property_set_command {
	__u8		opcode;		/* nvme_fabrics_command */
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_property_set */
	__u8		resv2[35];
	__u8		attrib;		/* property size attribute */
	__u8		resv3[3];
	__le32		offset;		/* property offset */
	__le64		value;		/* value to write */
	__u8		resv4[8];
};
1223
/* Fabrics Property Get capsule: read a controller property */
struct nvmf_property_get_command {
	__u8		opcode;		/* nvme_fabrics_command */
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_property_get */
	__u8		resv2[35];
	__u8		attrib;		/* property size attribute */
	__u8		resv3[3];
	__le32		offset;		/* property offset */
	__u8		resv4[16];
};
1235
/* Doorbell Buffer Config admin command (opcode 0x7c) */
struct nvme_dbbuf {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;	/* presumably the shadow doorbell buffer — confirm against spec */
	__le64			prp2;	/* presumably the EventIdx buffer — confirm against spec */
	__u32			rsvd12[6];
};
1245
/* Parameters returned by the Streams directive (Directive Receive) */
struct streams_directive_params {
	__le16	msl;	/* max streams limit */
	__le16	nssa;	/* NVM subsystem streams available */
	__le16	nsso;	/* NVM subsystem streams open */
	__u8	rsvd[10];
	__le32	sws;	/* stream write size */
	__le16	sgs;	/* stream granularity size */
	__le16	nsa;	/* namespace streams allocated */
	__le16	nso;	/* namespace streams open */
	__u8	rsvd2[6];
};
1257
/*
 * A single submission queue entry, viewed as whichever command layout
 * matches its opcode.  All members of the union share the common
 * opcode/flags/command_id prefix.
 */
struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
	};
};
1281
Minwoo Im7a1f46e2019-06-06 14:30:14 +09001282static inline bool nvme_is_fabrics(struct nvme_command *cmd)
1283{
1284 return cmd->common.opcode == nvme_fabrics_command;
1285}
1286
/* One entry of the Error Information log page (NVME_LOG_ERROR) */
struct nvme_error_slot {
	__le64		error_count;		/* incrementing error serial number */
	__le16		sqid;			/* SQ the failed command was submitted on */
	__le16		cmdid;			/* command id of the failed command */
	__le16		status_field;		/* completion status of the failed command */
	__le16		param_error_location;
	__le64		lba;			/* first LBA that experienced the error */
	__le32		nsid;
	__u8		vs;			/* vendor specific information available */
	__u8		resv[3];
	__le64		cs;			/* command specific information */
	__u8		resv2[24];
};
1300
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001301static inline bool nvme_is_write(struct nvme_command *cmd)
1302{
Christoph Hellwigeb793e22016-06-13 16:45:25 +02001303 /*
1304 * What a mess...
1305 *
1306 * Why can't we simply have a Fabrics In and Fabrics out command?
1307 */
Minwoo Im7a1f46e2019-06-06 14:30:14 +09001308 if (unlikely(nvme_is_fabrics(cmd)))
Jon Derrick2fd41672017-07-12 10:58:19 -06001309 return cmd->fabrics.fctype & 1;
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001310 return cmd->common.opcode & 1;
1311}
1312
/*
 * NVMe completion status codes, as carried in the status field of a
 * completion queue entry.
 */
enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_SANITIZE_FAILED		= 0x1C,
	NVME_SC_SANITIZE_IN_PROGRESS	= 0x1D,

	NVME_SC_NS_WRITE_PROTECTED	= 0x20,
	NVME_SC_CMD_INTERRUPTED		= 0x21,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACTIVATE_PROHIBITED	= 0x113,
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICIENT_CAP	= 0x115,
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,
	NVME_SC_BP_WRITE_PROHIBITED	= 0x11e,
	NVME_SC_PMR_SAN_PROHIBITED	= 0x123,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 * (note: these deliberately share the 0x180 range with the NVM
	 * set above; which set applies depends on the command issued)
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	/*
	 * Path-related Errors:
	 */
	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
	NVME_SC_ANA_INACCESSIBLE	= 0x302,
	NVME_SC_ANA_TRANSITION		= 0x303,
	NVME_SC_HOST_PATH_ERROR		= 0x370,
	NVME_SC_HOST_ABORTED_CMD	= 0x371,

	/* mask for the Command Retry Delay index bits of the status */
	NVME_SC_CRD			= 0x1800,
	/* Do Not Retry bit of the status */
	NVME_SC_DNR			= 0x4000,
};
1428
/* A 16-byte completion queue entry */
struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};
1443
/* Compose a version value as encoded in the controller VS register */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* Decompose an NVME_VS()-style version value */
#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)
1450
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001451#endif /* _LINUX_NVME_H */