NVIDIA DOCA SDK Data Center on a Chip Framework Documentation
doca_transport.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2024 NVIDIA CORPORATION AND AFFILIATES. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without modification, are permitted
5  * provided that the following conditions are met:
6  * * Redistributions of source code must retain the above copyright notice, this list of
7  * conditions and the following disclaimer.
8  * * Redistributions in binary form must reproduce the above copyright notice, this list of
9  * conditions and the following disclaimer in the documentation and/or other materials
10  * provided with the distribution.
11  * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
12  * to endorse or promote products derived from this software without specific prior written
13  * permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
21  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
22  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  *
24  */
25 
26 #include "spdk/nvmf_transport.h"
27 #include "spdk/util.h"
28 #include "spdk/thread.h"
29 #include <spdk/nvme_spec.h>
30 
31 #include <doca_error.h>
32 #include <doca_log.h>
33 #include <doca_dev.h>
34 #include <doca_dpa.h>
35 #include <doca_pe.h>
36 #include <doca_transport_common.h>
37 
38 #include "nvme_pci_common.h"
39 #include "nvme_pci_type_config.h"
40 #include "nvmf_doca_io.h"
41 
42 DOCA_LOG_REGISTER(NVME_EMULATION_DOCA_TRANSPORT);
43 
44 #define HOTPLUG_TIMEOUT_IN_MICROS (5 * 1000 * 1000) /* Set timeout to 5 seconds */
45 
46 #define NVMF_DOCA_DEFAULT_MAX_QUEUE_DEPTH 512
47 #define NVMF_DOCA_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
48 #define NVMF_DOCA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
49 #define NVMF_DOCA_DEFAULT_MAX_IO_SIZE 131072
50 #define NVMF_DOCA_DEFAULT_IO_UINT_SIZE 128
51 #define NVMF_DOCA_DEFAULT_AQ_DEPTH 256
52 #define NVMF_DOCA_DEFAULT_NUM_SHARED_BUFFER 1
53 #define NVMF_DOCA_DEFAULT_BUFFER_CACHE_SIZE 0
54 #define NVMF_DOCA_DIF_INSERT_OR_STRIP false
55 #define NVMF_DOCA_DEFAULT_ABORT_TIMEOUT_SEC 1
56 
57 #define NVMF_ADMIN_QUEUE_ID 0
58 #define ADMIN_QP_POLL_RATE_LIMIT 1000
59 
60 /*
61  * A for-each loop that allows a node to be removed or freed within the loop.
62  */
63 #ifndef TAILQ_FOREACH_SAFE
64 #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
65  for ((var) = ((head)->tqh_first); (var) && ((tvar) = ((var)->field.tqe_next), 1); (var) = (tvar))
66 #endif
67 
68 /*
69  * A struct that includes all needed info on registered kernels and is initialized during linkage by DPACC
70  * Variable name should be the token passed to DPACC with --app-name parameter
71  */
72 extern struct doca_dpa_app *nvmf_doca_transport_app;
73 
76  union spdk_nvme_cap_register cap;
77 
79  union spdk_nvme_vs_register vs;
80  uint32_t intms; /* interrupt mask set */
81  uint32_t intmc; /* interrupt mask clear */
82 
84  union spdk_nvme_cc_register cc;
85 
86  uint32_t reserved1;
87  union spdk_nvme_csts_register csts; /* controller status */
88  uint32_t nssr; /* NVM subsystem reset */
89 
91  union spdk_nvme_aqa_register aqa;
92 
93  uint64_t asq; /* admin submission queue base addr */
94  uint64_t acq; /* admin completion queue base addr */
95 };
96 
103 };
104 
106  struct doca_dev *emulation_manager;
107  struct doca_devemu_pci_type *pci_type;
108  struct doca_dpa *dpa;
109  TAILQ_ENTRY(nvmf_doca_emulation_manager) link;
110 };
111 
115  TAILQ_HEAD(, nvmf_doca_io) io_cqs;
116  TAILQ_HEAD(, nvmf_doca_sq) io_sqs;
117  bool stopping_all_io_cqs;
118 };
119 
121 
123  struct doca_mmap *host_mmap;
126  TAILQ_HEAD(, nvmf_doca_io) io_cqs;
127  struct nvmf_doca_poll_group *poll_group;
128  TAILQ_ENTRY(nvmf_doca_pci_dev_poll_group) link;
129 };
130 
132  struct spdk_nvmf_transport_poll_group pg;
133  struct doca_pe *pe;
134  struct doca_pe *admin_qp_pe;
136  TAILQ_HEAD(, nvmf_doca_pci_dev_poll_group) pci_dev_pg_list;
137  TAILQ_ENTRY(nvmf_doca_poll_group) link;
138 };
139 
141 
145  struct doca_devemu_pci_dev *pci_dev;
146  struct spdk_nvmf_subsystem *subsystem;
147  struct doca_dev_rep *dev_rep;
148  struct spdk_nvme_transport_id trid;
150  struct spdk_nvmf_ctrlr *ctrlr;
154  bool is_flr;
156  uint32_t ctlr_id;
157  TAILQ_ENTRY(nvmf_doca_pci_dev_admin) link;
158 };
159 
161  struct doca_pe *pe;
162  struct spdk_poller *poller;
163  struct spdk_thread *thread;
164  TAILQ_HEAD(, nvmf_doca_pci_dev_admin) pci_dev_admins;
165 };
166 
168  struct spdk_nvmf_transport transport;
169  TAILQ_HEAD(, nvmf_doca_emulation_manager) emulation_managers;
170  TAILQ_HEAD(, nvmf_doca_poll_group) poll_groups;
171  struct nvmf_doca_poll_group *last_selected_pg;
172  struct nvmf_doca_admin_poll_group admin_pg;
173  uint32_t num_of_listeners;
174 };
175 
176 /* Static functions forward declarations */
177 #define NVME_PAGE_SIZE 4096
178 
179 static void post_cqe_from_response(struct nvmf_doca_request *request, void *arg);
180 static void nvmf_doca_on_post_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data);
181 static void nvmf_doca_on_fetch_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx);
182 static void nvmf_doca_on_copy_data_complete(struct nvmf_doca_sq *sq,
183  struct doca_buf *dst,
184  struct doca_buf *src,
185  union doca_data user_data);
186 static void nvmf_doca_on_post_nvm_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data);
187 static void nvmf_doca_on_fetch_nvm_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx);
189  struct doca_buf *dst,
190  struct doca_buf *src,
191  union doca_data user_data);
192 static void nvmf_doca_pci_dev_admin_reset_continue(struct nvmf_doca_pci_dev_admin *pci_dev_admin);
193 static void handle_controller_register_events(struct doca_devemu_pci_dev *pci_dev,
194  const struct bar_region_config *config);
196 static void nvmf_doca_destroy_admin_qp_continue(struct nvmf_doca_pci_dev_admin *pci_dev_admin);
197 static void nvmf_doca_on_initialization_error(void *cb_arg);
198 static void nvmf_doca_on_admin_sq_stop(struct nvmf_doca_sq *sq);
199 static doca_error_t nvmf_doca_create_host_mmap(struct doca_devemu_pci_dev *pci_dev,
200  struct doca_dev *emulation_manager,
201  struct doca_mmap **mmap_out);
202 static void buffers_ready_copy_data_dpu_to_host(struct nvmf_doca_request *request);
203 static void buffers_ready_copy_data_host_to_dpu(struct nvmf_doca_request *request);
204 static void nvmf_doca_opts_init(struct spdk_nvmf_transport_opts *opts)
205 {
206  DOCA_LOG_DBG("Entering function %s", __func__);
207 
208  opts->max_queue_depth = NVMF_DOCA_DEFAULT_MAX_QUEUE_DEPTH;
209  opts->max_qpairs_per_ctrlr = NVMF_DOCA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
210  opts->in_capsule_data_size = NVMF_DOCA_DEFAULT_IN_CAPSULE_DATA_SIZE;
211  opts->max_io_size = NVMF_DOCA_DEFAULT_MAX_IO_SIZE;
212  opts->io_unit_size = NVMF_DOCA_DEFAULT_IO_UINT_SIZE;
213  opts->max_aq_depth = NVMF_DOCA_DEFAULT_AQ_DEPTH;
214  opts->num_shared_buffers = NVMF_DOCA_DEFAULT_NUM_SHARED_BUFFER;
215  opts->buf_cache_size = NVMF_DOCA_DEFAULT_BUFFER_CACHE_SIZE;
216  opts->dif_insert_or_strip = NVMF_DOCA_DIF_INSERT_OR_STRIP;
217  opts->abort_timeout_sec = NVMF_DOCA_DEFAULT_ABORT_TIMEOUT_SEC;
218  opts->transport_specific = NULL;
219 }
220 
221 /*
222  * Selects a poll group from the system using the round-robin method
223  *
224  * @transport [in]: The doca transport that holds all the poll groups
225  * @return: the selected poll group
226  */
228 {
229  DOCA_LOG_DBG("Entering function %s", __func__);
230 
231  struct nvmf_doca_poll_group *poll_group;
232 
233  if (transport->last_selected_pg == NULL) {
234  transport->last_selected_pg = TAILQ_FIRST(&transport->poll_groups);
235  } else {
236  transport->last_selected_pg = TAILQ_NEXT(transport->last_selected_pg, link);
237  }
238 
239  if (transport->last_selected_pg == NULL) {
240  transport->last_selected_pg = TAILQ_FIRST(&transport->poll_groups);
241  }
242 
243  poll_group = transport->last_selected_pg;
244  return poll_group;
245 }
246 
247 /*
248  * Finds the matching nvmf_doca_pci_dev_poll_group by the given PCI device
249  *
250  * @doca_poll_group [in]: The poll group that includes the nvmf_doca_pci_dev_poll_group
251  * @pci_dev [in]: The PCI device
252  * @return: A pointer to the nvmf_doca_pci_dev_poll_group matching if found, or null if not
253  */
255  struct doca_devemu_pci_dev *pci_dev)
256 {
257  DOCA_LOG_DBG("Entering function %s", __func__);
258 
259  struct nvmf_doca_pci_dev_poll_group *pci_dev_pg;
260 
261  TAILQ_FOREACH(pci_dev_pg, &doca_poll_group->pci_dev_pg_list, link)
262  {
263  if (pci_dev_pg->pci_dev_admin->pci_dev == pci_dev)
264  return pci_dev_pg;
265  }
266  return NULL;
267 }
268 
269 /*
270  * Destroys emulation manager
271  *
272  * @doca_emulation_manager [in]: The emulation manager context
273  * @return: DOCA_SUCCESS on success and other error code otherwise
274  */
276 {
277  DOCA_LOG_DBG("Entering function %s", __func__);
278 
279  doca_error_t ret;
280 
281  if (doca_emulation_manager->dpa != NULL) {
282  ret = doca_dpa_stop(doca_emulation_manager->dpa);
283  if (ret != DOCA_SUCCESS) {
284  DOCA_LOG_ERR("Failed to stop dpa: %s", doca_error_get_name(ret));
285  return ret;
286  }
287  ret = doca_dpa_destroy(doca_emulation_manager->dpa);
288  if (ret != DOCA_SUCCESS) {
289  DOCA_LOG_ERR("Failed to destroy dpa: %s", doca_error_get_name(ret));
290  return ret;
291  }
292  }
293 
294  cleanup_pci_resources(doca_emulation_manager->pci_type, doca_emulation_manager->emulation_manager);
295 
296  free(doca_emulation_manager);
297  return DOCA_SUCCESS;
298 }
299 
300 /*
301  * Creates and starts a pci type
302  *
303  * @doca_emulation_manager [in]: The emulation manager
304  * @return: DOCA_SUCCESS on success and other error code otherwise
305  */
307 {
308  DOCA_LOG_DBG("Entering function %s", __func__);
309 
310  const struct bar_memory_layout_config *layout_config;
311  const struct bar_db_region_config *db_config;
312  const struct bar_region_config *region_config;
313  int idx;
314  doca_error_t ret;
315 
316  ret = doca_devemu_pci_type_create(NVME_TYPE_NAME, &doca_emulation_manager->pci_type);
317  if (ret != DOCA_SUCCESS) {
318  DOCA_LOG_ERR("Failed to create pci type: %s", doca_error_get_name(ret));
319  return ret;
320  }
321 
322  ret = doca_devemu_pci_type_set_dev(doca_emulation_manager->pci_type, doca_emulation_manager->emulation_manager);
323  if (ret != DOCA_SUCCESS) {
324  DOCA_LOG_ERR("Failed to set device for pci type: %s", doca_error_get_name(ret));
325  goto destroy_pci_type;
326  }
327 
328  ret = doca_devemu_pci_type_set_device_id(doca_emulation_manager->pci_type, PCI_TYPE_DEVICE_ID);
329  if (ret != DOCA_SUCCESS) {
330  DOCA_LOG_ERR("Failed to set device ID for pci type: %s", doca_error_get_name(ret));
331  goto destroy_pci_type;
332  }
333 
334  ret = doca_devemu_pci_type_set_vendor_id(doca_emulation_manager->pci_type, PCI_TYPE_VENDOR_ID);
335  if (ret != DOCA_SUCCESS) {
336  DOCA_LOG_ERR("Failed to set vendor ID for pci type: %s", doca_error_get_name(ret));
337  goto destroy_pci_type;
338  }
339 
341  if (ret != DOCA_SUCCESS) {
342  DOCA_LOG_ERR("Failed to set subsystem ID for pci type: %s", doca_error_get_name(ret));
343  goto destroy_pci_type;
344  }
345 
346  ret = doca_devemu_pci_type_set_subsystem_vendor_id(doca_emulation_manager->pci_type,
348  if (ret != DOCA_SUCCESS) {
349  DOCA_LOG_ERR("Failed to set subsystem vendor ID for the given pci type: %s", doca_error_get_name(ret));
350  goto destroy_pci_type;
351  }
352 
354  if (ret != DOCA_SUCCESS) {
355  DOCA_LOG_ERR("Failed to set revision ID for pci type: %s", doca_error_get_name(ret));
356  goto destroy_pci_type;
357  }
358 
359  ret = doca_devemu_pci_type_set_class_code(doca_emulation_manager->pci_type, PCI_TYPE_CLASS_CODE);
360  if (ret != DOCA_SUCCESS) {
361  DOCA_LOG_ERR("Failed to set class code for pci type: %s", doca_error_get_name(ret));
362  goto destroy_pci_type;
363  }
364 
365  ret = doca_devemu_pci_type_set_num_msix(doca_emulation_manager->pci_type, PCI_TYPE_NUM_MSIX);
366  if (ret != DOCA_SUCCESS) {
367  DOCA_LOG_ERR("Failed to set the number of MSI-X for pci type: %s", doca_error_get_name(ret));
368  goto destroy_pci_type;
369  }
370 
371  for (idx = 0; idx < PCI_TYPE_NUM_BAR_MEMORY_LAYOUT; ++idx) {
372  layout_config = &layout_configs[idx];
373  ret = doca_devemu_pci_type_set_memory_bar_conf(doca_emulation_manager->pci_type,
374  layout_config->bar_id,
375  layout_config->log_size,
376  layout_config->memory_type,
377  layout_config->prefetchable);
378  if (ret != DOCA_SUCCESS) {
379  DOCA_LOG_ERR("Unable to set layout at index %d: %s", idx, doca_error_get_name(ret));
380  goto destroy_pci_type;
381  }
382  }
383 
384  for (idx = 0; idx < PCI_TYPE_NUM_BAR_DB_REGIONS; ++idx) {
385  db_config = &db_configs[idx];
386  if (db_config->with_data)
388  db_config->region.bar_id,
389  db_config->region.start_address,
390  db_config->region.size,
391  db_config->log_db_size,
392  db_config->db_id_msbyte,
393  db_config->db_id_lsbyte);
394  else
396  db_config->region.bar_id,
397  db_config->region.start_address,
398  db_config->region.size,
399  db_config->log_db_size,
400  db_config->log_db_stride_size);
401  if (ret != DOCA_SUCCESS) {
402  DOCA_LOG_ERR("Unable to set DB region at index %d: %s", idx, doca_error_get_name(ret));
403  goto destroy_pci_type;
404  }
405  }
406 
407  for (idx = 0; idx < PCI_TYPE_NUM_BAR_MSIX_TABLE_REGIONS; ++idx) {
408  region_config = &msix_table_configs[idx];
410  region_config->bar_id,
411  region_config->start_address,
412  region_config->size);
413  if (ret != DOCA_SUCCESS) {
414  DOCA_LOG_ERR("Unable to set MSI-X table region at index %d: %s", idx, doca_error_get_name(ret));
415  goto destroy_pci_type;
416  }
417  }
418 
419  for (idx = 0; idx < PCI_TYPE_NUM_BAR_MSIX_PBA_REGIONS; ++idx) {
420  region_config = &msix_pba_configs[idx];
421  ret = doca_devemu_pci_type_set_bar_msix_pba_region_conf(doca_emulation_manager->pci_type,
422  region_config->bar_id,
423  region_config->start_address,
424  region_config->size);
425  if (ret != DOCA_SUCCESS) {
426  DOCA_LOG_ERR("Unable to set MSI-X pending bit array region at index %d: %s",
427  idx,
428  doca_error_get_name(ret));
429  goto destroy_pci_type;
430  }
431  }
432 
433  for (idx = 0; idx < PCI_TYPE_NUM_BAR_STATEFUL_REGIONS; ++idx) {
434  region_config = &stateful_configs[idx];
435  ret = doca_devemu_pci_type_set_bar_stateful_region_conf(doca_emulation_manager->pci_type,
436  region_config->bar_id,
437  region_config->start_address,
438  region_config->size);
439  if (ret != DOCA_SUCCESS) {
440  DOCA_LOG_ERR("Unable to set Stateful region at index %d: %s", idx, doca_error_get_name(ret));
441  goto destroy_pci_type;
442  }
443  }
444 
445  ret = doca_devemu_pci_type_start(doca_emulation_manager->pci_type);
446  if (ret != DOCA_SUCCESS) {
447  DOCA_LOG_ERR("Failed to start pci type: %s", doca_error_get_name(ret));
448  goto destroy_pci_type;
449  }
450 
451  uint8_t data[128] = {};
452  struct nvmf_doca_nvme_registers *registers = (struct nvmf_doca_nvme_registers *)&data[0];
453  *registers = (struct nvmf_doca_nvme_registers){
454  .cap.bits =
455  {
457  .cqr = 0x1,
458  .to = 0xf0,
459  .css = 0x1,
460  },
461  .vs.bits =
462  {
463  .mjr = 0x1,
464  .mnr = 0x3,
465  },
466  };
468  0,
469  0,
470  data,
471  sizeof(data));
472  if (ret != DOCA_SUCCESS) {
473  DOCA_LOG_ERR("Failed to modify bar stateful region: %s", doca_error_get_name(ret));
474  doca_devemu_pci_type_stop(doca_emulation_manager->pci_type);
475  return ret;
476  }
477 
478  return ret;
479 
480 destroy_pci_type:
481  doca_devemu_pci_type_destroy(doca_emulation_manager->pci_type);
482  return ret;
483 }
484 
485 /*
486  * Creates emulation manager context
487  *
488  * @dev_info [in]: The device info
489  * @ret_emulation_manager [out]: The returned emulation manager context
490  * @return: DOCA_SUCCESS on success and other error code otherwise
491  */
493  struct nvmf_doca_emulation_manager **ret_emulation_manager)
494 {
495  DOCA_LOG_DBG("Entering function %s", __func__);
496 
497  struct nvmf_doca_emulation_manager *doca_emulation_manager;
498  doca_error_t ret;
499 
500  doca_emulation_manager =
501  (struct nvmf_doca_emulation_manager *)calloc(1, sizeof(struct nvmf_doca_emulation_manager));
502  if (doca_emulation_manager == NULL) {
503  DOCA_LOG_INFO("Failed to allocate memory for emultaion manager context");
504  return DOCA_ERROR_NO_MEMORY;
505  }
506 
507  ret = doca_dev_open(dev_info, &doca_emulation_manager->emulation_manager);
508  if (ret != DOCA_SUCCESS) {
509  DOCA_LOG_ERR("Failed to open doca device: %s", doca_error_get_name(ret));
510  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
511  return ret;
512  }
513 
514  ret = nvmf_doca_pci_type_create_and_start(doca_emulation_manager);
515  if (ret != DOCA_SUCCESS) {
516  DOCA_LOG_ERR("Failed to initialize PCI type: %s", doca_error_get_name(ret));
517  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
518  return ret;
519  }
520 
521  ret = doca_dpa_create(doca_emulation_manager->emulation_manager, &doca_emulation_manager->dpa);
522  if (ret != DOCA_SUCCESS) {
523  DOCA_LOG_ERR("Failed to create DPA context: %s", doca_error_get_name(ret));
524  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
525  return ret;
526  }
527 
528  ret = doca_dpa_set_app(doca_emulation_manager->dpa, nvmf_doca_transport_app);
529  if (ret != DOCA_SUCCESS) {
530  DOCA_LOG_ERR("Failed to set DPA app: %s", doca_error_get_name(ret));
531  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
532  return ret;
533  }
534 
535  ret = doca_dpa_start(doca_emulation_manager->dpa);
536  if (ret != DOCA_SUCCESS) {
537  DOCA_LOG_ERR("Failed to start DPA context: %s", doca_error_get_name(ret));
538  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
539  return ret;
540  }
541 
542  *ret_emulation_manager = doca_emulation_manager;
543  return ret;
544 }
545 
546 static int nvmf_doca_admin_poll_group_poll(void *arg)
547 {
548  struct nvmf_doca_admin_poll_group *admin_pg = arg;
549 
550  return doca_pe_progress(admin_pg->pe);
551 }
552 
554 {
555  if (admin_pg->poller != NULL) {
556  spdk_poller_unregister(&admin_pg->poller);
557  admin_pg->poller = NULL;
558  }
559 
560  doca_error_t ret = doca_pe_destroy(admin_pg->pe);
561  if (ret != DOCA_SUCCESS) {
562  DOCA_LOG_ERR("Failed to destroy admin progress engine: %s", doca_error_get_name(ret));
563  }
564 }
565 
567 {
568  doca_error_t ret;
569 
570  ret = doca_pe_create(&admin_pg->pe);
571  if (ret != DOCA_SUCCESS) {
572  DOCA_LOG_ERR("Failed to create admin progress engine: %s", doca_error_get_name(ret));
573  return ret;
574  }
575 
576  /* This poller will be used on application thread to poll PCI events */
577  admin_pg->poller = spdk_poller_register(nvmf_doca_admin_poll_group_poll, admin_pg, 0);
578  if (admin_pg->poller == NULL) {
579  DOCA_LOG_ERR("Failed to register admin poller");
580  doca_pe_destroy(admin_pg->pe);
582  }
583 
584  TAILQ_INIT(&admin_pg->pci_dev_admins);
585 
586  admin_pg->thread = spdk_get_thread();
587  assert(admin_pg->thread == spdk_thread_get_app_thread());
588 
589  return DOCA_SUCCESS;
590 }
591 
592 /*
593  * Creates the DOCA transport
594  *
595  * Callback invoked by the NVMf target once user issues the create transport RPC
596  * The callback is invoked after the nvmf_doca_opts_init callback
597  *
598  * @opts [in]: The transport options
599  * @return: The newly created DOCA transport on success and NULL otherwise
600  */
601 static struct spdk_nvmf_transport *nvmf_doca_create(struct spdk_nvmf_transport_opts *opts)
602 {
603  (void)opts;
604 
605  DOCA_LOG_DBG("Entering function %s", __func__);
606 
607  struct doca_devinfo **dev_list;
608  uint32_t nb_devs;
609  uint8_t is_hotplug_manager;
610  doca_error_t ret;
611 
612  struct nvmf_doca_transport *doca_transport =
613  (struct nvmf_doca_transport *)calloc(1, sizeof(struct nvmf_doca_transport));
614  if (doca_transport == NULL) {
615  DOCA_LOG_INFO("Failed to allocate memory for doca_transport");
616  return NULL;
617  }
618 
619  TAILQ_INIT(&doca_transport->poll_groups);
620  TAILQ_INIT(&doca_transport->emulation_managers);
621  doca_transport->last_selected_pg = NULL;
622 
623  ret = doca_devinfo_create_list(&dev_list, &nb_devs);
624  if (ret != DOCA_SUCCESS) {
625  DOCA_LOG_ERR("Method doca_devinfo_create_list failed: %s", doca_error_get_name(ret));
626  goto free_transport;
627  }
628 
629  for (uint32_t idx = 0; idx < nb_devs; idx++) {
630  ret = doca_devinfo_cap_is_hotplug_manager_supported(dev_list[idx], &is_hotplug_manager);
631  if (ret == DOCA_SUCCESS && is_hotplug_manager == 1) {
632  struct nvmf_doca_emulation_manager *doca_emulation_manager;
633 
634  ret = nvmf_doca_create_emulation_manager(dev_list[idx], &doca_emulation_manager);
635  if (ret != DOCA_SUCCESS) {
636  DOCA_LOG_ERR("Emulation manager initialization failed: %s", doca_error_get_name(ret));
637  } else {
638  TAILQ_INSERT_TAIL(&doca_transport->emulation_managers, doca_emulation_manager, link);
639  }
640  break;
641  }
642  }
643 
644  if (TAILQ_EMPTY(&doca_transport->emulation_managers)) {
645  DOCA_LOG_ERR("No emulation managers available");
646  goto destroy_list;
647  }
648 
649  ret = doca_devinfo_destroy_list(dev_list);
650  if (ret != DOCA_SUCCESS) {
651  DOCA_LOG_ERR("Failed to destroy devinfo list: %s", doca_error_get_name(ret));
652  goto free_transport;
653  }
654 
655  /* This poller will be used on application thread to poll PCI events */
656  ret = nvmf_doca_admin_poll_group_create(&doca_transport->admin_pg);
657  if (ret != DOCA_SUCCESS) {
658  goto free_transport;
659  }
660 
661  doca_transport->num_of_listeners = 0;
662 
663  return &doca_transport->transport;
664 
665 destroy_list:
666  ret = doca_devinfo_destroy_list(dev_list);
667  if (ret != DOCA_SUCCESS) {
668  DOCA_LOG_ERR("Failed to destroy devinfo list: %s", doca_error_get_name(ret));
669  }
670 free_transport:
671  free(doca_transport);
672 
673  return NULL;
674 }
675 
676 /*
677  * Dump transport-specific opts into JSON
678  *
679  * @transport [in]: The DOCA transport
680  * @w [out]: The JSON dump
681  */
static void nvmf_doca_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	/* No DOCA-specific transport options are exposed yet; nothing to write */
	(void)w;
	(void)transport;
}
689 
690 /*
691  * Destroys the DOCA transport
692  *
693  * Callback invoked by the NVMf target once user issues the destroy transport RPC
694  *
695  * @transport [in]: The DOCA transport to destroy
696  * @cb_fn [in]: Callback to be invoked once destroy finished - can be NULL
697  * @cb_arg [in]: Argument to be passed to the callback
698  * @return: 0 on success and negative error code otherwise
699  */
700 static int nvmf_doca_destroy(struct spdk_nvmf_transport *transport,
701  spdk_nvmf_transport_destroy_done_cb cb_fn,
702  void *cb_arg)
703 {
704  DOCA_LOG_DBG("Entering function %s", __func__);
705 
706  struct nvmf_doca_transport *doca_transport;
707  struct nvmf_doca_emulation_manager *doca_emulation_manager, *temp;
708 
709  doca_transport = SPDK_CONTAINEROF(transport, struct nvmf_doca_transport, transport);
710 
711  nvmf_doca_admin_poll_group_destroy(&doca_transport->admin_pg);
712 
713  TAILQ_FOREACH_SAFE(doca_emulation_manager, &doca_transport->emulation_managers, link, temp)
714  {
715  TAILQ_REMOVE(&doca_transport->emulation_managers, doca_emulation_manager, link);
716  nvmf_doca_destroy_emulation_manager(doca_emulation_manager);
717  }
718 
719  free(doca_transport);
720 
721  if (cb_fn) {
722  cb_fn(cb_arg);
723  }
724  return 0;
725 }
726 
728  const char *vuid)
729 {
730  struct nvmf_doca_pci_dev_admin *pci_dev_admin;
731 
732  TAILQ_FOREACH(pci_dev_admin, &doca_transport->admin_pg.pci_dev_admins, link)
733  {
734  if ((strncmp(vuid, pci_dev_admin->trid.traddr, DOCA_DEVINFO_REP_VUID_SIZE) == 0)) {
735  return pci_dev_admin;
736  }
737  }
738 
739  return NULL;
740 }
741 
742 /*
743  * Checks if a PCI device with the given VUID exists within the specified transport.
744  *
745  * @doca_transport [in]: Doca transport.
746  * @vuid [in]: VUID to look for.
747  * @return: DOCA_SUCCESS on success and other error code otherwise
748  */
750 {
751  DOCA_LOG_DBG("Entering function %s", __func__);
752 
755 }
756 
757 /*
758  * Finds an emulation manager and a function with the requested VUID
759  *
760  * @doca_transport [in]: Doca transport
761  * @vuid [in]: vuid to look for
762  * @ret_emulation_manager [out]: emulation manager
763  * @ret_device_rep [out]: device rep
764  * @return: DOCA_SUCCESS on success and other error code otherwise
765  */
768  const char *vuid,
769  struct nvmf_doca_emulation_manager **ret_emulation_manager,
770  struct doca_dev_rep **ret_device_rep)
771 {
772  DOCA_LOG_DBG("Entering function %s", __func__);
773 
774  struct nvmf_doca_emulation_manager *doca_emulation_manager;
775  struct doca_devinfo_rep **devinfo_list_rep;
776  char rep_vuid[DOCA_DEVINFO_REP_VUID_SIZE];
777  struct doca_dev_rep *device_rep;
778  uint32_t nb_devs_rep;
779  doca_error_t ret;
780 
781  TAILQ_FOREACH(doca_emulation_manager, &doca_transport->emulation_managers, link)
782  {
783  ret = doca_devemu_pci_type_create_rep_list(doca_emulation_manager->pci_type,
784  &devinfo_list_rep,
785  &nb_devs_rep);
786  if (ret != DOCA_SUCCESS) {
787  DOCA_LOG_ERR("Couldn't create the device representors list: %s", doca_error_get_name(ret));
788  return ret;
789  }
790 
791  for (uint32_t idx = 0; idx < nb_devs_rep; idx++) {
792  ret = doca_devinfo_rep_get_vuid(devinfo_list_rep[idx], rep_vuid, DOCA_DEVINFO_REP_VUID_SIZE);
793  if (ret == DOCA_SUCCESS && (strncmp(vuid, rep_vuid, DOCA_DEVINFO_REP_VUID_SIZE) == 0)) {
794  ret = doca_dev_rep_open(devinfo_list_rep[idx], &device_rep);
795  if (ret != DOCA_SUCCESS) {
796  DOCA_LOG_ERR("Failed to open a device: %s", doca_error_get_name(ret));
797  doca_devinfo_rep_destroy_list(devinfo_list_rep);
798  return ret;
799  }
800  *ret_device_rep = device_rep;
801  *ret_emulation_manager = doca_emulation_manager;
802  doca_devinfo_rep_destroy_list(devinfo_list_rep);
803  return DOCA_SUCCESS;
804  }
805  }
806  }
807 
808  DOCA_LOG_ERR("Could not find an emulation manager and a fuction with the requested VUID");
809  doca_devinfo_rep_destroy_list(devinfo_list_rep);
810  return DOCA_ERROR_NOT_FOUND;
811 }
812 
813 /*
814  * Callback invoked once admin CQ has been stopped
815  *
816  * @io [in]: The NVMf DOCA IO that was stopped
817  */
819 {
820  struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = io->poll_group;
821  struct nvmf_doca_pci_dev_admin *pci_dev_admin = pci_dev_pg->pci_dev_admin;
822  struct nvmf_doca_admin_qp *admin_qp = pci_dev_pg->admin_qp;
823  struct nvmf_doca_poll_group *doca_poll_group = pci_dev_pg->poll_group;
824 
826  free(io);
827  admin_qp->admin_cq = NULL;
828 
829  DOCA_LOG_INFO("Destroying poll group %p PCI dev poll group %p", doca_poll_group, pci_dev_pg);
830  TAILQ_REMOVE(&doca_poll_group->pci_dev_pg_list, pci_dev_pg, link);
832 
834 }
835 
836 /*
837  * State changed callback
838  *
839  * @user_data [in]: Data user
840  * @ctx [in]: Doca context
841  * @prev_state [in]: Previous state
842  * @next_state [in]: Next state
843  */
844 static void devemu_state_changed_cb(const union doca_data user_data,
845  struct doca_ctx *ctx,
846  enum doca_ctx_states prev_state,
847  enum doca_ctx_states next_state)
848 {
849  DOCA_LOG_DBG("Entering function %s", __func__);
850 
851  (void)ctx;
852  (void)prev_state;
853 
854  struct nvmf_doca_pci_dev_admin *pci_dev_admin = user_data.ptr;
855 
856  switch (next_state) {
857  case DOCA_CTX_STATE_IDLE:
858  DOCA_LOG_DBG("DOCA_CTX_STATE_IDLE");
859  if (!pci_dev_admin->is_destroy_flow) {
861  }
862  pci_dev_admin->state = NVMF_DOCA_LISTENER_UNINITIALIZED;
863  break;
865  DOCA_LOG_DBG("DOCA_CTX_STATE_STARTING");
866  break;
868  DOCA_LOG_DBG("DOCA_CTX_STATE_RUNNING");
870  break;
872  DOCA_LOG_DBG("Devemu device has entered into stopping state. Unexpected!, destroy datapath resources!");
873  break;
874  default:
875  break;
876  }
877 }
878 
879 /*
880  * message to stop the IO SQ
881  *
882  * Must be executed by poll group that owns the SQ. The Admin poll group will send this as message to owner of the SQ
883  * This flow is async and once completed the nvmf_doca_pci_dev_poll_group_stop_io_sq_done() message will be sent
884  * back to the admin poll group
885  *
886  * @ctx [in]: The context of the message
887  */
889 {
890  struct nvmf_doca_sq *sq = ctx;
891 
892  nvmf_doca_sq_stop(sq);
893 }
894 
897 };
898 
899 /*
900  * Message to indicate that stopping of the IO SQ is done
901  *
902  * This message is used as a response to the nvmf_doca_pci_dev_poll_group_stop_io_sq() message
903  * This message is sent to admin poll group once IO SQ has been stopped
904  *
905  * @ctx [in]: The context of the message
906  */
908 {
909  struct nvmf_doca_sq *sq = ctx;
910  struct nvmf_doca_pci_dev_admin *pci_dev_admin = sq->io->poll_group->pci_dev_admin;
911  struct nvmf_doca_admin_qp *admin_qp = pci_dev_admin->admin_qp;
912  struct nvmf_doca_poll_group_delete_io_sq_ctx *delete_io_sq_ctx = sq->ctx;
913 
914  TAILQ_REMOVE(&admin_qp->io_sqs, sq, pci_dev_admin_link);
915  free(sq);
916 
917  if (delete_io_sq_ctx == NULL) {
918  /* Indicates that request to stop the IO SQ arrived from reset event */
920  } else {
921  /* Indicates that request to stop the IO SQ arrived delete IO SQ admin command */
922  struct nvmf_doca_request *request = delete_io_sq_ctx->request;
923 
924  free(delete_io_sq_ctx);
925 
926  request->request.rsp->nvme_cpl.cid = request->request.cmd->nvme_cmd.cid;
928  }
929 }
930 
931 /*
932  * Starts async flow of stopping all IO SQs each on their relevant poll group
933  *
934  * @admin_qp [in]: The PCI device admin QP context containing IO SQs from all poll groups
935  */
937 {
938  struct nvmf_doca_sq *sq;
939  struct nvmf_doca_sq *sq_tmp;
940  struct spdk_thread *thread;
941 
942  TAILQ_FOREACH_SAFE(sq, &admin_qp->io_sqs, pci_dev_admin_link, sq_tmp)
943  {
944  thread = sq->io->poll_group->poll_group->pg.group->thread;
945  spdk_thread_exec_msg(thread, nvmf_doca_pci_dev_poll_group_stop_io_sq, sq);
946  }
947 }
948 
949 /*
950  * message to stop the IO CQ
951  *
952  * Must be executed by poll group that owns the CQ. The Admin poll group will send this as message to owner of the CQ
953  * This flow is async and once completed then the nvmf_doca_pci_dev_poll_group_stop_io_cq_done() message will be sent
954  * back to the admin poll group
955  *
956  * @ctx [in]: The context of the message
957  */
959 {
960  struct nvmf_doca_io *io = ctx;
961 
962  nvmf_doca_io_stop(io);
963 }
964 
967 };
968 
/*
 * message to indicate that stopping of the IO CQ is done
 *
 * This message is used as a response to the nvmf_doca_pci_dev_poll_group_stop_io_cq() message
 * This message is sent to admin QP poll group once IO CQ has been stopped
 *
 * @ctx [in]: The context of the message
 */
/* NOTE(review): function signature line missing from this extraction (original line 977) */
{
	struct nvmf_doca_io *io = ctx;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = io->pci_dev_admin;
	struct nvmf_doca_admin_qp *admin_qp = pci_dev_admin->admin_qp;
	struct nvmf_doca_poll_group_delete_io_cq_ctx *delete_io_cq_ctx = io->ctx;

	/* The IO CQ is no longer tracked by the admin QP once stopped */
	TAILQ_REMOVE(&admin_qp->io_cqs, io, pci_dev_admin_link);
	free(io);

	if (delete_io_cq_ctx == NULL) {
		/* Indicates that request to stop the IO CQ arrived from reset event */
		/* NOTE(review): a statement is missing here in this extraction (original line 989) —
		 * presumably the reset-flow continuation */
	} else {
		/* Indicates that request to stop the IO CQ arrived delete IO CQ admin command */
		struct nvmf_doca_request *request = delete_io_cq_ctx->request;

		free(delete_io_cq_ctx);

		/* Echo the command ID into the completion before responding to the host */
		request->request.rsp->nvme_cpl.cid = request->request.cmd->nvme_cmd.cid;
		/* NOTE(review): completion-send statement missing here (original line 997) */
	}
}
1000 
/*
 * Starts async flow of stopping all IO CQs each on their relevant poll group
 *
 * @admin_qp [in]: The admin QP containing IO CQs from all poll groups
 */
/* NOTE(review): function signature line missing from this extraction (original line 1006) */
{
	struct nvmf_doca_io *io;
	struct nvmf_doca_io *io_tmp;
	struct spdk_thread *thread;

	/* Guard against re-entry while a previous stop-all flow is still in flight */
	if (admin_qp->stopping_all_io_cqs)
		return;

	/* Each IO CQ must be stopped on the poll group thread that owns it */
	TAILQ_FOREACH_SAFE(io, &admin_qp->io_cqs, pci_dev_admin_link, io_tmp)
	{
		thread = io->poll_group->poll_group->pg.group->thread;
		spdk_thread_exec_msg(thread, nvmf_doca_pci_dev_poll_group_stop_io_cq, io);
	}

	admin_qp->stopping_all_io_cqs = true;
}
1023 
1025 {
1026  doca_error_t ret;
1027 
1028  if (pci_dev_pg != NULL) {
1029  ret = doca_mmap_destroy(pci_dev_pg->host_mmap);
1030  if (ret != DOCA_SUCCESS) {
1031  DOCA_LOG_ERR("Failed to destroy PCI device poll group: Failed to destroy mmap - %s",
1032  doca_error_get_name(ret));
1033  }
1034  pci_dev_pg->host_mmap = NULL;
1035  }
1036  DOCA_LOG_INFO("Destroyed PCI dev poll group %p", pci_dev_pg);
1037  memset(pci_dev_pg, 0, sizeof(*pci_dev_pg));
1038  free(pci_dev_pg);
1039 }
1040 
/*
 * Creates a PCI device poll group object: nvmf_doca_pci_dev_poll_group
 *
 * @pci_dev_admin [in]: PCI device admin context
 * @admin_qp [in]: The admin QP context. Can be NULL in case this poll group does not poll the admin QP
 * @doca_poll_group [in]: Doca poll group
 * @ret_pci_dev_pg [out]: The newly created nvmf_doca_pci_dev_poll_group
 * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
 */
/* NOTE(review): first signature line missing from this extraction (original line 1050) */
					struct nvmf_doca_admin_qp *admin_qp,
					struct nvmf_doca_poll_group *doca_poll_group,
					struct nvmf_doca_pci_dev_poll_group **ret_pci_dev_pg)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg =
		(struct nvmf_doca_pci_dev_poll_group *)calloc(1, sizeof(*pci_dev_pg));
	if (pci_dev_pg == NULL) {
		DOCA_LOG_ERR("Failed to allocate doca device poll group");
		return DOCA_ERROR_NO_MEMORY;
	}

	pci_dev_pg->poll_group = doca_poll_group;
	pci_dev_pg->admin_qp = admin_qp;
	pci_dev_pg->pci_dev_admin = pci_dev_admin;
	TAILQ_INIT(&pci_dev_pg->io_cqs);

	/* NOTE(review): host-mmap creation call truncated in this extraction (original lines 1069-1070);
	 * `ret` is declared and assigned there */
		&pci_dev_pg->host_mmap);
	if (ret != DOCA_SUCCESS) {
		free(pci_dev_pg);
		return ret;
	}

	*ret_pci_dev_pg = pci_dev_pg;

	return DOCA_SUCCESS;
}
1081 
/*
 * message to indicate that destroy of the admin QP has finished
 *
 * This message is used as a response to the nvmf_doca_destroy_admin_qp() message
 * This message is sent to PCI device admin poll group once admin QP has been destroyed
 *
 * @cb_arg [in]: The PCI device admin context
 */
static void nvmf_doca_destroy_admin_qp_done(void *cb_arg)
{
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = cb_arg;

	/* The admin QP and its owning poll group are gone; drop the references */
	pci_dev_admin->admin_qp = NULL;
	pci_dev_admin->admin_qp_pg = NULL;

	/* NOTE(review): a statement is missing here in this extraction (original line 1095) —
	 * presumably the reset-flow continuation */
}
1097 
/*
 * Continue async flow of destroying admin QP
 *
 * Tears resources down in dependency order: IO CQs first, then the admin SQ,
 * then the admin CQ, and finally frees the QP and notifies the PCI device
 * admin thread via nvmf_doca_destroy_admin_qp_done().
 */
/* NOTE(review): function signature line missing from this extraction (original line 1101) */
{
	struct nvmf_doca_admin_qp *admin_qp = pci_dev_admin->admin_qp;

	/* In case some IO CQs exist, then send message to all poll groups to delete their IO CQs */
	if (!TAILQ_EMPTY(&admin_qp->io_cqs)) {
		/* NOTE(review): stop-all-IO-CQs call missing in this extraction (original line 1107) */
		return;
	}
	admin_qp->stopping_all_io_cqs = false;

	/* In case no IO CQs exist, then we can attempt to destroy the admin SQ */
	if (admin_qp->admin_sq != NULL) {
		nvmf_doca_sq_stop(admin_qp->admin_sq);
		return;
	}

	/* In case no admin SQ exist then we can destroy admin CQ */
	if (admin_qp->admin_cq != NULL) {
		nvmf_doca_io_stop(admin_qp->admin_cq);
		return;
	}

	free(admin_qp);

	/* Respond to the PCI device admin thread that destruction completed */
	spdk_thread_exec_msg(pci_dev_admin->doca_transport->admin_pg.thread,
			     /* NOTE(review): callback argument missing (original line 1127) */
			     pci_dev_admin);
}
1130 
/*
 * Starts async flow of destroying admin QP
 *
 * This message is sent from PCI device admin thread to admin QP thread
 * Once admin QP is destroyed the admin QP thread will respond with nvmf_doca_destroy_admin_qp_done()
 *
 * @cb_arg [in]: The PCI device admin context
 */
static void nvmf_doca_destroy_admin_qp(void *cb_arg)
{
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = cb_arg;
	struct nvmf_doca_admin_qp *admin_qp = pci_dev_admin->admin_qp;

	/* In case some IO SQs exist, then send message to all poll groups to delete their IO SQs */
	if (!TAILQ_EMPTY(&admin_qp->io_sqs)) {
		/* NOTE(review): stop-all-IO-SQs call missing in this extraction (original line 1144) */
		return;
	}

	nvmf_doca_destroy_admin_qp_continue(pci_dev_admin);
}
1150 
	uint64_t admin_cq_address;	/* Host address of the admin CQ ring, taken from the ACQ register */
	uint64_t admin_sq_address;	/* Host address of the admin SQ ring, taken from the ASQ register */
	uint16_t admin_cq_size;		/* Admin CQ depth in entries (AQA.ACQS + 1) */
	uint16_t admin_sq_size;		/* Admin SQ depth in entries (AQA.ASQS + 1) */
};
1160 
1161 /*
1162  * Message to indicate that create of the admin QP has finished
1163  *
1164  * This message is used as a response to the nvmf_doca_create_admin_qp() message
1165  * This message is sent to PCI device admin poll group once admin QP has been destroyed
1166  */
1167 static void nvmf_doca_create_admin_qp_done(void *cb_arg)
1168 {
1169  doca_error_t ret;
1170  struct nvmf_doca_create_admin_qp_ctx *ctx = cb_arg;
1171  struct nvmf_doca_pci_dev_admin *pci_dev_admin = ctx->pci_dev_admin;
1172  struct nvmf_doca_admin_qp *admin_qp = ctx->admin_qp_out;
1173 
1174  free(ctx);
1175 
1176  pci_dev_admin->state = NVMF_DOCA_LISTENER_INITIALIZED;
1177  char ready = 0x01;
1178  ret = doca_devemu_pci_dev_modify_bar_stateful_region_values(pci_dev_admin->pci_dev, 0, 28, &ready, 1);
1179  if (ret != DOCA_SUCCESS) {
1180  DOCA_LOG_ERR("Failed to modify stateful region values %s", doca_error_get_name(ret));
1181  nvmf_doca_on_initialization_error(pci_dev_admin);
1182  return;
1183  }
1184 
1185  pci_dev_admin->admin_qp = admin_qp;
1186 }
1187 
/*
 * Starts async flow of creating admin QP
 *
 * This message is sent from PCI device admin thread to admin QP thread
 * Once admin QP is created the admin QP thread will respond with nvmf_doca_create_admin_qp_done()
 *
 * @cb_arg [in]: The nvmf_doca_create_admin_qp_ctx describing the admin queues to create
 */
static void nvmf_doca_create_admin_qp(void *cb_arg)
{
	struct nvmf_doca_create_admin_qp_ctx *ctx = cb_arg;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = ctx->pci_dev_admin;
	struct nvmf_doca_poll_group *doca_poll_group = ctx->doca_poll_group;
	struct spdk_thread *admin_thread = pci_dev_admin->doca_transport->admin_pg.thread;

	struct nvmf_doca_admin_qp *admin_qp = calloc(1, sizeof(*admin_qp));
	if (admin_qp == NULL) {
		DOCA_LOG_ERR("Failed to create admin QP: Out of memory");
		/* Report the failure back on the PCI device admin thread */
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}
	TAILQ_INIT(&admin_qp->io_cqs);
	TAILQ_INIT(&admin_qp->io_sqs);

	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg;
	doca_error_t ret = nvmf_doca_create_pci_dev_poll_group(pci_dev_admin, admin_qp, doca_poll_group, &pci_dev_pg);
	if (ret != DOCA_SUCCESS) {
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}
	TAILQ_INSERT_TAIL(&doca_poll_group->pci_dev_pg_list, pci_dev_pg, link);

	/* Admin CQ: queue 0 with MSI-X vector 0, backed by the host buffer from the ACQ register */
	struct nvmf_doca_io_create_attr io_attr = {
		.pe = doca_poll_group->admin_qp_pe,
		.dev = pci_dev_admin->emulation_manager->emulation_manager,
		.nvme_dev = pci_dev_admin->pci_dev,
		.dpa = pci_dev_admin->emulation_manager->dpa,
		.cq_id = NVMF_ADMIN_QUEUE_ID,
		.cq_depth = ctx->admin_cq_size,
		.host_cq_mmap = pci_dev_pg->host_mmap,
		.host_cq_address = ctx->admin_cq_address,
		.enable_msix = true,
		.msix_idx = 0,
		.max_num_sq = 1,
		.post_cqe_cb = nvmf_doca_on_post_cqe_complete,
		.fetch_sqe_cb = nvmf_doca_on_fetch_sqe_complete,
		.copy_data_cb = nvmf_doca_on_copy_data_complete,
		.stop_sq_cb = nvmf_doca_on_admin_sq_stop,
		.stop_io_cb = nvmf_doca_on_admin_cq_stop,
	};

	struct nvmf_doca_io *emulated_cq = calloc(1, sizeof(*emulated_cq));
	if (emulated_cq == NULL) {
		DOCA_LOG_ERR("Failed to create io: Failed to allocate IO struct");
		TAILQ_REMOVE(&doca_poll_group->pci_dev_pg_list, pci_dev_pg, link);
		/* NOTE(review): poll-group cleanup call missing in this extraction (original line 1241) */
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}
	ret = nvmf_doca_io_create(&io_attr, emulated_cq);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to create io: %s", doca_error_get_name(ret));
		free(emulated_cq);
		TAILQ_REMOVE(&doca_poll_group->pci_dev_pg_list, pci_dev_pg, link);
		/* NOTE(review): poll-group cleanup call missing in this extraction (original line 1250) */
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}
	emulated_cq->poll_group = pci_dev_pg;
	emulated_cq->pci_dev_admin = pci_dev_admin;
	admin_qp->admin_cq = emulated_cq;

	struct nvmf_doca_sq *sq = calloc(1, sizeof(*sq));
	if (sq == NULL) {
		DOCA_LOG_ERR("Failed to create io: Failed to allocate SQ struct");
		nvmf_doca_io_destroy(emulated_cq);
		free(emulated_cq);
		TAILQ_REMOVE(&doca_poll_group->pci_dev_pg_list, pci_dev_pg, link);
		/* NOTE(review): poll-group cleanup call missing in this extraction (original line 1264) */
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}

	/* Admin SQ: queue 0 backed by the host buffer taken from the ASQ register */
	struct nvmf_doca_io_add_sq_attr sq_attr = {
		.pe = doca_poll_group->admin_qp_pe,
		.dev = pci_dev_admin->emulation_manager->emulation_manager,
		.nvme_dev = pci_dev_admin->pci_dev,
		.sq_depth = ctx->admin_sq_size,
		.host_sq_mmap = pci_dev_pg->host_mmap,
		.host_sq_address = ctx->admin_sq_address,
		.sq_id = NVMF_ADMIN_QUEUE_ID,
		.transport = doca_poll_group->pg.transport,
	};

	/* Adding the SQ is async; ctx stays attached to the SQ and is freed in
	 * nvmf_doca_create_admin_qp_done() */
	nvmf_doca_io_add_sq(emulated_cq, &sq_attr, sq);
	ctx->admin_qp_out = admin_qp;
	sq->ctx = ctx;
}
1284 
/*
 * Continues async flow of resetting the PCI device NVMf context
 *
 * @pci_dev_admin [in]: The PCI device admin context
 */
/* NOTE(review): function signature line missing from this extraction (original line 1290) */
{
	/* NOTE(review): a declaration is missing here (original line 1292) — `result` is used below */

	/* Indicates that admin QP is destroyed we can now finalize the reset */
	if (pci_dev_admin->state != NVMF_DOCA_LISTENER_UNINITIALIZED) {
		pci_dev_admin->state = NVMF_DOCA_LISTENER_UNINITIALIZED;

		struct nvmf_doca_nvme_registers *registers = pci_dev_admin->stateful_region_values;
		/* A shutdown request completes by reporting CSTS.SHST = complete */
		if (registers->cc.bits.shn == SPDK_NVME_SHN_NORMAL || registers->cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			registers->csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
		}
		/* A controller disable completes by clearing CSTS.RDY */
		if (registers->cc.bits.en == 0) {
			registers->csts.bits.rdy = 0;
		}
		/* NOTE(review): the modify-stateful-region call line is missing here
		 * (original line 1305) — these are its remaining arguments */
			0,
			offsetof(struct spdk_nvme_registers,
				 csts),
			&registers->csts,
			1);
		if (result != DOCA_SUCCESS) {
			DOCA_LOG_ERR(
				"Failed to complete controller reset/shutdown: Failed to modify stateful region - %s",
				/* NOTE(review): argument line missing (original line 1314) */
			return;
		}
	}

	if (pci_dev_admin->is_flr) {
		/* NOTE(review): FLR-completion call missing here (original line 1320) */
		pci_dev_admin->is_flr = false;
	}
}
1324 
/*
 * Starts async flow of resetting the PCI device NVMf context
 *
 * @pci_dev_admin [in]: The PCI device admin context
 */
static void nvmf_doca_pci_dev_admin_reset(struct nvmf_doca_pci_dev_admin *pci_dev_admin)
{
	/* Only an initialized (or init-failed) listener can be reset; ignore other states */
	if (pci_dev_admin->state != NVMF_DOCA_LISTENER_INITIALIZED &&
	    pci_dev_admin->state != NVMF_DOCA_LISTENER_INITIALIZATION_ERROR) {
		return;
	}
	pci_dev_admin->state = NVMF_DOCA_LISTENER_RESETTING;

	/* In case admin QP exist send message to destroy it */
	if (pci_dev_admin->admin_qp != NULL) {
		spdk_thread_exec_msg(pci_dev_admin->admin_qp_pg->pg.group->thread,
				     /* NOTE(review): callback argument missing (original line 1341) */
				     pci_dev_admin);
		return;
	}

	/* NOTE(review): reset-continuation call missing here (original line 1346) */
}
1348 
1349 /*
1350  * FLR event handler callback
1351  *
1352  * @pci_dev [in]: PCI device
1353  * @user_data [in]: Data user
1354  */
1355 static void flr_event_handler_cb(struct doca_devemu_pci_dev *pci_dev, union doca_data user_data)
1356 {
1357  DOCA_LOG_DBG("Entering function %s", __func__);
1358 
1359  (void)pci_dev;
1360 
1361  struct nvmf_doca_pci_dev_admin *pci_dev_admin = user_data.ptr;
1362 
1363  pci_dev_admin->is_flr = true;
1364 
1365  nvmf_doca_pci_dev_admin_reset(pci_dev_admin);
1366 }
1367 
1368 /*
1369  * Hotplugstate change handler
1370  *
1371  * @pci_dev [in]: PCI device
1372  * @user_data [in]: Data user
1373  */
1374 static void hotplug_state_change_handler_cb(struct doca_devemu_pci_dev *pci_dev, union doca_data user_data)
1375 {
1376  DOCA_LOG_DBG("Entering function %s", __func__);
1377 
1378  (void)user_data;
1379 
1380  enum doca_devemu_pci_hotplug_state hotplug_state;
1381  doca_error_t ret;
1382 
1383  DOCA_LOG_INFO("Emulated device's hotplug state has changed");
1384  ret = doca_devemu_pci_dev_get_hotplug_state(pci_dev, &hotplug_state);
1385  if (ret != DOCA_SUCCESS) {
1386  DOCA_LOG_ERR("Unable to get hotplug state: %s", doca_error_get_name(ret));
1387  return;
1388  }
1389  DOCA_LOG_INFO("Hotplug state changed to %s", hotplug_state_to_string(hotplug_state));
1390 }
1391 
/*
 * Callback invoked once admin SQ has been stopped
 *
 * @sq [in]: The NVMf DOCA SQ that was stopped
 */
/* NOTE(review): function signature line missing from this extraction (original line 1397) */
{
	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = sq->io->poll_group;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = pci_dev_pg->pci_dev_admin;
	struct nvmf_doca_admin_qp *admin_qp = pci_dev_pg->admin_qp;

	/* Detach and release the stopped SQ, then resume the admin QP teardown */
	nvmf_doca_io_rm_sq(sq);
	free(sq);
	admin_qp->admin_sq = NULL;

	nvmf_doca_destroy_admin_qp_continue(pci_dev_admin);
}
1409 
/*
 * Handles errors that occur during the initialization flow
 *
 * @cb_arg [in]: The PCI device context
 */
static void nvmf_doca_on_initialization_error(void *cb_arg)
{
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = cb_arg;

	/* NOTE(review): a statement is missing here in this extraction (original line 1419) —
	 * presumably it records the error state before the reset */
	nvmf_doca_pci_dev_admin_reset(pci_dev_admin);
}
1422 
/*
 * Handle events initiated by Host by writing to the controller registers
 *
 * @pci_dev [in]: The PCI device
 * @config [in]: The configuration of the stateful region describing location of the controller registers
 */
static void handle_controller_register_events(struct doca_devemu_pci_dev *pci_dev,
					      const struct bar_region_config *config)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	union doca_data ctx_user_data;
	doca_error_t ret;

	/* The PCI device context carries the nvmf_doca_pci_dev_admin pointer as user data */
	ret = doca_ctx_get_user_data(doca_devemu_pci_dev_as_ctx(pci_dev), &ctx_user_data);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to get the context user data: %s", doca_error_get_name(ret));
		return;
	}

	struct nvmf_doca_pci_dev_admin *pci_dev_admin = ctx_user_data.ptr;

	/* Snapshot the register values the host just wrote */
	/* NOTE(review): query-call line missing in this extraction (original line 1445) —
	 * these are its remaining arguments */
		config->bar_id,
		config->start_address,
		pci_dev_admin->stateful_region_values,
		config->size);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to query values of stateful region: %s", doca_error_get_name(ret));
		return;
	}
	/* Ignore register writes while a reset or initialization is already in progress */
	if (pci_dev_admin->state == NVMF_DOCA_LISTENER_RESETTING ||
	    pci_dev_admin->state == NVMF_DOCA_LISTENER_INITIALIZING)
		return;

	struct nvmf_doca_nvme_registers *registers =
		(struct nvmf_doca_nvme_registers *)pci_dev_admin->stateful_region_values;

	/* CC.EN set while uninitialized: host is enabling the controller */
	if (registers->cc.bits.en == 1 && pci_dev_admin->state == NVMF_DOCA_LISTENER_UNINITIALIZED) {
		DOCA_LOG_INFO("Creating controller");

		pci_dev_admin->state = NVMF_DOCA_LISTENER_INITIALIZING;

		struct nvmf_doca_transport *doca_transport = pci_dev_admin->doca_transport;

		struct nvmf_doca_create_admin_qp_ctx *ctx = calloc(1, sizeof(*ctx));
		if (ctx == NULL) {
			DOCA_LOG_ERR("Failed to create admin QP: Out of memory");
			/* NOTE(review): error-handling statement missing here (original line 1471) */
			return;
		}

		/* Choose any poll group to manage the admin QP */
		struct nvmf_doca_poll_group *doca_poll_group = choose_poll_group(doca_transport);
		*ctx = (struct nvmf_doca_create_admin_qp_ctx){
			.pci_dev_admin = pci_dev_admin,
			.doca_poll_group = doca_poll_group,
			.admin_cq_address = registers->acq,
			.admin_sq_address = registers->asq,
			.admin_cq_size = registers->aqa.bits.acqs + 1,
			.admin_sq_size = registers->aqa.bits.asqs + 1,
		};
		pci_dev_admin->admin_qp_pg = doca_poll_group;

		/* Queue creation runs on the chosen poll group's thread */
		spdk_thread_exec_msg(doca_poll_group->pg.group->thread, nvmf_doca_create_admin_qp, ctx);
		return;
	}

	if (pci_dev_admin->state == NVMF_DOCA_LISTENER_INITIALIZED ||
	    pci_dev_admin->state == NVMF_DOCA_LISTENER_INITIALIZATION_ERROR) {
		/* Host requested shutdown via CC.SHN */
		if (registers->cc.bits.shn == SPDK_NVME_SHN_NORMAL || registers->cc.bits.shn == SPDK_NVME_SHN_ABRUPT) {
			DOCA_LOG_INFO("Shut down controller");
			nvmf_doca_pci_dev_admin_reset(pci_dev_admin);
			return;
		}

		/* Host cleared CC.EN: controller reset */
		if (registers->cc.bits.en == 0) {
			DOCA_LOG_INFO("Resetting controller");
			nvmf_doca_pci_dev_admin_reset(pci_dev_admin);
			return;
		}
	}
}
1506 
/*
 * Stateful region write event handler
 *
 * @event [in]: stateful region write event
 * @user_data [in]: Data user (the bar_region_config of the written region)
 */
/* NOTE(review): handler signature line missing from this extraction (original line 1513) */
	struct doca_devemu_pci_dev_event_bar_stateful_region_driver_write *event,
	union doca_data user_data)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct doca_devemu_pci_dev *pci_dev =
		/* NOTE(review): initializer line missing in this extraction (original line 1520) */
	const struct bar_region_config *config = (const struct bar_region_config *)user_data.ptr;

	handle_controller_register_events(pci_dev, config);
}
1525 
/*
 * Register to the stateful region write event of the emulated device for all stateful regions of configured type
 *
 * @pci_dev [in]: The emulated device context
 * @pci_dev_admin [in]: NVMF device context
 * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
 */
static doca_error_t register_to_stateful_region_write_events(struct doca_devemu_pci_dev *pci_dev,
							     struct nvmf_doca_pci_dev_admin *pci_dev_admin)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	const struct bar_region_config *config;
	uint64_t region_idx;
	union doca_data user_data;
	doca_error_t ret;
	uint64_t max_region_size = 0;

	for (region_idx = 0; region_idx < PCI_TYPE_NUM_BAR_STATEFUL_REGIONS; region_idx++) {
		config = &stateful_configs[region_idx];
		/* The region's config is attached as event user data so the handler can locate it */
		user_data.ptr = (void *)config;
		/* NOTE(review): registration call lines missing in this extraction
		 * (original lines 1547 and 1549) — these are its remaining arguments */
			pci_dev,
			config->bar_id,
			config->start_address,
			user_data);

		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Unable to register to emulated PCI device stateful region write event: %s",
				     doca_error_get_name(ret));
			return ret;
		}

		/* Track the largest region so one buffer can serve queries for any of them */
		max_region_size = max_region_size > config->size ? max_region_size : config->size;
	}

	user_data.ptr = (void *)pci_dev_admin;
	ret = doca_ctx_set_user_data(doca_devemu_pci_dev_as_ctx(pci_dev), user_data);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Unable to set context user data: %s", doca_error_get_name(ret));
		return ret;
	}

	/* Setup a buffer that can be used to query stateful region values once event is triggered */
	pci_dev_admin->stateful_region_values = calloc(1, max_region_size);
	if (pci_dev_admin->stateful_region_values == NULL) {
		DOCA_LOG_ERR("Unable to allocate buffer for storing stateful region values: out of memory");
		return DOCA_ERROR_NO_MEMORY;
	}

	return DOCA_SUCCESS;
}
1579 
/*
 * Registers handlers and starts context
 *
 * @doca_emulation_manager [in]: Emulation manager
 * @pci_dev_admin [in]: PCI device admin context
 * @return: DOCA_SUCCESS on success, and other error code on failure
 */
/* NOTE(review): first signature line missing from this extraction (original line 1587) */
					struct nvmf_doca_pci_dev_admin *pci_dev_admin)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	union doca_data user_data;
	doca_error_t ret;
	user_data.ptr = (void *)pci_dev_admin;

	/* NOTE(review): datapath-on-DPA call line missing in this extraction (original line 1596) */
		doca_emulation_manager->dpa);

	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to set PCI emulated device context datapath on DPA: %s", doca_error_get_name(ret));
		return ret;
	}

	ret = doca_ctx_set_user_data(doca_devemu_pci_dev_as_ctx(pci_dev_admin->pci_dev), user_data);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to set user data: %s", doca_error_get_name(ret));
		return ret;
	}

	/* NOTE(review): state-change callback registration lines missing (original lines 1610-1611) */
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Unable to set state change callback: %s", doca_error_get_name(ret));
		return ret;
	}

	/* NOTE(review): hotplug state-change registration lines missing (original lines 1617-1618) */
		user_data);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Unable to register to hotplug state change callback: %s", doca_error_get_name(ret));
		return ret;
	}

	ret = doca_devemu_pci_dev_event_flr_register(pci_dev_admin->pci_dev, flr_event_handler_cb, user_data);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Unable to register to FLR event: %s", doca_error_get_name(ret));
		return ret;
	}

	ret = register_to_stateful_region_write_events(pci_dev_admin->pci_dev, pci_dev_admin);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Unable to register to emulated PCI device stateful region write event: %s",
			     doca_error_get_name(ret));
		return ret;
	}

	/* All handlers registered; transition the emulated device context to running */
	ret = doca_ctx_start(doca_devemu_pci_dev_as_ctx(pci_dev_admin->pci_dev));
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to start context: %s", doca_error_get_name(ret));
		return ret;
	}
	return DOCA_SUCCESS;
}
1645 
/*
 * Creates and starts mmap
 *
 * @pci_dev [in]: PCI device
 * @emulation_manager [in]: Emulation manager
 * @mmap_out [out]: The created mmap
 * @return: DOCA_SUCCESS on success, and an error code on failure
 */
static doca_error_t nvmf_doca_create_host_mmap(struct doca_devemu_pci_dev *pci_dev,
					       struct doca_dev *emulation_manager,
					       struct doca_mmap **mmap_out)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct doca_mmap *mmap;
	doca_error_t ret;

	ret = doca_devemu_pci_mmap_create(pci_dev, &mmap);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to create mmap for pci device emulation: %s", doca_error_get_name(ret));
		return ret;
	}

	/* NOTE(review): max-num-devices setter call missing in this extraction (original line 1669) */
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to set mmap max number of devices: %s", doca_error_get_name(ret));
		goto destroy_mmap;
	}

	ret = doca_mmap_add_dev(mmap, emulation_manager);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to add device to mmap: %s", doca_error_get_name(ret));
		goto destroy_mmap;
	}

	/* NOTE(review): permissions setter call missing in this extraction (original line 1681) */
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to set memory permissions: %s", doca_error_get_name(ret));
		goto destroy_mmap;
	}

	/* Cover the entire host address range; actual host buffers are resolved per request */
	ret = doca_mmap_set_memrange(mmap, 0, UINT64_MAX);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to set memrange for mmap: %s", doca_error_get_name(ret));
		goto destroy_mmap;
	}

	ret = doca_mmap_start(mmap);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to start mmap: %s", doca_error_get_name(ret));
		goto destroy_mmap;
	}

	*mmap_out = mmap;

	return DOCA_SUCCESS;

destroy_mmap:
	/* NOTE(review): doca_mmap_destroy call missing in this extraction (original line 1704) */
	return ret;
}
1707 
/* NOTE(review): signature line missing from this extraction (original line 1708) —
 * this is the PCI device admin context destructor: stops and destroys the emulated
 * device, releases the stateful-region buffer, closes the representor, frees the context */
{
	doca_error_t ret;

	if (pci_dev_admin->pci_dev != NULL) {
		/* Mark that this stop is part of the destroy flow (flag consumers not visible in this chunk) */
		pci_dev_admin->is_destroy_flow = true;
		/* BAD_STATE means the context was already stopped — not an error worth logging */
		ret = doca_ctx_stop(doca_devemu_pci_dev_as_ctx(pci_dev_admin->pci_dev));
		if (ret != DOCA_SUCCESS && ret != DOCA_ERROR_BAD_STATE) {
			DOCA_LOG_ERR("Failed to stop DOCA Emulated Device context: %s", doca_error_get_name(ret));
		}

		ret = doca_devemu_pci_dev_destroy(pci_dev_admin->pci_dev);
		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Failed to destroy DOCA Emulated Device context: %s", doca_error_get_name(ret));
		}
		pci_dev_admin->pci_dev = NULL;
	}

	if (pci_dev_admin->stateful_region_values) {
		free(pci_dev_admin->stateful_region_values);
		pci_dev_admin->stateful_region_values = NULL;
	}

	if (pci_dev_admin->dev_rep != NULL) {
		ret = doca_dev_rep_close(pci_dev_admin->dev_rep);
		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Failed to destroy PCI device context: Failed to close representor - %s",
				     doca_error_get_name(ret));
		}
	}

	free(pci_dev_admin);
}
1741 
/*
 * Creates the PCI device admin context for the emulated function addressed by trid
 *
 * @doca_transport [in]: The DOCA transport
 * @trid [in]: Transport ID whose traddr is the VUID of the emulated function
 * @pci_dev_admin_out [out]: The created admin context
 * @return: 0 on success and negative errno otherwise
 */
static int nvmf_doca_pci_dev_admin_create(struct nvmf_doca_transport *doca_transport,
					  const struct spdk_nvme_transport_id *trid,
					  struct nvmf_doca_pci_dev_admin **pci_dev_admin_out)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct nvmf_doca_emulation_manager *doca_emulation_manager;
	struct doca_dev_rep *dev_rep;
	doca_error_t ret;
	int err = 0;

	/* The trid address (VUID) identifies which emulated function to manage */
	ret = find_emulation_manager_and_function_by_vuid(doca_transport,
							  (char *)trid->traddr,
							  &doca_emulation_manager,
							  &dev_rep);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Could not find an emulation manager and a function with the given address: %s",
			     doca_error_get_name(ret));
		return -ENXIO;
	}

	struct nvmf_doca_pci_dev_admin *pci_dev_admin = calloc(1, sizeof(*pci_dev_admin));
	if (pci_dev_admin == NULL) {
		DOCA_LOG_ERR("Failed to allocate PCI device context's memory: errno %d", err);
		/* NOTE(review): cleanup statement missing in this extraction (original line 1766) —
		 * presumably closes dev_rep */
		return -ENOMEM;
	}
	pci_dev_admin->dev_rep = dev_rep;
	pci_dev_admin->subsystem = NULL;
	pci_dev_admin->doca_transport = doca_transport;
	pci_dev_admin->emulation_manager = doca_emulation_manager;
	pci_dev_admin->state = NVMF_DOCA_LISTENER_UNINITIALIZED;
	pci_dev_admin->ctlr_id = 0;
	memcpy(&pci_dev_admin->trid, trid, sizeof(pci_dev_admin->trid));

	/* Assign PCI device to admin poll group the poll group will be responsible for managing this device */
	struct nvmf_doca_admin_poll_group *admin_pg = &doca_transport->admin_pg;
	ret = doca_devemu_pci_dev_create(doca_emulation_manager->pci_type,
					 dev_rep,
					 admin_pg->pe,
					 &pci_dev_admin->pci_dev);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to create PCI device: %s", doca_error_get_name(ret));
		nvmf_doca_pci_dev_admin_destroy(pci_dev_admin);
		return -EINVAL;
	}

	ret = register_handlers_set_datapath_and_start(doca_emulation_manager, pci_dev_admin);
	if (ret != DOCA_SUCCESS) {
		/* NOTE(review): typo "Faield" in this log message — runtime string left unchanged here */
		DOCA_LOG_ERR("Faield to register handler and start context: %s", doca_error_get_name(ret));
		nvmf_doca_pci_dev_admin_destroy(pci_dev_admin);
		return -EINVAL;
	}

	*pci_dev_admin_out = pci_dev_admin;

	return EXIT_SUCCESS;
}
1800 
/* NOTE(review): first signature line missing from this extraction (original line 1801) —
 * polls the progress engine until the device's hotplug state reaches new_state,
 * or DOCA_ERROR_TIME_OUT after timeout_in_micros */
					enum doca_devemu_pci_hotplug_state new_state,
					size_t timeout_in_micros)
{
	static const size_t sleep_in_micros = 10;
	static const size_t sleep_in_nanos = sleep_in_micros * 1000;

	struct timespec timespec = {
		.tv_sec = 0,
		.tv_nsec = sleep_in_nanos,
	};
	doca_error_t ret;
	/* NOTE(review): declaration missing here (original line 1813) — `current_state` is used below */

	size_t elapsed_time_in_micros = 0;
	do {
		if (elapsed_time_in_micros >= timeout_in_micros) {
			DOCA_LOG_ERR("Failed to wait for hotplug state to change: Timed out");
			return DOCA_ERROR_TIME_OUT;
		}
		/* Sleep only when the progress engine had no work, to avoid busy spinning */
		if (doca_pe_progress(pci_dev_admin->doca_transport->admin_pg.pe) == 0)
			nanosleep(&timespec, NULL);
		elapsed_time_in_micros += sleep_in_micros;
		/* NOTE(review): get-hotplug-state call missing here (original line 1824) */
		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Failed to wait for hotplug state to change: Failed to get hotplug state %s",
				     doca_error_get_name(ret));
			return ret;
		}
	} while (current_state != new_state);

	return DOCA_SUCCESS;
}
1834 
/*
 * Adds a listener to the DOCA transport at the given address
 *
 * Callback invoked by the NVMf target once user issues the add listener RPC
 * The callback will hotplug the emulated device towards the Host, and start listening on interactions from Host
 *
 * @transport [in]: The DOCA transport
 * @trid [in]: The transport ID containing the address to listen on, in this case the VUID of the emulated device
 * @listen_opts [in]: The listen options
 * @return: 0 on success and negative error code otherwise
 */
static int nvmf_doca_listen(struct spdk_nvmf_transport *transport,
			    const struct spdk_nvme_transport_id *trid,
			    struct spdk_nvmf_listen_opts *listen_opts)
{
	(void)listen_opts;

	doca_error_t ret;
	int err = 0;

	struct nvmf_doca_transport *doca_transport = SPDK_CONTAINEROF(transport, struct nvmf_doca_transport, transport);
	/* A given VUID may only be listened on once per transport */
	ret = check_for_duplicate(doca_transport, trid->traddr);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Emulated device already is listened to by this transport: %s", doca_error_get_name(ret));
		err = -EEXIST;
		goto exit;
	}

	struct nvmf_doca_pci_dev_admin *pci_dev_admin;
	err = nvmf_doca_pci_dev_admin_create(doca_transport, trid, &pci_dev_admin);
	if (err != 0) {
		goto exit;
	}
	TAILQ_INSERT_TAIL(&doca_transport->admin_pg.pci_dev_admins, pci_dev_admin, link);

	ret = doca_devemu_pci_dev_hotplug(pci_dev_admin->pci_dev);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to hotplug pci device: %s", doca_error_get_name(ret));
		err = -EINVAL;
		/* AGAIN is retryable; let the caller retry the RPC */
		if (ret == DOCA_ERROR_AGAIN) {
			err = -EAGAIN;
		}
		goto destroy_pci_dev_admin;
	}
	DOCA_LOG_INFO("Hotplug initiated waiting for host to notice new device");

	/* Block until the host powers the new function on (or the wait times out) */
	ret = devemu_hotplug_transition_wait(pci_dev_admin,
					     /* NOTE(review): remaining arguments missing in this extraction
					      * (original lines 1882-1883) — target state and timeout */
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to start listen: Could not transition device to POWER_ON state - %s",
			     doca_error_get_name(ret));
		err = -EINVAL;
		goto unplug;
	}
	doca_transport->num_of_listeners++;
	return EXIT_SUCCESS;

unplug:
	doca_devemu_pci_dev_hotunplug(pci_dev_admin->pci_dev);
destroy_pci_dev_admin:
	TAILQ_REMOVE(&doca_transport->admin_pg.pci_dev_admins, pci_dev_admin, link);
	nvmf_doca_pci_dev_admin_destroy(pci_dev_admin);
exit:
	return err;
}
1901 
/*
 * Removes a listener from the DOCA transport at the given address
 *
 * Callback invoked by the NVMf target once user issues the remove listener RPC
 * The callback will hotunplug the emulated device from the Host, preventing further interactions
 *
 * @transport [in]: The DOCA transport
 * @trid [in]: The transport ID containing the address to stop listen on, in this case the VUID of the emulated device
 */
static void nvmf_doca_stop_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct nvmf_doca_transport *doca_transport = SPDK_CONTAINEROF(transport, struct nvmf_doca_transport, transport);
	doca_error_t ret;

	struct nvmf_doca_pci_dev_admin *pci_dev_admin =
		/* NOTE(review): lookup-call line missing in this extraction (original line 1919) */
	if (pci_dev_admin == NULL) {
		DOCA_LOG_ERR("Failed to stop listen: Could not find a PCI device (listener) with the requested VUID");
		return;
	}

	/* If the admin QP exists, executing a stop_listen may not function correctly due to the resources associated
	 * with the admin QP that still need to be managed */
	if (pci_dev_admin->admin_qp != NULL) {
		DOCA_LOG_ERR("The QP and its resources must be freed before stopping the listeners %s", __func__);
		return;
	}

	ret = doca_devemu_pci_dev_hotunplug(pci_dev_admin->pci_dev);
	if (ret == DOCA_SUCCESS) {
		DOCA_LOG_INFO("Hotplug initiated waiting for host to notice new device");
		/* Wait for the host to observe the unplug before tearing the context down */
		ret = devemu_hotplug_transition_wait(pci_dev_admin,
						     /* NOTE(review): remaining arguments missing in this extraction
						      * (original lines 1936-1937) */
		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Failed to stop listen: Could not transition device to POWER_OFF state - %s",
				     doca_error_get_name(ret));
		}
	} else {
		DOCA_LOG_ERR("Failed to hotunplug pci device: %s", doca_error_get_name(ret));
	}

	/* The context is destroyed even if the unplug wait failed — best-effort teardown */
	TAILQ_REMOVE(&doca_transport->admin_pg.pci_dev_admins, pci_dev_admin, link);
	nvmf_doca_pci_dev_admin_destroy(pci_dev_admin);
}
1949 
1950 /*
1951  * Associates a listener with the given address with an NVMf subsystem
1952  *
1953  * Callback invoked by the NVMf target once user issues the add listener RPC
1954  *
1955  * @transport [in]: The DOCA transport
1956  * @subsystem [in]: The NVMf subsystem
1957  * @trid [in]: The transport ID containing the address of the listener, in this case the VUID of the emulated device
1958  * @return: 0 on success and negative error code otherwise
1959  */
static int nvmf_doca_listen_associate(struct spdk_nvmf_transport *transport,
				      const struct spdk_nvmf_subsystem *subsystem,
				      const struct spdk_nvme_transport_id *trid)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct nvmf_doca_transport *doca_transport = SPDK_CONTAINEROF(transport, struct nvmf_doca_transport, transport);

	/* Find the listener (emulated PCI device) matching the trid's VUID */
	struct nvmf_doca_pci_dev_admin *pci_dev_admin =
	if (pci_dev_admin == NULL) {
		return -ENXIO;
	}

	/* Const is cast away: the subsystem pointer is stored and used later by the connect flow */
	pci_dev_admin->subsystem = (struct spdk_nvmf_subsystem *)subsystem;

	return EXIT_SUCCESS;
}
1978 
1979 /*
1980  * Creates a poll group for polling the DOCA transport
1981  *
1982  * Callback invoked by the NVMf target on each thread after the transport has been created
1983  *
1984  * @transport [in]: The DOCA transport
1985  * @group [in]: The NVMf target poll group
1986  * @return: The newly created DOCA transport poll group on success and NULL otherwise
1987  */
1988 static struct spdk_nvmf_transport_poll_group *nvmf_doca_poll_group_create(struct spdk_nvmf_transport *transport,
1989  struct spdk_nvmf_poll_group *group)
1990 {
1991  DOCA_LOG_DBG("Entering function %s", __func__);
1992 
1993  (void)group;
1994 
1995  doca_error_t ret;
1996  struct nvmf_doca_poll_group *doca_pg;
1997  struct nvmf_doca_transport *doca_transport = SPDK_CONTAINEROF(transport, struct nvmf_doca_transport, transport);
1998 
1999  doca_pg = (struct nvmf_doca_poll_group *)calloc(1, sizeof(struct nvmf_doca_poll_group));
2000  if (doca_pg == NULL) {
2001  DOCA_LOG_ERR("Failed to allocate memory for doca poll group");
2002  return NULL;
2003  }
2004 
2005  ret = doca_pe_create(&doca_pg->pe);
2006  if (ret != DOCA_SUCCESS) {
2007  DOCA_LOG_ERR("Failed to create progress engine %s", doca_error_get_name(ret));
2008  free(doca_pg);
2009  return NULL;
2010  }
2011 
2012  ret = doca_pe_create(&doca_pg->admin_qp_pe);
2013  if (ret != DOCA_SUCCESS) {
2014  DOCA_LOG_ERR("Failed to create progress engine %s", doca_error_get_name(ret));
2015  doca_pe_destroy(doca_pg->pe);
2016  free(doca_pg);
2017  return NULL;
2018  }
2019 
2020  doca_pg->admin_qp_poll_rate_limiter = 0;
2021 
2022  TAILQ_INIT(&doca_pg->pci_dev_pg_list);
2023 
2024  TAILQ_INSERT_TAIL(&doca_transport->poll_groups, doca_pg, link);
2025 
2026  return &doca_pg->pg;
2027 }
2028 
2029 /*
2030  * Destroy the DOCA transport poll group
2031  *
2032  * Callback invoked by the NVMf target before attempting to destroy the DOCA transport
2033  *
2034  * @group [in]: The poll group to destroy
2035  */
2036 static void nvmf_doca_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2037 {
2038  DOCA_LOG_DBG("Entering function %s", __func__);
2039 
2040  struct nvmf_doca_poll_group *doca_pg;
2041  struct nvmf_doca_transport *doca_transport;
2042 
2043  doca_pg = SPDK_CONTAINEROF(group, struct nvmf_doca_poll_group, pg);
2044  doca_transport = SPDK_CONTAINEROF(group->transport, struct nvmf_doca_transport, transport);
2045 
2046  doca_pe_destroy(doca_pg->admin_qp_pe);
2047  doca_pe_destroy(doca_pg->pe);
2048  TAILQ_REMOVE(&doca_transport->poll_groups, doca_pg, link);
2049  free(doca_pg);
2050 }
2051 
2052 /*
2053  * Picks the optimal poll group to add the QP to
2054  *
2055  * Callback invoked by the NVMf target after creation of qpair is complete but before calling nvmf_doca_poll_group_add
2056  *
2057  * @qpair [in]: The newly created NVMf QPair
2058  * @return: An existing DOCA transport poll group
2059  */
2060 static struct spdk_nvmf_transport_poll_group *nvmf_doca_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
2061 {
2062  DOCA_LOG_DBG("Entering function %s", __func__);
2063 
2064  struct nvmf_doca_sq *sq = SPDK_CONTAINEROF(qpair, struct nvmf_doca_sq, spdk_qp);
2065 
2066  /* Ensure that CQ along with SQs attached to it will run on same poll group */
2067  return &sq->io->poll_group->poll_group->pg;
2068 }
2069 
2070 /*
2071  * Set the controller status to ready.
2072  *
2073  * Callback invoked after nvmf_doca_set_property completes
2074  *
2075  * @request [in]: The request that triggered this callback
2076  * @cb_arg [in]: The call back argument which is the SQ
2077  */
2078 static void enable_nvmf_controller_cb(struct nvmf_doca_request *request, void *cb_arg)
2079 {
2080  (void)request;
2081 
2082  DOCA_LOG_DBG("Entering function %s", __func__);
2083 
2084  struct nvmf_doca_sq *sq = cb_arg;
2085  struct nvmf_doca_pci_dev_admin *pci_dev_admin = sq->io->poll_group->pci_dev_admin;
2086  struct spdk_thread *admin_thread = pci_dev_admin->doca_transport->admin_pg.thread;
2087 
2088  void *create_admin_qp_ctx = sq->ctx;
2089  sq->ctx = NULL;
2090  spdk_thread_exec_msg(admin_thread, nvmf_doca_create_admin_qp_done, create_admin_qp_ctx);
2091 }
2092 
2093 /*
2094  * Sets properties on the NVMF subsystem.
2095  *
2096  * Callback invoked from nvmf_doca_connect_spdk_qp_done
2097  *
2098  * @doca_sq [in]: The admin SQ that will execute the command
2099  */
static void nvmf_doca_set_property(struct nvmf_doca_sq *doca_sq)
{
	DOCA_LOG_DBG("Entering function %s", __func__);

	struct nvmf_doca_request *new_request;
	doca_error_t ret;

	struct nvmf_doca_pci_dev_admin *pci_dev_admin = doca_sq->io->poll_group->pci_dev_admin;
	struct spdk_thread *admin_thread = pci_dev_admin->doca_transport->admin_pg.thread;

	new_request = nvmf_doca_request_get(doca_sq);
	if (new_request == NULL) {
		/* Request pool exhausted - abort device initialization on the admin thread */
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}

	struct spdk_nvme_registers *registers = pci_dev_admin->stateful_region_values;

	/* Build a Fabrics Property-Set command targeting the controller configuration (CC) register */
	new_request->request.cmd->prop_set_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	new_request->request.cmd->prop_set_cmd.cid = 0;
	new_request->request.cmd->prop_set_cmd.attrib.size = 0; /* 4-byte property */
	new_request->request.cmd->prop_set_cmd.ofst = offsetof(struct spdk_nvme_registers, cc);
	new_request->request.cmd->prop_set_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET;
	new_request->request.length = 4; /* size of the CC register */

	/* Read the CC value the Host wrote into the device's stateful region */
		0,
		offsetof(struct spdk_nvme_registers, cc),
		&registers->cc,
		4);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to query stateful region values %s", doca_error_get_name(ret));
		spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		return;
	}

	/* spdk_iov_one() takes an int *, while request.iovcnt is unsigned - go through a temporary */
	int temp_iovcnt = (int)new_request->request.iovcnt;

	new_request->request.cmd->prop_set_cmd.value.u32.low = registers->cc.raw;
	spdk_iov_one(new_request->request.iov, &temp_iovcnt, &registers->cc, new_request->request.length);

	/* enable_nvmf_controller_cb resumes admin QP creation once the property set completes */
	new_request->doca_cb = enable_nvmf_controller_cb;
	new_request->cb_arg = doca_sq;

	spdk_nvmf_request_exec_fabrics(&new_request->request);
}
2146 
2152 };
2153 
2154 /*
2155  * Method to be called once async 'nvmf_doca_poll_group_create_io_sq()' completes
2156  *
2157  * @args [in]: The context of the async procedure
2158  */
2159 static void nvmf_doca_poll_group_create_io_sq_done(void *args);
2160 
2161 /*
2162  * Method to be called once async 'nvmf_doca_connect_spdk_qp()' completes
2163  *
2164  * @request [in]: The connect request that completed
2165  * @cb_arg [in]: The argument passed along this callback
2166  */
static void nvmf_doca_connect_spdk_qp_done(struct nvmf_doca_request *request, void *cb_arg)
{
	struct nvmf_doca_sq *sq = cb_arg;
	struct nvmf_doca_request *doca_request;

	/* Admin queue connect: the admin QP creation flow continues elsewhere */
	if (sq->sq_id == NVMF_ADMIN_QUEUE_ID) {
		return;
	}

	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = ctx->pci_dev_pg;

	/* Propagate success into the pending Create-IO-SQ admin command's response */
	doca_request = ctx->request;
	doca_request->request.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	doca_request->request.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	sq->ctx = NULL;

	/* Finishing the IO SQ creation must run on the admin QP thread */
	struct spdk_thread *admin_qp_thread = pci_dev_pg->pci_dev_admin->admin_qp_pg->pg.group->thread;
	spdk_thread_exec_msg(admin_qp_thread, nvmf_doca_poll_group_create_io_sq_done, ctx);
}
2191 
2192 /*
2193  * Connect an SPDK QP of an SQ
2194  *
2195  * This method is async once complete nvmf_doca_connect_spdk_qp_done will be called
2196  *
2197  * @sq [in]: The SQ containing the SPDK QP to connect
2198  * @return: 0 on success and negative error code otherwise
2199  */
{
	struct nvmf_doca_request *doca_request;
	struct spdk_nvmf_fabric_connect_data *data;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = sq->io->poll_group->pci_dev_admin;
	struct spdk_thread *admin_thread = pci_dev_admin->doca_transport->admin_pg.thread;

	doca_request = nvmf_doca_request_get(sq);
	if (doca_request == NULL) {
		/* Only an admin SQ failure aborts device initialization */
		if (sq->sq_id == NVMF_ADMIN_QUEUE_ID) {
			spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		}
		return -ENOMEM;
	}

	/* Build a Fabrics Connect command for this queue (qid 0 is the admin queue) */
	doca_request->request.cmd->connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
	doca_request->request.cmd->connect_cmd.cid = 0;
	doca_request->request.cmd->connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	doca_request->request.cmd->connect_cmd.recfmt = 0;
	doca_request->request.cmd->connect_cmd.sqsize = sq->queue.num_elements - 1; /* 0's based value */
	doca_request->request.cmd->connect_cmd.qid = sq->sq_id;
	doca_request->request.length = sizeof(struct spdk_nvmf_fabric_connect_data);

	data = calloc(1, doca_request->request.length);
	if (data == NULL) {
		/* NOTE(review): doca_request appears to leak on this path - confirm whether it must be returned
		 * to the pool before bailing out */
		if (sq->sq_id == NVMF_ADMIN_QUEUE_ID) {
			spdk_thread_exec_msg(admin_thread, nvmf_doca_on_initialization_error, pci_dev_admin);
		}
		return -ENOMEM;
	}
	doca_request->data_from_alloc = true;

	/* First connect on this device allocates a controller ID derived from the listener count */
	if (pci_dev_admin->ctlr_id == 0) {
		data->cntlid = pci_dev_admin->doca_transport->num_of_listeners;
		pci_dev_admin->ctlr_id = pci_dev_admin->doca_transport->num_of_listeners;
	} else {
		data->cntlid = pci_dev_admin->ctlr_id;
	}

	snprintf((char *)data->subnqn,
		 sizeof(data->subnqn),
		 "%s",
		 spdk_nvmf_subsystem_get_nqn(pci_dev_admin->subsystem));

	/* nvmf_doca_connect_spdk_qp_done is invoked when the connect completes */
	doca_request->doca_cb = nvmf_doca_connect_spdk_qp_done;
	doca_request->cb_arg = sq;

	doca_request->request.data = data;
	spdk_nvmf_request_exec_fabrics(&doca_request->request);

	return 0;
}
2252 
2253 /*
2254  * Assigns QP to a poll group such that it will be responsible for polling the QP
2255  *
2256  * Callback invoked by the NVMf target after creation of qpair
2257  * Once this callback is invoked then it means both the NVMf DOCA SQ and SPDK NVMf QPair have been created
2258  *
2259  * @group [in]: The DOCA transport poll group as picked by nvmf_doca_get_optimal_poll_group
2260  * @qpair [in]: The newly created NVMf QPair
2261  * @return: 0 on success and negative error code otherwise
2262  */
2263 static int nvmf_doca_poll_group_add(struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_qpair *qpair)
2264 {
2265  DOCA_LOG_DBG("Entering function %s", __func__);
2266 
2267  (void)group;
2268 
2269  struct nvmf_doca_sq *doca_sq;
2270 
2271  doca_sq = SPDK_CONTAINEROF(qpair, struct nvmf_doca_sq, spdk_qp);
2272  if (doca_sq->sq_id == NVMF_ADMIN_QUEUE_ID) {
2273  doca_sq->io->poll_group->admin_qp->admin_sq = doca_sq;
2274  }
2275 
2276  return nvmf_doca_connect_spdk_qp(doca_sq);
2277 }
2278 
2279 /*
2280  * Removes QP from poll group
2281  *
2282  * Callback invoked by the NVMf target on destroy of qpair
2283  *
2284  * @group [in]: The DOCA transport poll group the QP is assigned to
2285  * @qpair [in]: The NVMf QPair to be removed
2286  * @return: 0 on success and negative error code otherwise
2287  */
static int nvmf_doca_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_qpair *qpair)
{
	DOCA_LOG_DBG("Entering function: %s", __func__);

	/* Nothing to do here - queue teardown is driven by the DOCA SQ/CQ stop flows */
	(void)group;
	(void)qpair;

	return 0;
}
2297 
2298 /*
2299  * Polls the DOCA transport poll group
2300  *
2301  * Callback invoked by reactor thread frequently
2302  *
2303  * @group [in]: The DOCA transport poll group as picked by nvmf_doca_get_optimal_poll_group
2304  * @return: 0 on success and negative error code otherwise
2305  */
static int nvmf_doca_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	struct nvmf_doca_poll_group *doca_pg = SPDK_CONTAINEROF(group, struct nvmf_doca_poll_group, pg);

	/* The IO progress engine is advanced on every poll */
	doca_pe_progress(doca_pg->pe);

	/* Polling for the admin QP typically involves lighter workloads compared to I/O QPs, which are more active
	   and handle a greater number of tasks. By reducing the polling rate for the admin QP to once for every
	   ADMIN_QP_POLL_RATE_LIMIT (1000) I/O QP polls, performance can be enhanced.
	   However, this method may accidentally slow down the device destruction process. Each inflight task needs
	   separate access to the progress engine to be properly released. When there are numerous inflight tasks across
	   multiple devices on the same thread, infrequent polling of the admin QP leads to a slower overall cleanup
	   process. */

		doca_pe_progress(doca_pg->admin_qp_pe);
	}
	doca_pg->admin_qp_poll_rate_limiter++;

	return 0;
}
2327 
2328 /*
2329  * Frees a completed NVMf request back to the pool
2330  *
2331  * Callback invoked by NVMf target once a request can be freed
2332  *
2333  * @req [in]: The NVMf request to free
2334  * @return: 0 on success and negative error code otherwise
2335  */
static int nvmf_doca_req_free(struct spdk_nvmf_request *req)
{
	/* Recover the transport request that wraps the generic NVMf request */
	struct nvmf_doca_request *request = SPDK_CONTAINEROF(req, struct nvmf_doca_request, request);

	return 0;
}
2344 
2345 /*
2346  * Completes the NVMf request
2347  *
2348  * Callback invoked by NVMf target once a request has been completed but before freeing it
2349  *
2350  * @req [in]: The NVMf request to complete
2351  * @return: 0 on success and negative error code otherwise
2352  */
static int nvmf_doca_req_complete(struct spdk_nvmf_request *req)
{
	/* Recover the transport request; completion is forwarded to its DOCA callback */
	struct nvmf_doca_request *request = SPDK_CONTAINEROF(req, struct nvmf_doca_request, request);

	return 0;
}
2361 
2362 /*
2363  * Destroys the NVMf QP
2364  *
2365  * Callback invoked by the NVMf target once the QP can be destroyed
2366  *
2367  * @qpair [in]: The NVMf QPair
2368  * @cb_fn [in]: Callback to be invoked once close finishes - can be NULL
2369  * @cb_arg [in]: Argument to be passed to the callback
2370  */
2371 static void nvmf_doca_close_qpair(struct spdk_nvmf_qpair *qpair, spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2372 {
2373  DOCA_LOG_DBG("Entering function: %s", __func__);
2374 
2375  (void)qpair;
2376 
2377  if (cb_fn) {
2378  cb_fn(cb_arg);
2379  }
2380 }
2381 
2382 /*
2383  * Get the listener address from the QP
2384  *
2385  * @qpair [in]: The NVMf QPair
2386  * @trid [out]: The transport ID containing the address related to the QP, in this case the VUID of the emulated device
2387  * @return: 0 on success and negative error code otherwise
2388  */
2389 static int nvmf_doca_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid)
2390 {
2391  DOCA_LOG_DBG("Entering function: %s", __func__);
2392 
2393  struct nvmf_doca_sq *sq = SPDK_CONTAINEROF(qpair, struct nvmf_doca_sq, spdk_qp);
2394 
2395  struct nvmf_doca_pci_dev_admin *pci_dev_admin = sq->io->poll_group->pci_dev_admin;
2396 
2397  memcpy(trid, &pci_dev_admin->trid, sizeof(*trid));
2398 
2399  return 0;
2400 }
2401 
2402 /*********************************************************************************************************************
2403  * Data Path
2404  *********************************************************************************************************************/
2405 
2406 #define IDENTIFY_CMD_DATA_BUFFER_SIZE 4096
2407 #define FEAT_CMD_LBA_RANGE_SIZE 4096
2408 #define FEAT_CMD_AUTONOMOUS_POWER_STATE_TRANSITION_SIZE 256
2409 #define FEAT_CMD_TIMESTAMP_SIZE 8
2410 #define FEAT_CMD_HOST_BEHAVIOR_SUPPORT_SIZE 512
2411 #define FEAT_CMD_HOST_IDENTIFIER_EXT_SIZE 16
2412 #define FEAT_CMD_HOST_IDENTIFIER_SIZE 8
2413 
2414 /*
 * Map the data described by PRP list entries used in NVME command in IOV structures
 *
 * @request [in]: The NVMf request whose PRP-list data is being mapped
2418  * @arg [in]: Argument associated with the callback
2419  */
static void copy_prp_list_data(struct nvmf_doca_request *request, void *arg)
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	(void)arg;
	void *data_out_address;
	uintptr_t *prp_list_addr;
	uint32_t idx = 1; /* idx 0 is already occupied */
	uint32_t remaining_length;
	uint32_t length = request->residual_length;

	/* The PRP list itself was already copied to the DPU; walk it from the buffer head */
	doca_buf_get_head(request->prp_dpu_buf, (void **)&prp_list_addr);

	request->prp_dpu_buf = NULL;
	request->prp_host_buf = NULL;

	/* Iterate over the prp entries */
	while (length != 0) {
		remaining_length = spdk_min(length, NVME_PAGE_SIZE);

		/* Pair each Host page (a PRP entry address) with a DPU bounce buffer */
		request->host_buffer[idx] = nvmf_doca_sq_get_host_buffer(request->doca_sq, prp_list_addr[idx - 1]);
		request->dpu_buffer[idx] = nvmf_doca_sq_get_dpu_buffer(request->doca_sq);
		doca_buf_get_head(request->dpu_buffer[idx], &data_out_address);

		request->request.iov[idx].iov_base = data_out_address;
		request->request.iov[idx].iov_len = remaining_length;
		request->request.iovcnt++;

		length -= remaining_length;
		idx++;
	}
	request->num_of_buffers = request->request.iovcnt;

	/* Writes stage Host data onto the DPU first; reads execute and copy back on completion */
	if (request->request.cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
	} else {
	}
}
2461 
2462 /*
 * This method is responsible for mapping the data described by PRP entries used in NVME command in IOV structures.
2464  *
2465  * @request [in]: The NVMf request
2466  */
static void nvme_cmd_map_prps(struct nvmf_doca_request *request)
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	uint64_t prp1, prp2;
	uint32_t remaining_length, number_of_pages;
	uint64_t *prp_list;
	uint32_t length = request->request.length;

	prp1 = request->request.cmd->nvme_cmd.dptr.prp.prp1;
	prp2 = request->request.cmd->nvme_cmd.dptr.prp.prp2;

	/* PRP1 may start with unaligned page address */
	remaining_length = NVME_PAGE_SIZE - (prp1 % NVME_PAGE_SIZE);
	remaining_length = spdk_min(length, remaining_length);

	/* First segment: PRP1 paired with a DPU bounce buffer */
	void *data_out_address;
	request->host_buffer[0] = nvmf_doca_sq_get_host_buffer(request->doca_sq, prp1);
	request->dpu_buffer[0] = nvmf_doca_sq_get_dpu_buffer(request->doca_sq);
	doca_buf_get_head(request->dpu_buffer[0], &data_out_address);

	request->request.iov[0].iov_base = data_out_address;
	request->request.iov[0].iov_len = remaining_length;
	request->request.iovcnt++;

	length -= remaining_length;

	if (length == 0) {
		/* There is only one prp entry */
		request->num_of_buffers = request->request.iovcnt;

		if (request->request.cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		} else {
		}

	} else if (length <= NVME_PAGE_SIZE) {
		/* Data crosses exactly one memory page boundary, there are two PRP entries */
		request->host_buffer[1] = nvmf_doca_sq_get_host_buffer(request->doca_sq, prp2);
		request->dpu_buffer[1] = nvmf_doca_sq_get_dpu_buffer(request->doca_sq);
		doca_buf_get_head(request->dpu_buffer[1], &data_out_address);

		request->request.iov[1].iov_base = data_out_address;
		request->request.iov[1].iov_len = length;
		request->request.iovcnt++;
		request->num_of_buffers = request->request.iovcnt;

		if (request->request.cmd->nvme_cmd.opc == SPDK_NVME_OPC_WRITE) {
		} else {
		}
	} else {
		/* PRP list used and prp2 holds a pointer to it*/
		number_of_pages = SPDK_CEIL_DIV(length, NVME_PAGE_SIZE);

		/* Copy the PRP list itself from the Host; copy_prp_list_data resumes once it lands */
		request->prp_host_buf = nvmf_doca_sq_get_host_buffer(request->doca_sq, prp2);
		request->prp_dpu_buf = nvmf_doca_sq_get_dpu_buffer(request->doca_sq);

		union doca_data user_data;
		user_data.ptr = request;
		request->residual_length = length;
		request->doca_cb = copy_prp_list_data;
		request->num_of_buffers = 1;

			request->prp_dpu_buf,
			request->prp_host_buf,
			number_of_pages * sizeof(*prp_list),
			user_data);
	}
}
2540 
2541 /*
2542  * Initialize Host and DPU data buffers for data transfer
2543  *
2544  * @request [in]: The NVMf request
2545  */
2546 static void init_dpu_host_buffers(struct nvmf_doca_request *request)
2547 {
2548  DOCA_LOG_TRC("Entering function %s", __func__);
2549 
2550  if (request->doca_sq->sq_id != NVMF_ADMIN_QUEUE_ID) {
2551  if (request->request.cmd->nvme_cmd.psdt == SPDK_NVME_PSDT_PRP) {
2552  return nvme_cmd_map_prps(request);
2553  }
2554  }
2555 
2556  void *data_out_address;
2557  uintptr_t host_data_out_io_address = request->request.cmd->nvme_cmd.dptr.prp.prp1;
2558  request->num_of_buffers = 1;
2559  request->host_buffer[0] = nvmf_doca_sq_get_host_buffer(request->doca_sq, host_data_out_io_address);
2560  request->dpu_buffer[0] = nvmf_doca_sq_get_dpu_buffer(request->doca_sq);
2561  doca_buf_get_head(request->dpu_buffer[0], &data_out_address);
2562  spdk_iov_one(request->request.iov, (int *)&request->request.iovcnt, data_out_address, request->request.length);
2563  request->request.data = data_out_address;
2564 }
2565 
2566 /*
2567  * Post CQE based on the NVMf response
2568  *
2569  * @request [in]: The NVMf request which holds the response
2570  * @arg [in]: Argument associated with the callback
2571  */
2572 static void post_cqe_from_response(struct nvmf_doca_request *request, void *arg)
2573 {
2574  (void)arg;
2575 
2576  union doca_data user_data;
2577  user_data.ptr = request;
2578 
2579  // Update SQ head
2580  request->request.rsp->nvme_cpl.sqhd = request->sqe_idx;
2581 
2582  nvmf_doca_io_post_cqe(request->doca_sq->io,
2583  (const struct nvmf_doca_cqe *)&request->request.rsp->nvme_cpl,
2584  user_data);
2585 }
2586 
2587 /*
2588  * Post CQE based on the NVMf response
2589  *
2590  * @request [in]: The NVMf request which holds the response
2591  */
{
	/* Mirror the command ID and mark a non-zero (failure) status before posting the CQE */
	request->request.rsp->nvme_cpl.cid = request->request.cmd->nvme_cmd.cid;
	request->request.rsp->nvme_cpl.status.sc = 1;

	post_cqe_from_response(request, request);
}
2599 
2600 /*
2601  * Begin async operation of copying data from DPU to Host
2602  *
2603  * @request [in]: The NVMf request
2604  * @arg [in]: Argument associated with the callback
2605  */
static void copy_dpu_data_to_host(struct nvmf_doca_request *request, void *arg)
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	(void)arg;
	union doca_data user_data;
	user_data.ptr = request;
	/* The CQE is posted only after the data lands in Host memory */
	request->doca_cb = post_cqe_from_response;

	if (request->request.cmd->nvme_cmd.opc == SPDK_NVME_OPC_IDENTIFY) {
		struct spdk_nvme_ctrlr_data *cdata = (struct spdk_nvme_ctrlr_data *)request->request.data;

		/* Disable SGL */
		cdata->sgls.supported = SPDK_NVME_SGLS_NOT_SUPPORTED;
	}

		request->host_buffer[0],
		request->dpu_buffer[0],
		request->request.length,
		user_data);
}
2628 
2629 /*
2630  * Begin async operation of copying data from DPU to Host for NVM commands
2631  *
2632  * @request [in]: The NVMf request
2633  * @arg [in]: Argument associated with the callback
2634  */
static void copy_nvme_dpu_data_to_host(struct nvmf_doca_request *request, void *arg)
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	(void)arg;
	union doca_data user_data;
	user_data.ptr = request;
	request->doca_cb = post_cqe_from_response;

	/* NOTE(review): this initializer is dead - idx is reset to 0 by the loop below */
	uint32_t idx = request->num_of_buffers;

	/* Issue one DPU-to-Host copy per mapped buffer */
	for (idx = 0; idx < request->num_of_buffers; idx++) {
			request->host_buffer[idx],
			request->dpu_buffer[idx],
			request->request.iov[idx].iov_len,
			user_data);
	}
}
2654 
2655 /*
2656  * Begin async operation of handling NVMe admin command that requires copying data back to Host
2657  *
2658  * @request [in]: The NVMf request
2659  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	/* Prepare DPU data buffer */
	init_dpu_host_buffers(request);

	/* Once SPDK fills the DPU buffer, copy_dpu_data_to_host moves it to the Host */
	request->doca_cb = copy_dpu_data_to_host;

	spdk_nvmf_request_exec(&request->request);
}
2671 
2672 /*
2673  * Begin async operation of handling NVMe command that requires copying data back to Host for NVM commands
2674  *
2675  * @request [in]: The NVMf request
2676  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	/* Prepare DPU data buffer; the copy back to the Host is triggered later in the flow */
	init_dpu_host_buffers(request);
}
2684 
2685 /*
 * Copy data back to host once the buffers have been initialized and ready
2687  *
2688  * @request [in]: The request of NVM read
2689  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	/* Buffers are already initialized; hand the request to SPDK for execution */
	spdk_nvmf_request_exec(&request->request);
}
2698 
2699 /*
2700  * Begin async NVMf request
2701  *
2702  * @request [in]: The NVMf request
2703  * @arg [in]: Argument associated with the callback
2704  */
2705 static void execute_spdk_request(struct nvmf_doca_request *request, void *arg)
2706 {
2707  (void)arg;
2708 
2709  request->doca_cb = post_cqe_from_response;
2710  spdk_nvmf_request_exec(&request->request);
2711 }
2712 
2713 /*
2714  * Begin async operation of handling NVMe admin command that requires copying data from Host
2715  *
2716  * @request [in]: The NVMf request
2717  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	union doca_data user_data;

	/* Fetch data from Host to DPU */
	init_dpu_host_buffers(request);
	user_data.ptr = request;
	/* SPDK executes the command only after the Host data lands on the DPU */
	request->doca_cb = execute_spdk_request;

		request->dpu_buffer[0],
		request->host_buffer[0],
		request->request.length,
		user_data);
}
2735 
2736 /*
2737  * Begin async operation of handling NVMe command that requires copying data from Host
2738  *
2739  * @request [in]: The NVMf request
2740  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	/* Fetch data from Host to DPU */
	init_dpu_host_buffers(request);
}
2748 
2749 /*
 * Copy data to dpu once the buffers have been initialized and ready
2751  *
2752  * @request [in]: The request of NVM write
2753  */
{
	DOCA_LOG_TRC("Entering function %s", __func__);

	union doca_data user_data;

	user_data.ptr = request;
	/* SPDK executes the write only after all Host data lands on the DPU */
	request->doca_cb = execute_spdk_request;

	/* NOTE(review): this initializer is dead - idx is reset to 0 by the loop below */
	uint32_t idx = request->num_of_buffers;

	for (idx = 0; idx < request->num_of_buffers; idx++) {
			request->dpu_buffer[idx],
			request->host_buffer[idx],
			request->request.iov[idx].iov_len,
			user_data);
	}
}
2773 
2774 /*
2775  * Begin async operation of handling NVMe command that does not require copy of data between Host and DPU
2776  *
2777  * @request [in]: The NVMf request
2778  */
2779 static void begin_nvme_cmd_data_none(struct nvmf_doca_request *request)
2780 {
2781  request->doca_cb = post_cqe_from_response;
2782  spdk_nvmf_request_exec(&request->request);
2783 }
2784 
2790 };
2791 
2792 /*
2793  * Method to be called once async 'nvmf_doca_poll_group_create_io_cq()' completes
2794  *
2795  * @args [in]: The context of the async procedure
2796  */
{
	struct nvmf_doca_request *request = ctx->request;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = ctx->pci_dev_admin;
	struct nvmf_doca_admin_qp *admin_qp = pci_dev_admin->admin_qp;
	struct nvmf_doca_io *io_cq = ctx->io_cq;

	/* If error then free io_cq */
	if (request->request.rsp->nvme_cpl.status.sc == 1) {
		free(io_cq);
	} else {
		/* Track the new IO CQ on the admin QP so it can be found and torn down later */
		TAILQ_INSERT_TAIL(&admin_qp->io_cqs, ctx->io_cq, pci_dev_admin_link);
	}

	free(ctx);

	/* Complete the Create-IO-CQ admin command back to the Host */
	request->request.rsp->nvme_cpl.cid = request->request.cmd->nvme_cmd.cid;
	post_cqe_from_response(request, request);
}
2817 
2818 /*
2819  * Finds an IO SQ that matches a specific io_sq_id
2820  *
2821  * @admin_qp [in]: The PCI device admin QP context
2822  * @io_sq_id [in]: IO SQ ID
 * @return: A pointer to the matching SQ if found, or NULL if no match is found
2824  */
2825 static struct nvmf_doca_sq *admin_qp_find_io_sq_by_id(struct nvmf_doca_admin_qp *admin_qp, uint32_t io_sq_id)
2826 {
2827  struct nvmf_doca_sq *io_sq;
2828  TAILQ_FOREACH(io_sq, &admin_qp->io_sqs, pci_dev_admin_link)
2829  {
2830  if (io_sq->sq_id == io_sq_id) {
2831  return io_sq;
2832  }
2833  }
2834  return NULL;
2835 }
2836 
2837 /*
2838  * Callback invoked once IO SQ has been stopped
2839  *
2840  * @sq [in]: The NVMf DOCA SQ that was stopped
2841  */
2842 static void nvmf_doca_on_io_sq_stop(struct nvmf_doca_sq *sq)
2843 {
2844  struct spdk_thread *admin_qp_thread = sq->io->poll_group->pci_dev_admin->admin_qp_pg->pg.group->thread;
2845 
2846  nvmf_doca_io_rm_sq(sq);
2847 
2848  spdk_thread_exec_msg(admin_qp_thread, nvmf_doca_pci_dev_poll_group_stop_io_sq_done, sq);
2849 }
2850 
2851 /*
2852  * Finds a completion IO queue that matches a specific io_id
2853  *
2854  * @admin_qp [in]: The PCI device admin QP context
2855  * @io_cq_id [in]: IO CQ ID
 * @return: A pointer to the matching CQ if found, or NULL if no match is found
2857  */
2858 static struct nvmf_doca_io *admin_qp_find_io_cq_by_id(struct nvmf_doca_admin_qp *admin_qp, uint32_t io_cq_id)
2859 {
2860  struct nvmf_doca_io *io_cq;
2861  TAILQ_FOREACH(io_cq, &admin_qp->io_cqs, pci_dev_admin_link)
2862  {
2863  if (io_cq->cq.cq_id == io_cq_id) {
2864  return io_cq;
2865  }
2866  }
2867  return NULL;
2868 }
2869 
2870 /*
2871  * Callback invoked once IO CQ has been stopped
2872  *
2873  * @io [in]: The NVMf DOCA CQ that was stopped
2874  */
static void nvmf_doca_on_io_cq_stop(struct nvmf_doca_io *io)
{
	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = io->poll_group;
	/* Capture the admin QP thread before the poll group may be torn down below */
	struct spdk_thread *admin_qp_thread = pci_dev_pg->pci_dev_admin->admin_qp_pg->pg.group->thread;

	TAILQ_REMOVE(&pci_dev_pg->io_cqs, io, pci_dev_pg_link);

	/* Last IO CQ gone and no admin QP - the per-device poll group itself is destroyed */
	if (pci_dev_pg->admin_qp == NULL && TAILQ_EMPTY(&pci_dev_pg->io_cqs)) {
		DOCA_LOG_INFO("Destroying PCI dev poll group %p", pci_dev_pg);
		TAILQ_REMOVE(&pci_dev_pg->poll_group->pci_dev_pg_list, pci_dev_pg, link);
		io->poll_group = NULL;
	}

	spdk_thread_exec_msg(admin_qp_thread, nvmf_doca_pci_dev_poll_group_stop_io_cq_done, io);
}
2897 
2898 /*
2899  * Async Method to create an IO CQ, once complete 'nvmf_doca_poll_group_create_io_cq_done()' will be called
2900  *
2901  * @args [in]: The context of the async procedure
2902  */
static void nvmf_doca_poll_group_create_io_cq(void *args)
{
	doca_error_t ret;
	/* NOTE(review): the declaration of 'ctx' (cast of 'args' to
	 * struct nvmf_doca_poll_group_create_io_cq_ctx *) is not visible in this view — confirm */
	struct nvmf_doca_request *request = ctx->request;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = ctx->pci_dev_admin;
	struct nvmf_doca_poll_group *poll_group = ctx->poll_group;
	struct spdk_nvme_cmd *cmd = &request->request.cmd->nvme_cmd;
	/* QSIZE is a 0's-based value in the Create IO Queue command; +1 yields the real depth */
	uint16_t qsize = cmd->cdw10_bits.create_io_q.qsize + 1;

	/* If first CQ to be created on this poll group then create a PCI device poll group */
	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = get_pci_dev_poll_group(poll_group, pci_dev_admin->pci_dev);
	if (pci_dev_pg == NULL) {
		ret = nvmf_doca_create_pci_dev_poll_group(pci_dev_admin, NULL, poll_group, &pci_dev_pg);
		if (ret != DOCA_SUCCESS) {
			DOCA_LOG_ERR("Failed to create PCI device poll group: %s", doca_error_get_name(ret));
			/* Mark a generic error status in the completion posted back to the host */
			request->request.rsp->nvme_cpl.status.sc = 1;
			goto respond_to_admin;
		}
		TAILQ_INSERT_TAIL(&poll_group->pci_dev_pg_list, pci_dev_pg, link);
	}

	/* Describe the new IO CQ: host-visible ring, MSI-X settings, and data-path callbacks */
	struct nvmf_doca_io_create_attr io_attr = {
		.pe = pci_dev_pg->poll_group->pe,
		.dev = pci_dev_admin->emulation_manager->emulation_manager,
		.nvme_dev = pci_dev_admin->pci_dev,
		.dpa = pci_dev_admin->emulation_manager->dpa,
		.cq_id = cmd->cdw10_bits.create_io_q.qid,
		.cq_depth = qsize,
		.host_cq_mmap = pci_dev_pg->host_mmap,
		.host_cq_address = cmd->dptr.prp.prp1,
		.msix_idx = cmd->cdw11_bits.create_io_cq.iv,
		.enable_msix = cmd->cdw11_bits.create_io_cq.ien,
		.max_num_sq = 64,
		.post_cqe_cb = nvmf_doca_on_post_nvm_cqe_complete,
		.fetch_sqe_cb = nvmf_doca_on_fetch_nvm_sqe_complete,
		.copy_data_cb = nvmf_doca_on_copy_nvm_data_complete,
		.stop_sq_cb = nvmf_doca_on_io_sq_stop,
		.stop_io_cb = nvmf_doca_on_io_cq_stop,
	};

	struct nvmf_doca_io *io_cq = ctx->io_cq;
	ret = nvmf_doca_io_create(&io_attr, io_cq);
	if (ret != DOCA_SUCCESS) {
		DOCA_LOG_ERR("Failed to create io: %s", doca_error_get_name(ret));
		request->request.rsp->nvme_cpl.status.sc = 1;
		goto respond_to_admin;
	}
	io_cq->poll_group = pci_dev_pg;
	io_cq->pci_dev_admin = pci_dev_admin;

	/* Track the new CQ on its PCI device poll group */
	TAILQ_INSERT_TAIL(&pci_dev_pg->io_cqs, io_cq, pci_dev_pg_link);

respond_to_admin:
	/* The admin completion must be posted from the admin QP's thread */
	struct spdk_thread *admin_qp_thread = pci_dev_admin->admin_qp_pg->pg.group->thread;
	spdk_thread_exec_msg(admin_qp_thread, nvmf_doca_poll_group_create_io_cq_done, args);
}
2960 
2961 /*
2962  * Creates a completion I/O queue
2963  *
2964  * @sq [in]: The SQ that holds the command
2965  * @request [in]: the NVME command
2966  */
static void handle_create_io_cq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
{
	struct nvmf_doca_io *io_cq = calloc(1, sizeof(*io_cq));
	if (io_cq == NULL) {
		DOCA_LOG_ERR("Failed to create IO CQ: Out of memory");
		/* NOTE(review): an error completion for the admin command appears elided here — confirm */
		return;
	}

	/* Pick a transport poll group to host the new CQ (spreads IO queues across groups) */
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = sq->io->poll_group->pci_dev_admin;
	struct nvmf_doca_poll_group *poll_group = choose_poll_group(pci_dev_admin->doca_transport);
	struct nvmf_doca_poll_group_create_io_cq_ctx *create_io_cq_ctx = calloc(1, sizeof(*create_io_cq_ctx));
	if (create_io_cq_ctx == NULL) {
		DOCA_LOG_ERR("Failed to create IO CQ: Out of memory");
		free(io_cq);
		return;
	}
	/* Bundle everything the async create flow needs */
	*create_io_cq_ctx = (struct nvmf_doca_poll_group_create_io_cq_ctx){
		.request = request,
		.pci_dev_admin = pci_dev_admin,
		.poll_group = poll_group,
		.io_cq = io_cq,
	};

	/* Run the actual CQ creation on the chosen poll group's thread */
	struct spdk_thread *thread = poll_group->pg.group->thread;
	spdk_thread_exec_msg(thread, nvmf_doca_poll_group_create_io_cq, create_io_cq_ctx);
}
2995 
2996 /*
2997  * Handle delete IO CQ admin command
2998  *
2999  * @sq [in]: The SQ that holds the command
3000  * @request [in]: The NVMe command
3001  */
static void handle_delete_io_cq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
{
	struct nvmf_doca_admin_qp *admin_qp = sq->io->poll_group->admin_qp;

	/* Look up the CQ to delete by the QID carried in CDW10 */
	uint32_t io_cq_id = request->request.cmd->nvme_cmd.cdw10_bits.delete_io_q.qid;
	struct nvmf_doca_io *io_cq = admin_qp_find_io_cq_by_id(admin_qp, io_cq_id);
	if (io_cq == NULL) {
		DOCA_LOG_ERR("Failed to delete IO CQ: IO CQ with ID %u does not exist", io_cq_id);
		/* NOTE(review): an error completion for the admin command appears elided here — confirm */
		return;
	}

	struct nvmf_doca_poll_group_delete_io_cq_ctx *delete_io_cq_ctx = calloc(1, sizeof(*delete_io_cq_ctx));
	if (delete_io_cq_ctx == NULL) {
		DOCA_LOG_ERR("Failed to delete IO CQ: Out of memory");
		return;
	}
	/* Stash the originating admin request on the CQ for the stop flow */
	*delete_io_cq_ctx = (struct nvmf_doca_poll_group_delete_io_cq_ctx){
		.request = request,
	};
	io_cq->ctx = delete_io_cq_ctx;

	/* The CQ must be stopped on the thread of the poll group that owns it */
	struct spdk_thread *thread = io_cq->poll_group->poll_group->pg.group->thread;
	spdk_thread_exec_msg(thread, nvmf_doca_pci_dev_poll_group_stop_io_cq, io_cq);
}
3028 
3029 /*
3030  * Method to be called once async 'nvmf_doca_poll_group_create_io_sq()' completes
3031  *
3032  * @args [in]: The context of the async procedure
3033  */
{
	/* NOTE(review): the function signature (nvmf_doca_poll_group_create_io_sq_done(void *args))
	 * and the declaration of 'ctx' (struct nvmf_doca_poll_group_create_io_sq_ctx *) are not
	 * visible in this view — confirm against the full source. */
	struct nvmf_doca_request *request = ctx->request;
	struct nvmf_doca_sq *io_sq = ctx->io_sq;
	struct nvmf_doca_admin_qp *admin_qp = io_sq->io->poll_group->pci_dev_admin->admin_qp;

	/* On failure (status set by the create flow) release the unused SQ object;
	 * on success track it on the admin QP */
	if (request->request.rsp->nvme_cpl.status.sc == 1) {
		free(io_sq);
	} else {
		TAILQ_INSERT_TAIL(&admin_qp->io_sqs, io_sq, pci_dev_admin_link);
	}

	free(ctx);

	/* Echo the command ID and post the completion back to the host admin CQ */
	request->request.rsp->nvme_cpl.cid = request->request.cmd->nvme_cmd.cid;
	post_cqe_from_response(request, request);
}
3053 
3054 /*
3055  * Async Method to create an IO SQ, once complete 'nvmf_doca_poll_group_create_io_sq_done()' will be called
3056  *
3057  * @args [in]: The context of the async procedure
3058  */
static void nvmf_doca_poll_group_create_io_sq(void *args)
{
	/* NOTE(review): the declaration of 'ctx' (cast of 'args' to
	 * struct nvmf_doca_poll_group_create_io_sq_ctx *) is not visible in this view — confirm */
	struct nvmf_doca_request *request = ctx->request;
	struct spdk_nvme_cmd *cmd = &request->request.cmd->nvme_cmd;
	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = ctx->pci_dev_pg;
	struct nvmf_doca_pci_dev_admin *pci_dev_admin = pci_dev_pg->pci_dev_admin;
	/* QSIZE is 0's-based; +1 yields the real queue depth */
	uint32_t qsize = cmd->cdw10_bits.create_io_q.qsize + 1;

	/* Describe the new IO SQ: host-visible ring and owning transport */
	struct nvmf_doca_io_add_sq_attr sq_attr = {
		.pe = pci_dev_pg->poll_group->pe,
		.dev = pci_dev_admin->emulation_manager->emulation_manager,
		.nvme_dev = pci_dev_admin->pci_dev,
		.sq_depth = qsize,
		.host_sq_mmap = pci_dev_pg->host_mmap,
		.host_sq_address = cmd->dptr.prp.prp1,
		.sq_id = cmd->cdw10_bits.create_io_q.qid,
		.transport = pci_dev_pg->poll_group->pg.transport,
		.ctx = args,
	};
	/* Async add; the flow continues in nvmf_doca_poll_group_create_io_sq_done() */
	nvmf_doca_io_add_sq(ctx->io_cq, &sq_attr, ctx->io_sq);
}
3081 
3082 /*
 * Creates a submission I/O queue
3084  *
3085  * @sq [in]: The SQ that holds the command
3086  * @request [in]: the NVME command
3087  */
static void handle_create_io_sq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
{
	struct nvmf_doca_admin_qp *admin_qp = sq->io->poll_group->admin_qp;
	/* NOTE(review): per the NVMe spec, the CQ a new SQ attaches to is given by CDW11.CQID;
	 * this reads CDW10.QID (the new SQ's own ID) instead, which is only correct when the
	 * host always uses SQ ID == CQ ID — confirm intent. */
	uint32_t io_cq_id = request->request.cmd->nvme_cmd.cdw10_bits.create_io_q.qid;
	struct nvmf_doca_io *io_cq = admin_qp_find_io_cq_by_id(admin_qp, io_cq_id);
	if (io_cq == NULL) {
		DOCA_LOG_ERR("Failed to create IO SQ: IO CQ with ID %u not found", io_cq_id);
		/* NOTE(review): an error completion for the admin command appears elided here — confirm */
		return;
	}

	/* The SQ is created on the thread of the poll group that owns the target CQ */
	struct nvmf_doca_pci_dev_poll_group *pci_dev_pg = io_cq->poll_group;
	struct spdk_thread *thread = pci_dev_pg->poll_group->pg.group->thread;
	struct nvmf_doca_sq *io_sq = calloc(1, sizeof(*io_sq));
	if (io_sq == NULL) {
		DOCA_LOG_ERR("Failed to create IO SQ: Out of memory");
		return;
	}

	struct nvmf_doca_poll_group_create_io_sq_ctx *create_io_sq_ctx = calloc(1, sizeof(*create_io_sq_ctx));
	if (create_io_sq_ctx == NULL) {
		DOCA_LOG_ERR("Failed to create IO SQ: Out of memory");
		free(io_sq);
		return;
	}
	/* Bundle everything the async create flow needs */
	*create_io_sq_ctx = (struct nvmf_doca_poll_group_create_io_sq_ctx){
		.request = request,
		.pci_dev_pg = pci_dev_pg,
		.io_cq = io_cq,
		.io_sq = io_sq,
	};

	spdk_thread_exec_msg(thread, nvmf_doca_poll_group_create_io_sq, create_io_sq_ctx);
}
3124 
3125 /*
3126  * Handle delete IO SQ admin command
3127  *
3128  * @sq [in]: The SQ that holds the command
3129  * @request [in]: The NVMe command
3130  */
static void handle_delete_io_sq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
{
	struct nvmf_doca_admin_qp *admin_qp = sq->io->poll_group->admin_qp;
	/* Look up the SQ to delete by the QID carried in CDW10 */
	uint32_t io_sq_id = request->request.cmd->nvme_cmd.cdw10_bits.delete_io_q.qid;

	struct nvmf_doca_sq *io_sq = admin_qp_find_io_sq_by_id(admin_qp, io_sq_id);
	if (io_sq == NULL) {
		DOCA_LOG_ERR("Failed to delete IO SQ: IO SQ with ID %u does not exist", io_sq_id);
		/* NOTE(review): an error completion for the admin command appears elided here — confirm */
		return;
	}

	struct nvmf_doca_poll_group_delete_io_sq_ctx *delete_io_sq_ctx = calloc(1, sizeof(*delete_io_sq_ctx));
	if (delete_io_sq_ctx == NULL) {
		DOCA_LOG_ERR("Failed to delete IO SQ: Out of memory");
		return;
	}
	/* Stash the originating admin request on the SQ for the stop flow */
	*delete_io_sq_ctx = (struct nvmf_doca_poll_group_delete_io_sq_ctx){
		.request = request,
	};
	io_sq->ctx = delete_io_sq_ctx;

	/* The SQ must be stopped on the thread of the poll group that owns it */
	struct spdk_thread *thread = io_sq->io->poll_group->poll_group->pg.group->thread;
	spdk_thread_exec_msg(thread, nvmf_doca_pci_dev_poll_group_stop_io_sq, io_sq);
}
3157 
3158 /*
3159  * Callback invoked once SQE has been fetched from Host SQ
3160  *
3161  * @sq [in]: The SQ used for the fetch operation
3162  * @sqe [in]: The SQE that was fetched from Host
3163  * @sqe_idx [in]: The SQE index
3164  */
static void nvmf_doca_on_fetch_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx)
{
	// Prepare request: copy the raw admin SQE into an SPDK NVMf request
	struct spdk_nvme_cmd *cmd = (struct spdk_nvme_cmd *)&sqe->data[0];
	struct nvmf_doca_request *request = nvmf_doca_request_get(sq);
	request->request.cmd->nvme_cmd = *cmd;
	request->sqe_idx = sqe_idx;
	request->doca_sq = sq;

	/* Initial data-transfer direction derived from the opcode */
	request->request.xfer = spdk_nvme_opc_get_data_transfer(request->request.cmd->nvme_cmd.opc);

	DOCA_LOG_DBG("Received admin command: opcode %u", request->request.cmd->nvme_cmd.opc);
	switch (request->request.cmd->nvme_cmd.opc) {
	/* Queue-management commands are handled by the transport itself; each handler
	 * completes the admin command asynchronously, hence the early returns */
	case SPDK_NVME_OPC_CREATE_IO_CQ:
		handle_create_io_cq(sq, request);
		return;
	case SPDK_NVME_OPC_DELETE_IO_CQ:
		handle_delete_io_cq(sq, request);
		return;
	case SPDK_NVME_OPC_CREATE_IO_SQ:
		handle_create_io_sq(sq, request);
		return;
	case SPDK_NVME_OPC_DELETE_IO_SQ:
		handle_delete_io_sq(sq, request);
		return;
	case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
		/* AER carries no data buffer */
		request->request.length = 0;
		request->request.xfer = SPDK_NVME_DATA_NONE;
		break;
	case SPDK_NVME_OPC_IDENTIFY:
		request->request.length = IDENTIFY_CMD_DATA_BUFFER_SIZE;
		break;
	case SPDK_NVME_OPC_GET_LOG_PAGE:
		/* NUMD is a 0's-based dword count split across CDW10 (NUMDL) and CDW11 (NUMDU) */
		uint32_t num_dword =
			((((uint32_t)cmd->cdw11_bits.get_log_page.numdu << 16) | cmd->cdw10_bits.get_log_page.numdl) +
			 1);
		/* Guard against overflow when converting dwords to bytes */
		if (num_dword > UINT32_MAX / 4) {
			DOCA_LOG_ERR("NUMD exceeds maximum size: num of DW %u", num_dword);
			break;
		}
		request->request.length = num_dword * 4;
		break;
	case SPDK_NVME_OPC_GET_FEATURES:
	case SPDK_NVME_OPC_SET_FEATURES:
		uint8_t fid = cmd->cdw10_bits.set_features.fid;
		DOCA_LOG_DBG("Received feature: opcode %u", fid);
		/* Per-feature data buffer sizes; features with no buffer get length 0 below */
		switch (fid) {
		case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
			request->request.length = FEAT_CMD_LBA_RANGE_SIZE;
			break;
		case SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
			request->request.length = FEAT_CMD_AUTONOMOUS_POWER_STATE_TRANSITION_SIZE;
			break;
		case SPDK_NVME_FEAT_TIMESTAMP:
			request->request.length = FEAT_CMD_TIMESTAMP_SIZE;
			break;
		case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
			request->request.length = FEAT_CMD_HOST_BEHAVIOR_SUPPORT_SIZE;
			break;
		case SPDK_NVME_FEAT_HOST_IDENTIFIER:
			/* EXHID selects the 128-bit (extended) vs 64-bit host identifier */
			if (cmd->cdw11_bits.feat_host_identifier.bits.exhid) {
				request->request.length = FEAT_CMD_HOST_IDENTIFIER_EXT_SIZE;
			} else {
				request->request.length = FEAT_CMD_HOST_IDENTIFIER_SIZE;
			}
			break;
		case SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		case SPDK_NVME_FEAT_NUMBER_OF_QUEUES:
		case SPDK_NVME_FEAT_ARBITRATION:
		case SPDK_NVME_FEAT_POWER_MANAGEMENT:
		case SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD:
		case SPDK_NVME_FEAT_ERROR_RECOVERY:
		case SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE:
		case SPDK_NVME_FEAT_INTERRUPT_COALESCING:
		case SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
		case SPDK_NVME_FEAT_WRITE_ATOMICITY:
		case SPDK_NVME_FEAT_HOST_MEM_BUFFER:
		case SPDK_NVME_FEAT_KEEP_ALIVE_TIMER:
		case SPDK_NVME_FEAT_HOST_CONTROLLED_THERMAL_MANAGEMENT:
		case SPDK_NVME_FEAT_NON_OPERATIONAL_POWER_STATE_CONFIG:
		case SPDK_NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG:
		case SPDK_NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG: /* this feature is supposed to have a data buffer*/
		case SPDK_NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW:
		case SPDK_NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES:
		case SPDK_NVME_FEAT_SANITIZE_CONFIG:
		case SPDK_NVME_FEAT_ENDURANCE_GROUP_EVENT:
		case SPDK_NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
		case SPDK_NVME_FEAT_HOST_RESERVE_MASK:
		case SPDK_NVME_FEAT_HOST_RESERVE_PERSIST:
			/* Treated as data-less features */
			request->request.length = 0;
			request->request.xfer = SPDK_NVME_DATA_NONE;
			break;
		default:
			/* Unsupported feature: request is dropped without completion here */
			DOCA_LOG_ERR("Received unsupported feautre: opcode %u", fid);
			return;
		}
		break;
	default:
		DOCA_LOG_ERR("Received unsupported command: opcode %u", cmd->opc);
		return;
	}

	// Determine data direction
	switch (request->request.xfer) {
	case SPDK_NVME_DATA_NONE:
		begin_nvme_cmd_data_none(request);
		break;
	case SPDK_NVME_DATA_HOST_TO_CONTROLLER:
		/* NOTE(review): the host-to-controller data path appears elided in this view
		 * (presumably begin_nvme_admin_cmd_data_host_to_dpu) — confirm */
		break;
	case SPDK_NVME_DATA_CONTROLLER_TO_HOST:
		/* NOTE(review): the controller-to-host data path appears elided in this view
		 * (presumably begin_nvme_admin_cmd_data_dpu_to_host) — confirm */
		break;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		DOCA_LOG_ERR("Command with bidirectional data not support");
		return;
	default:
		DOCA_LOG_ERR("Received unidentified data direction");
		return;
	}
}
3300 
3301 #define LBA_SIZE 512
3302 
3303 /*
3304  * Callback invoked once NVM IO SQE has been fetched from Host SQ
3305  *
3306  * @sq [in]: The SQ used for the fetch operation
3307  * @sqe [in]: The SQE that was fetched from Host
3308  * @sqe_idx [in]: The SQE index
3309  */
static void nvmf_doca_on_fetch_nvm_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx)
{
	// Prepare request: point the SPDK request at the raw SQE in place (no copy)
	struct spdk_nvme_cmd *cmd = (struct spdk_nvme_cmd *)&sqe->data[0];
	struct nvmf_doca_request *request = nvmf_doca_request_get(sq);

	request->request.cmd = (union nvmf_h2c_msg *)cmd;
	request->doca_sq = sq;
	request->sqe_idx = sqe_idx;
	request->request.xfer = spdk_nvme_opc_get_data_transfer(request->request.cmd->nvme_cmd.opc);

	DOCA_LOG_DBG("Received NVMe command: opcode %u", request->request.cmd->nvme_cmd.opc);
	switch (request->request.cmd->nvme_cmd.opc) {
	case SPDK_NVME_OPC_FLUSH:
		/* No data buffer */
		break;
	case SPDK_NVME_OPC_WRITE:
	case SPDK_NVME_OPC_READ:
		/* Block count is 0's-based. NOTE(review): this reads CDW12 via the 'copy' bit view
		 * and assumes a fixed 512-byte LBA (LBA_SIZE) — confirm against the namespace format */
		request->request.length = (request->request.cmd->nvme_cmd.cdw12_bits.copy.nr + 1) * LBA_SIZE;
		break;
	default:
		/* Unsupported NVM opcode: request is dropped */
		DOCA_LOG_ERR("Received unsupported NVM command: opcode %u", cmd->opc);
		return;
	}

	// Determine data direction
	switch (request->request.xfer) {
	case SPDK_NVME_DATA_NONE:
		begin_nvme_cmd_data_none(request);
		break;
	case SPDK_NVME_DATA_HOST_TO_CONTROLLER:
		/* NOTE(review): the host-to-controller data path appears elided in this view
		 * (presumably begin_nvme_cmd_data_host_to_dpu) — confirm */
		break;
	case SPDK_NVME_DATA_CONTROLLER_TO_HOST:
		/* NOTE(review): the controller-to-host data path appears elided in this view
		 * (presumably begin_nvme_cmd_data_dpu_to_host) — confirm */
		break;
	case SPDK_NVME_DATA_BIDIRECTIONAL:
		DOCA_LOG_ERR("Command with bidirectional data not support");
		return;
	default:
		DOCA_LOG_ERR("Received unidentified data direction");
		return;
	}
}
3368 
3369 /*
3370  * Callback invoked once CQE has been posted to Host CQ
3371  *
3372  * @cq [in]: The CQ used for the post operation
3373  * @user_data [in]: Same user data previously provided in 'nvmf_doca_io_post_cqe()'
3374  */
3375 static void nvmf_doca_on_post_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data)
3376 {
3377  (void)cq;
3378 
3379  struct nvmf_doca_request *request = user_data.ptr;
3380 
3381  nvmf_doca_req_free(&request->request);
3382 }
3383 
3384 /*
3385  * Callback invoked once CQE has been posted to Host IO CQ
3386  *
3387  * @cq [in]: The CQ used for the post operation
3388  * @user_data [in]: Same user data previously provided in 'nvmf_doca_io_post_cqe()'
3389  */
3390 static void nvmf_doca_on_post_nvm_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data)
3391 {
3392  (void)cq;
3393 
3394  struct nvmf_doca_request *request = user_data.ptr;
3395 
3396  nvmf_doca_req_free(&request->request);
3397 }
3398 
3399 /*
3400  * Callback invoked once data copied to Host.
3401  *
3402  * The source and destination buffers must be freed at some point using doca_buf_dec_refcount()
3403  *
3404  * @sq [in]: The SQ used for the copy operation
3405  * @dst [in]: The buffer used as destination in the copy operation
3406  * @src [in]: The buffer used as source in the copy operation
3407  * @user_data [in]: Same user data previously provided in nvmf_doca_sq_copy_data()
3408  */
					    struct doca_buf *dst,
					    struct doca_buf *src,
					    union doca_data user_data)
{
	(void)sq;
	(void)dst;
	(void)src;

	struct nvmf_doca_request *request = user_data.ptr;
	/* NOTE(review): the first signature line and the continuation of the request
	 * (how 'request' is advanced/completed after the copy) appear elided in this view — confirm */
}
3422 
3423 /*
3424  * Callback invoked once NVM command data copied to Host.
3425  *
3426  * The source and destination buffers must be freed at some point using doca_buf_dec_refcount()
3427  *
3428  * @sq [in]: The SQ used for the copy operation
3429  * @dst [in]: The buffer used as destination in the copy operation
3430  * @src [in]: The buffer used as source in the copy operation
3431  * @user_data [in]: Same user data previously provided in nvmf_doca_sq_copy_data()
3432  */
						struct doca_buf *dst,
						struct doca_buf *src,
						union doca_data user_data)
{
	(void)sq;
	(void)dst;
	(void)src;

	/* A multi-buffer transfer is finished only when its last copy completes */
	struct nvmf_doca_request *request = user_data.ptr;
	request->num_of_buffers--;
	if (request->num_of_buffers == 0)
		/* NOTE(review): the first signature line and the statement executed when all
		 * buffers have completed appear elided in this view — confirm */
}
3447 
/* NVMf transport operations table for the DOCA (emulated NVMe PCI device) transport;
 * registered with SPDK via SPDK_NVMF_TRANSPORT_REGISTER */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_doca = {
	.name = "DOCA",
	.type = SPDK_NVME_TRANSPORT_CUSTOM,
	.opts_init = nvmf_doca_opts_init,
	.create = nvmf_doca_create,
	.dump_opts = nvmf_doca_dump_opts,
	.destroy = nvmf_doca_destroy,

	/* Listener management */
	.listen = nvmf_doca_listen,
	.stop_listen = nvmf_doca_stop_listen,
	.listen_associate = nvmf_doca_listen_associate,

	/* Poll group lifecycle and polling */
	.poll_group_create = nvmf_doca_poll_group_create,
	.get_optimal_poll_group = nvmf_doca_get_optimal_poll_group,
	.poll_group_destroy = nvmf_doca_poll_group_destroy,
	.poll_group_add = nvmf_doca_poll_group_add,
	.poll_group_remove = nvmf_doca_poll_group_remove,
	.poll_group_poll = nvmf_doca_poll_group_poll,

	/* Request completion and release */
	.req_free = nvmf_doca_req_free,
	.req_complete = nvmf_doca_req_complete,

	/* Queue pair teardown and listen trid lookup */
	.qpair_fini = nvmf_doca_close_qpair,
	.qpair_get_listen_trid = nvmf_doca_qpair_get_listen_trid,
};
3476 
#define NULL
Definition: __stddef_null.h:26
#define offsetof(t, d)
int32_t result
struct rte_eth_dev_info dev_info
Definition: device.c:32
doca_dpa_dev_mmap_t mmap
static void nvmf_doca_on_io_sq_stop(struct nvmf_doca_sq *sq)
static void execute_spdk_request(struct nvmf_doca_request *request, void *arg)
static void nvmf_doca_poll_group_create_io_cq_done(void *args)
#define IDENTIFY_CMD_DATA_BUFFER_SIZE
static struct spdk_nvmf_transport_poll_group * nvmf_doca_poll_group_create(struct spdk_nvmf_transport *transport, struct spdk_nvmf_poll_group *group)
#define NVMF_DOCA_DEFAULT_NUM_SHARED_BUFFER
static void nvmf_doca_on_post_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data)
static void nvmf_doca_destroy_pci_dev_poll_group(struct nvmf_doca_pci_dev_poll_group *pci_dev_pg)
static void copy_dpu_data_to_host(struct nvmf_doca_request *request, void *arg)
static void nvmf_doca_on_copy_nvm_data_complete(struct nvmf_doca_sq *sq, struct doca_buf *dst, struct doca_buf *src, union doca_data user_data)
static doca_error_t nvmf_doca_create_emulation_manager(struct doca_devinfo *dev_info, struct nvmf_doca_emulation_manager **ret_emulation_manager)
static void flr_event_handler_cb(struct doca_devemu_pci_dev *pci_dev, union doca_data user_data)
static void enable_nvmf_controller_cb(struct nvmf_doca_request *request, void *cb_arg)
#define FEAT_CMD_LBA_RANGE_SIZE
static void buffers_ready_copy_data_host_to_dpu(struct nvmf_doca_request *request)
static void nvmf_doca_pci_dev_admin_reset_continue(struct nvmf_doca_pci_dev_admin *pci_dev_admin)
static int nvmf_doca_listen_associate(struct spdk_nvmf_transport *transport, const struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid)
static void begin_nvme_cmd_data_dpu_to_host(struct nvmf_doca_request *request)
#define ADMIN_QP_POLL_RATE_LIMIT
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)
static void nvmf_doca_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
static doca_error_t register_handlers_set_datapath_and_start(struct nvmf_doca_emulation_manager *doca_emulation_manager, struct nvmf_doca_pci_dev_admin *pci_dev_admin)
static struct spdk_nvmf_transport_poll_group * nvmf_doca_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
#define NVMF_DOCA_DEFAULT_AQ_DEPTH
#define NVMF_DOCA_DEFAULT_MAX_QPAIRS_PER_CTRLR
#define NVMF_DOCA_DEFAULT_BUFFER_CACHE_SIZE
static int nvmf_doca_req_free(struct spdk_nvmf_request *req)
static struct nvmf_doca_io * admin_qp_find_io_cq_by_id(struct nvmf_doca_admin_qp *admin_qp, uint32_t io_cq_id)
static void post_cqe_from_response(struct nvmf_doca_request *request, void *arg)
static doca_error_t nvmf_doca_create_pci_dev_poll_group(struct nvmf_doca_pci_dev_admin *pci_dev_admin, struct nvmf_doca_admin_qp *admin_qp, struct nvmf_doca_poll_group *doca_poll_group, struct nvmf_doca_pci_dev_poll_group **ret_pci_dev_pg)
static void nvmf_doca_poll_group_create_io_sq(void *args)
static void begin_nvme_cmd_data_host_to_dpu(struct nvmf_doca_request *request)
static void nvmf_doca_on_fetch_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx)
static int nvmf_doca_pci_dev_admin_create(struct nvmf_doca_transport *doca_transport, const struct spdk_nvme_transport_id *trid, struct nvmf_doca_pci_dev_admin **pci_dev_admin_out)
#define LBA_SIZE
static void handle_create_io_cq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
#define FEAT_CMD_TIMESTAMP_SIZE
static void begin_nvme_admin_cmd_data_dpu_to_host(struct nvmf_doca_request *request)
static void hotplug_state_change_handler_cb(struct doca_devemu_pci_dev *pci_dev, union doca_data user_data)
#define NVMF_DOCA_DEFAULT_ABORT_TIMEOUT_SEC
static void nvmf_doca_admin_qp_stop_all_io_cqs(struct nvmf_doca_admin_qp *admin_qp)
static int nvmf_doca_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
static doca_error_t nvmf_doca_pci_type_create_and_start(struct nvmf_doca_emulation_manager *doca_emulation_manager)
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_doca
static void nvmf_doca_pci_dev_poll_group_stop_io_sq(void *ctx)
static void nvmf_doca_destroy_admin_qp_continue(struct nvmf_doca_pci_dev_admin *pci_dev_admin)
static int nvmf_doca_req_complete(struct spdk_nvmf_request *req)
static void nvmf_doca_admin_qp_stop_all_io_sqs(struct nvmf_doca_admin_qp *admin_qp)
static struct spdk_nvmf_transport * nvmf_doca_create(struct spdk_nvmf_transport_opts *opts)
static void stateful_region_write_event_handler_cb(struct doca_devemu_pci_dev_event_bar_stateful_region_driver_write *event, union doca_data user_data)
static void nvmf_doca_connect_spdk_qp_done(struct nvmf_doca_request *request, void *cb_arg)
#define NVMF_DOCA_DEFAULT_IN_CAPSULE_DATA_SIZE
#define HOTPLUG_TIMEOUT_IN_MICROS
static void begin_nvme_cmd_data_none(struct nvmf_doca_request *request)
static doca_error_t check_for_duplicate(struct nvmf_doca_transport *doca_transport, const char *vuid)
static struct nvmf_doca_sq * admin_qp_find_io_sq_by_id(struct nvmf_doca_admin_qp *admin_qp, uint32_t io_sq_id)
static void nvmf_doca_pci_dev_poll_group_stop_io_cq(void *ctx)
#define NVMF_DOCA_DIF_INSERT_OR_STRIP
static void copy_prp_list_data(struct nvmf_doca_request *request, void *arg)
struct doca_dpa_app * nvmf_doca_transport_app
static void nvmf_doca_on_admin_cq_stop(struct nvmf_doca_io *io)
static void nvme_cmd_map_prps(struct nvmf_doca_request *request)
static void nvmf_doca_on_post_nvm_cqe_complete(struct nvmf_doca_cq *cq, union doca_data user_data)
static struct nvmf_doca_pci_dev_admin * nvmf_doca_transport_find_pci_dev_admin(struct nvmf_doca_transport *doca_transport, const char *vuid)
#define FEAT_CMD_HOST_IDENTIFIER_SIZE
static void nvmf_doca_close_qpair(struct spdk_nvmf_qpair *qpair, spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
static doca_error_t find_emulation_manager_and_function_by_vuid(struct nvmf_doca_transport *doca_transport, const char *vuid, struct nvmf_doca_emulation_manager **ret_emulation_manager, struct doca_dev_rep **ret_device_rep)
static void nvmf_doca_on_initialization_error(void *cb_arg)
static struct nvmf_doca_poll_group * choose_poll_group(struct nvmf_doca_transport *transport)
static void handle_delete_io_cq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
static int nvmf_doca_connect_spdk_qp(struct nvmf_doca_sq *sq)
static void nvmf_doca_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
static void handle_create_io_sq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
static void nvmf_doca_set_property(struct nvmf_doca_sq *doca_sq)
#define FEAT_CMD_HOST_BEHAVIOR_SUPPORT_SIZE
static doca_error_t devemu_hotplug_transition_wait(struct nvmf_doca_pci_dev_admin *pci_dev_admin, enum doca_devemu_pci_hotplug_state new_state, size_t timeout_in_micros)
#define NVMF_DOCA_DEFAULT_IO_UINT_SIZE
#define NVMF_ADMIN_QUEUE_ID
static void nvmf_doca_poll_group_create_io_cq(void *args)
static int nvmf_doca_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_qpair *qpair)
static void post_error_cqe_from_response(struct nvmf_doca_request *request)
static int nvmf_doca_admin_poll_group_poll(void *arg)
static void buffers_ready_copy_data_dpu_to_host(struct nvmf_doca_request *request)
static doca_error_t register_to_stateful_region_write_events(struct doca_devemu_pci_dev *pci_dev, struct nvmf_doca_pci_dev_admin *pci_dev_admin)
static doca_error_t nvmf_doca_destroy_emulation_manager(struct nvmf_doca_emulation_manager *doca_emulation_manager)
static doca_error_t nvmf_doca_create_host_mmap(struct doca_devemu_pci_dev *pci_dev, struct doca_dev *emulation_manager, struct doca_mmap **mmap_out)
nvmf_doca_listener_state
@ NVMF_DOCA_LISTENER_INITIALIZING
@ NVMF_DOCA_LISTENER_INITIALIZED
@ NVMF_DOCA_LISTENER_INITIALIZATION_ERROR
@ NVMF_DOCA_LISTENER_UNINITIALIZED
@ NVMF_DOCA_LISTENER_RESETTING
static void handle_controller_register_events(struct doca_devemu_pci_dev *pci_dev, const struct bar_region_config *config)
static void nvmf_doca_destroy_admin_qp(void *cb_arg)
static void nvmf_doca_poll_group_create_io_sq_done(void *args)
static void nvmf_doca_on_copy_data_complete(struct nvmf_doca_sq *sq, struct doca_buf *dst, struct doca_buf *src, union doca_data user_data)
static void nvmf_doca_admin_poll_group_destroy(struct nvmf_doca_admin_poll_group *admin_pg)
static doca_error_t nvmf_doca_admin_poll_group_create(struct nvmf_doca_admin_poll_group *admin_pg)
static void nvmf_doca_pci_dev_admin_destroy(struct nvmf_doca_pci_dev_admin *pci_dev_admin)
static void nvmf_doca_on_admin_sq_stop(struct nvmf_doca_sq *sq)
static void nvmf_doca_opts_init(struct spdk_nvmf_transport_opts *opts)
static void nvmf_doca_create_admin_qp(void *cb_arg)
static void nvmf_doca_pci_dev_poll_group_stop_io_sq_done(void *ctx)
static void nvmf_doca_pci_dev_admin_reset(struct nvmf_doca_pci_dev_admin *pci_dev_admin)
#define FEAT_CMD_HOST_IDENTIFIER_EXT_SIZE
static void handle_delete_io_sq(struct nvmf_doca_sq *sq, struct nvmf_doca_request *request)
#define NVME_PAGE_SIZE
static int nvmf_doca_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *listen_opts)
static int nvmf_doca_destroy(struct spdk_nvmf_transport *transport, spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
static void init_dpu_host_buffers(struct nvmf_doca_request *request)
static void nvmf_doca_stop_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid)
static void nvmf_doca_on_io_cq_stop(struct nvmf_doca_io *io)
#define NVMF_DOCA_DEFAULT_MAX_QUEUE_DEPTH
static struct nvmf_doca_pci_dev_poll_group * get_pci_dev_poll_group(struct nvmf_doca_poll_group *doca_poll_group, struct doca_devemu_pci_dev *pci_dev)
static void devemu_state_changed_cb(const union doca_data user_data, struct doca_ctx *ctx, enum doca_ctx_states prev_state, enum doca_ctx_states next_state)
static void begin_nvme_admin_cmd_data_host_to_dpu(struct nvmf_doca_request *request)
static void copy_nvme_dpu_data_to_host(struct nvmf_doca_request *request, void *arg)
static void nvmf_doca_create_admin_qp_done(void *cb_arg)
#define NVMF_DOCA_DEFAULT_MAX_IO_SIZE
DOCA_LOG_REGISTER(NVME_EMULATION_DOCA_TRANSPORT)
static void nvmf_doca_pci_dev_poll_group_stop_io_cq_done(void *ctx)
static void nvmf_doca_destroy_admin_qp_done(void *cb_arg)
SPDK_NVMF_TRANSPORT_REGISTER(doca, &spdk_nvmf_transport_doca)
static int nvmf_doca_poll_group_add(struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_qpair *qpair)
static void nvmf_doca_on_fetch_nvm_sqe_complete(struct nvmf_doca_sq *sq, struct nvmf_doca_sqe *sqe, uint16_t sqe_idx)
static int nvmf_doca_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid)
#define FEAT_CMD_AUTONOMOUS_POWER_STATE_TRANSITION_SIZE
static uint64_t *restrict src
Definition: dpaintrin.h:230
if(bitoffset % 64+bitlength > 64) result|
static enum doca_flow_port_operation_state current_state
DOCA_STABLE doca_error_t doca_buf_dec_refcount(struct doca_buf *buf, uint16_t *refcount)
Decrease the object reference count by 1, if 0 reached, return the element back to the inventory.
DOCA_STABLE doca_error_t doca_buf_get_head(const struct doca_buf *buf, void **head)
Get the buffer's head.
DOCA_STABLE doca_error_t doca_ctx_start(struct doca_ctx *ctx)
Finalizes all configurations, and starts the DOCA CTX.
DOCA_EXPERIMENTAL doca_error_t doca_ctx_set_datapath_on_dpa(struct doca_ctx *ctx, struct doca_dpa *dpa_dev)
This function binds the DOCA context to a dpa device.
DOCA_STABLE doca_error_t doca_ctx_set_state_changed_cb(struct doca_ctx *ctx, doca_ctx_state_changed_callback_t cb)
Set state changed callback.
DOCA_STABLE doca_error_t doca_ctx_set_user_data(struct doca_ctx *ctx, union doca_data user_data)
set user data to context
DOCA_STABLE doca_error_t doca_ctx_stop(struct doca_ctx *ctx)
Stops the context allowing reconfiguration.
doca_ctx_states
This enum defines the states of a context.
Definition: doca_ctx.h:83
DOCA_STABLE doca_error_t doca_ctx_get_user_data(const struct doca_ctx *ctx, union doca_data *user_data)
get user data from context
@ DOCA_CTX_STATE_STARTING
Definition: doca_ctx.h:93
@ DOCA_CTX_STATE_STOPPING
Definition: doca_ctx.h:106
@ DOCA_CTX_STATE_IDLE
Definition: doca_ctx.h:88
@ DOCA_CTX_STATE_RUNNING
Definition: doca_ctx.h:98
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_bar_stateful_region_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, uint64_t size)
Set a stateful BAR region configuration for a BAR layout in a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_start(struct doca_devemu_pci_type *pci_type)
Start a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_create_rep_list(struct doca_devemu_pci_type *pci_type, struct doca_devinfo_rep ***dev_list_rep, uint32_t *nb_devs_rep)
Create list of available representor devices for a given DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_memory_bar_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint8_t log_sz, enum doca_devemu_pci_bar_mem_type memory_type, uint8_t prefetchable)
Set a memory BAR layout configuration for DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_stop(struct doca_devemu_pci_type *pci_type)
Stop a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_class_code(struct doca_devemu_pci_type *pci_type, uint32_t class_code)
Set the PCI Class Code of a DOCA devemu PCI type to identify generic operation.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_revision_id(struct doca_devemu_pci_type *pci_type, uint8_t revision_id)
Set the PCI Revision ID of a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_device_id(struct doca_devemu_pci_type *pci_type, uint16_t device_id)
Set the PCI Device ID of a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_bar_db_region_by_offset_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, uint64_t size, uint8_t log_db_size, uint8_t log_stride_size)
Set a doorbell BAR region configuration for a BAR layout in a DOCA devemu PCI type....
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_create(const char *name, struct doca_devemu_pci_type **pci_type)
Create a stopped DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_vendor_id(struct doca_devemu_pci_type *pci_type, uint16_t vendor_id)
Set the PCI Vendor ID of a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_bar_db_region_by_data_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, uint64_t size, uint8_t log_db_size, uint16_t db_id_msbyte, uint16_t db_id_lsbyte)
Set a doorbell BAR region configuration for a BAR layout in a DOCA devemu PCI type....
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_num_msix(struct doca_devemu_pci_type *pci_type, uint16_t num_msix)
Set the size of the MSI-X Table from MSI-X Capability Registers (1 based) of a DOCA devemu PCI type....
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_subsystem_id(struct doca_devemu_pci_type *pci_type, uint16_t subsystem_id)
Set the PCI Subsystem ID of a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_destroy(struct doca_devemu_pci_type *pci_type)
Destroy a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_modify_bar_stateful_region_default_values(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, void *default_values, uint64_t size)
Modify default registers values for a configured stateful region in a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_subsystem_vendor_id(struct doca_devemu_pci_type *pci_type, uint16_t subsystem_vid)
Set the PCI Subsystem Vendor ID of a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_bar_msix_pba_region_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, uint64_t size)
Set a MSI-X PBA BAR region configuration for a BAR layout in a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_dev(struct doca_devemu_pci_type *pci_type, struct doca_dev *dev)
Set the DOCA device for a specific DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_type_set_bar_msix_table_region_conf(struct doca_devemu_pci_type *pci_type, uint8_t id, uint64_t start_addr, uint64_t size)
Set a MSI-X table BAR region configuration for a BAR layout in a DOCA devemu PCI type.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_event_flr_register(struct doca_devemu_pci_dev *pci_dev, doca_devemu_pci_dev_event_flr_handler_cb_t handler, union doca_data user_data)
Register to PCI FLR (Function Level Reset) event.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_hotplug(struct doca_devemu_pci_dev *pci_dev)
Issue hotplug procedure of the DOCA devemu PCI device.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_destroy(struct doca_devemu_pci_dev *pci_dev)
Free a DOCA devemu PCI device.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_get_hotplug_state(struct doca_devemu_pci_dev *pci_dev, enum doca_devemu_pci_hotplug_state *state)
Get the hotplug state of the DOCA devemu PCI device.
DOCA_EXPERIMENTAL struct doca_devemu_pci_dev * doca_devemu_pci_dev_event_bar_stateful_region_driver_write_get_pci_dev(struct doca_devemu_pci_dev_event_bar_stateful_region_driver_write *event)
Get DOCA devemu PCI device from BAR stateful region driver write event.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_create(struct doca_devemu_pci_type *pci_type, struct doca_dev_rep *dev_rep, struct doca_pe *progress_engine, struct doca_devemu_pci_dev **pci_dev)
Allocate DOCA devemu PCI device.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_query_bar_stateful_region_values(struct doca_devemu_pci_dev *pci_dev, uint8_t id, uint64_t offset, void *out_values, uint64_t size)
Query registers values of the stateful region in a DOCA devemu PCI device.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_event_bar_stateful_region_driver_write_register(struct doca_devemu_pci_dev *pci_dev, doca_devemu_pci_dev_event_bar_stateful_region_driver_write_handler_cb_t handler, uint8_t bar_id, uint64_t bar_region_start_addr, union doca_data user_data)
Register to BAR stateful region driver write event.
doca_devemu_pci_hotplug_state
DOCA devemu pci hotplug state.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_event_hotplug_state_change_register(struct doca_devemu_pci_dev *pci_dev, doca_devemu_pci_dev_event_hotplug_state_change_handler_cb_t handler, union doca_data user_data)
Register to hotplug state changes.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_mmap_create(struct doca_devemu_pci_dev *pci_dev, struct doca_mmap **mmap)
Allocates zero size memory map object with default/unset attributes associated with a DOCA devemu PCI...
DOCA_EXPERIMENTAL struct doca_ctx * doca_devemu_pci_dev_as_ctx(struct doca_devemu_pci_dev *pci_dev)
Convert DOCA devemu PCI device instance into DOCA context.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_hotunplug(struct doca_devemu_pci_dev *pci_dev)
Issue hot unplug procedure of the DOCA devemu PCI device.
DOCA_EXPERIMENTAL doca_error_t doca_devemu_pci_dev_modify_bar_stateful_region_values(struct doca_devemu_pci_dev *pci_dev, uint8_t id, uint64_t offset, void *values, uint64_t size)
Modify registers values for stateful region in a DOCA devemu PCI device.
@ DOCA_DEVEMU_PCI_HP_STATE_POWER_OFF
@ DOCA_DEVEMU_PCI_HP_STATE_POWER_ON
DOCA_STABLE doca_error_t doca_dev_rep_open(struct doca_devinfo_rep *devinfo, struct doca_dev_rep **dev_rep)
Initialize representor device for use.
DOCA_STABLE doca_error_t doca_devinfo_cap_is_hotplug_manager_supported(const struct doca_devinfo *devinfo, uint8_t *is_hotplug_manager)
Get the hotplug manager capability of a DOCA devinfo.
DOCA_STABLE doca_error_t doca_devinfo_rep_destroy_list(struct doca_devinfo_rep **dev_list_rep)
Destroy list of representor device info structures.
DOCA_STABLE doca_error_t doca_devinfo_create_list(struct doca_devinfo ***dev_list, uint32_t *nb_devs)
Creates list of all available local devices.
DOCA_STABLE doca_error_t doca_dev_rep_close(struct doca_dev_rep *dev)
Destroy allocated representor device instance.
DOCA_STABLE doca_error_t doca_devinfo_rep_get_vuid(const struct doca_devinfo_rep *devinfo_rep, char *rep_vuid, uint32_t size)
Get the Vendor Unique ID of a representor DOCA devinfo.
#define DOCA_DEVINFO_REP_VUID_SIZE
Buffer size to hold VUID. Including a null terminator.
Definition: doca_dev.h:661
DOCA_STABLE doca_error_t doca_devinfo_destroy_list(struct doca_devinfo **dev_list)
Destroy list of local device info structures.
DOCA_STABLE doca_error_t doca_dev_open(struct doca_devinfo *devinfo, struct doca_dev **dev)
Initialize local device for use.
DOCA_EXPERIMENTAL doca_error_t doca_dpa_stop(struct doca_dpa *dpa)
Stop a DPA context.
DOCA_EXPERIMENTAL doca_error_t doca_dpa_create(struct doca_dev *dev, struct doca_dpa **dpa)
Create a DOCA DPA Context.
DOCA_EXPERIMENTAL doca_error_t doca_dpa_destroy(struct doca_dpa *dpa)
Destroy a DOCA DPA context.
DOCA_EXPERIMENTAL doca_error_t doca_dpa_start(struct doca_dpa *dpa)
Start a DPA context.
DOCA_EXPERIMENTAL doca_error_t doca_dpa_set_app(struct doca_dpa *dpa, struct doca_dpa_app *app)
Set program app for DPA context.
enum doca_error doca_error_t
DOCA API return codes.
DOCA_STABLE const char * doca_error_get_name(doca_error_t error)
Returns the string representation of an error code name.
@ DOCA_ERROR_TIME_OUT
Definition: doca_error.h:47
@ DOCA_ERROR_INITIALIZATION
Definition: doca_error.h:46
@ DOCA_ERROR_NOT_FOUND
Definition: doca_error.h:54
@ DOCA_ERROR_BAD_STATE
Definition: doca_error.h:56
@ DOCA_ERROR_AGAIN
Definition: doca_error.h:43
@ DOCA_SUCCESS
Definition: doca_error.h:38
@ DOCA_ERROR_NO_MEMORY
Definition: doca_error.h:45
@ DOCA_ERROR_NOT_PERMITTED
Definition: doca_error.h:40
#define DOCA_LOG_ERR(format,...)
Generates an ERROR application log message.
Definition: doca_log.h:466
#define DOCA_LOG_INFO(format,...)
Generates an INFO application log message.
Definition: doca_log.h:486
#define DOCA_LOG_TRC(format,...)
Generates a TRACE application log message.
Definition: doca_log.h:513
#define DOCA_LOG_DBG(format,...)
Generates a DEBUG application log message.
Definition: doca_log.h:496
DOCA_STABLE doca_error_t doca_mmap_set_memrange(struct doca_mmap *mmap, void *addr, size_t len)
Set the memory range of DOCA memory map.
DOCA_STABLE doca_error_t doca_mmap_set_max_num_devices(struct doca_mmap *mmap, uint32_t max_num_devices)
Set a new max number of devices to add to a DOCA Memory Map.
DOCA_STABLE doca_error_t doca_mmap_destroy(struct doca_mmap *mmap)
Destroy DOCA Memory Map structure.
DOCA_STABLE doca_error_t doca_mmap_set_permissions(struct doca_mmap *mmap, uint32_t access_mask)
Set access flags of the registered memory.
DOCA_STABLE doca_error_t doca_mmap_start(struct doca_mmap *mmap)
Start DOCA Memory Map.
DOCA_STABLE doca_error_t doca_mmap_add_dev(struct doca_mmap *mmap, struct doca_dev *dev)
Register DOCA memory map on a given device.
DOCA_STABLE doca_error_t doca_pe_destroy(struct doca_pe *pe)
Destroy doca progress engine.
DOCA_STABLE uint8_t doca_pe_progress(struct doca_pe *pe)
Run the progress engine.
DOCA_STABLE doca_error_t doca_pe_create(struct doca_pe **pe)
Creates DOCA progress engine.
@ DOCA_ACCESS_FLAG_LOCAL_READ_WRITE
Definition: doca_types.h:83
void cleanup_pci_resources(struct doca_devemu_pci_type *pci_type, struct doca_dev *dev)
const char * hotplug_state_to_string(enum doca_devemu_pci_hotplug_state hotplug_state)
#define PCI_TYPE_NUM_BAR_MSIX_PBA_REGIONS
#define PCI_TYPE_SUBSYSTEM_VENDOR_ID
#define PCI_TYPE_NUM_BAR_MSIX_TABLE_REGIONS
static const struct bar_memory_layout_config layout_configs[PCI_TYPE_NUM_BAR_MEMORY_LAYOUT]
#define PCI_TYPE_NUM_BAR_MEMORY_LAYOUT
static const struct bar_db_region_config db_configs[PCI_TYPE_NUM_BAR_DB_REGIONS]
#define PCI_TYPE_SUBSYSTEM_ID
#define PCI_TYPE_VENDOR_ID
#define NVME_TYPE_NAME
#define PCI_TYPE_DEVICE_ID
static const struct bar_region_config msix_pba_configs[PCI_TYPE_NUM_BAR_MSIX_PBA_REGIONS]
#define PCI_TYPE_NUM_BAR_DB_REGIONS
#define PCI_TYPE_NUM_BAR_STATEFUL_REGIONS
#define PCI_TYPE_REVISION_ID
static const struct bar_region_config msix_table_configs[PCI_TYPE_NUM_BAR_MSIX_TABLE_REGIONS]
#define PCI_TYPE_CLASS_CODE
static const struct bar_region_config stateful_configs[PCI_TYPE_NUM_BAR_STATEFUL_REGIONS]
#define PCI_TYPE_NUM_MSIX
struct doca_buf * nvmf_doca_sq_get_dpu_buffer(struct nvmf_doca_sq *sq)
struct doca_buf * nvmf_doca_sq_get_host_buffer(struct nvmf_doca_sq *sq, uintptr_t host_io_address)
void nvmf_doca_request_free(struct nvmf_doca_request *request)
void nvmf_doca_io_add_sq(struct nvmf_doca_io *io, const struct nvmf_doca_io_add_sq_attr *attr, struct nvmf_doca_sq *sq)
void nvmf_doca_request_complete(struct nvmf_doca_request *request)
void nvmf_doca_sq_copy_data(struct nvmf_doca_sq *sq, struct doca_buf *dst_buffer, struct doca_buf *src_buffer, size_t length, union doca_data user_data)
struct nvmf_doca_request * nvmf_doca_request_get(struct nvmf_doca_sq *sq)
void nvmf_doca_io_destroy(struct nvmf_doca_io *io)
doca_error_t nvmf_doca_io_create(const struct nvmf_doca_io_create_attr *attr, struct nvmf_doca_io *io)
void nvmf_doca_sq_stop(struct nvmf_doca_sq *sq)
void nvmf_doca_io_stop(struct nvmf_doca_io *io)
void nvmf_doca_io_post_cqe(struct nvmf_doca_io *io, const struct nvmf_doca_cqe *cqe, union doca_data user_data)
void nvmf_doca_io_rm_sq(struct nvmf_doca_sq *sq)
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:298
struct bar_region_config region
enum doca_devemu_pci_bar_mem_type memory_type
struct spdk_poller * poller
struct spdk_thread * thread
struct nvmf_doca_sq * admin_sq
struct nvmf_doca_io * admin_cq
uint32_t cq_id
Definition: nvmf_doca_io.h:85
struct nvmf_doca_admin_qp * admin_qp_out
struct nvmf_doca_pci_dev_admin * pci_dev_admin
struct nvmf_doca_poll_group * doca_poll_group
struct doca_devemu_pci_type * pci_type
struct doca_dev * emulation_manager
struct doca_pe * pe
Definition: nvmf_doca_io.h:249
struct doca_pe * pe
Definition: nvmf_doca_io.h:200
struct nvmf_doca_pci_dev_poll_group * poll_group
Definition: nvmf_doca_io.h:181
struct nvmf_doca_pci_dev_admin * pci_dev_admin
Definition: nvmf_doca_io.h:182
struct nvmf_doca_cq cq
Definition: nvmf_doca_io.h:185
union spdk_nvme_csts_register csts
union spdk_nvme_cc_register cc
union spdk_nvme_aqa_register aqa
union spdk_nvme_cap_register cap
union spdk_nvme_vs_register vs
struct nvmf_doca_poll_group * admin_qp_pg
struct nvmf_doca_transport * doca_transport
struct spdk_nvmf_ctrlr * ctrlr
struct nvmf_doca_emulation_manager * emulation_manager
enum nvmf_doca_listener_state state
struct nvmf_doca_admin_qp * admin_qp
struct doca_dev_rep * dev_rep
struct spdk_nvmf_subsystem * subsystem
struct spdk_nvme_transport_id trid
struct doca_devemu_pci_dev * pci_dev
struct nvmf_doca_pci_dev_admin * pci_dev_admin
struct nvmf_doca_admin_qp * admin_qp
struct doca_mmap * host_mmap
struct nvmf_doca_poll_group * poll_group
struct nvmf_doca_request * request
struct nvmf_doca_pci_dev_admin * pci_dev_admin
struct nvmf_doca_request * request
struct nvmf_doca_pci_dev_poll_group * pci_dev_pg
struct nvmf_doca_request * request
struct nvmf_doca_request * request
struct doca_pe * pe
struct spdk_nvmf_transport_poll_group pg
struct doca_pe * admin_qp_pe
uint32_t num_elements
Definition: nvmf_doca_io.h:81
struct doca_buf * prp_dpu_buf
Definition: nvmf_doca_io.h:118
struct doca_buf * dpu_buffer[NVMF_REQ_MAX_BUFFERS]
Definition: nvmf_doca_io.h:115
uint32_t num_of_buffers
Definition: nvmf_doca_io.h:119
struct spdk_nvmf_request request
Definition: nvmf_doca_io.h:111
uint32_t residual_length
Definition: nvmf_doca_io.h:120
nvmf_doca_req_cb doca_cb
Definition: nvmf_doca_io.h:123
struct doca_buf * host_buffer[NVMF_REQ_MAX_BUFFERS]
Definition: nvmf_doca_io.h:116
struct nvmf_doca_sq * doca_sq
Definition: nvmf_doca_io.h:112
struct doca_buf * prp_host_buf
Definition: nvmf_doca_io.h:117
TAILQ_HEAD(, nvmf_doca_request) request_pool
uint32_t sq_id
Definition: nvmf_doca_io.h:154
struct nvmf_doca_io * io
Definition: nvmf_doca_io.h:152
struct spdk_nvmf_qpair spdk_qp
Definition: nvmf_doca_io.h:147
struct nvmf_doca_queue queue
Definition: nvmf_doca_io.h:148
uint8_t data[NVMF_DOCA_SQE_SIZE]
Definition: nvmf_doca_io.h:57
struct spdk_nvmf_transport transport
Convenience type for representing opaque data.
Definition: doca_types.h:56
void * ptr
Definition: doca_types.h:57
struct upf_accel_ctx * ctx