NVIDIA DOCA SDK Data Center on a Chip Framework Documentation
urom_multi_workers_bootstrap_sample.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2023 NVIDIA CORPORATION AND AFFILIATES. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without modification, are permitted
5  * provided that the following conditions are met:
6  * * Redistributions of source code must retain the above copyright notice, this list of
7  * conditions and the following disclaimer.
8  * * Redistributions in binary form must reproduce the above copyright notice, this list of
9  * conditions and the following disclaimer in the documentation and/or other materials
10  * provided with the distribution.
11  * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
12  * to endorse or promote products derived from this software without specific prior written
13  * permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
21  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
22  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  *
24  */
25 
26 #include <sched.h>
27 #include <stdbool.h>
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <unistd.h>
32 #include <pthread.h>
33 #include <errno.h>
34 #include <signal.h>
35 
36 #include <doca_ctx.h>
37 #include <doca_pe.h>
38 #include <doca_log.h>
39 #include <doca_urom.h>
40 
41 #include <worker_graph.h>
42 
43 #include "common.h"
44 #include "urom_common.h"
45 
46 DOCA_LOG_REGISTER(UROM_MULTI_WORKERS_BOOTS::SAMPLE);
47 
/*
 * Shared state coordinating the worker threads spawned by this sample.
 * The two counters are incremented under `mutex` (see worker_main), but
 * NOTE(review): worker_force_quit is a plain bool written by the signal
 * handler and by every worker thread, and polled without synchronization —
 * consider volatile sig_atomic_t / C11 atomics; confirm intent.
 */
48 static int nb_exit_workers; /* Number of exited workers */
49 static int nb_running_workers; /* Number of running workers */
50 static pthread_mutex_t mutex; /* Mutex to sync between the workers threads */
51 static bool worker_force_quit; /* Flag for forcing Workers to exit and terminate the sample */
52 
53 /* Worker context per thread */
/*
 * Per-thread argument for worker_main(). Heap-allocated by the spawning loop
 * in multi_workers_bootstrap() and freed by the worker thread itself on exit
 * (see free(ctx) at the end of worker_main).
 */
54 struct worker_ctx {
55  uint32_t gid; /* UROM worker group id (workers are split by worker_id % 2) */
56  uint64_t worker_id; /* UROM worker id to create */
57  uint64_t plugins; /* UROM worker plugins bitmask (graph plugin id here) */
58  struct doca_urom_service *service; /* UROM service context (shared, owned by main thread) */
59  doca_error_t *exit_status; /* Worker exit status; points into main thread's status_arr */
60 };
61 
66  uint64_t data; /* Loopback data */
67  doca_error_t result; /* Worker task result */
68 };
69 
70 /*
71  * Signal handler
72  *
73  * @signum [in]: Signal number to handle
74  */
75 static void signal_handler(int signum)
76 {
77  if (signum == SIGINT || signum == SIGTERM) {
78  DOCA_LOG_INFO("Signal %d received, preparing to exit", signum);
79  worker_force_quit = true;
80  }
81 }
82 
83 /*
84  * Graph loopback task callback function
85  *
86  * @result [in]: task result
87  * @cookie [in]: worker cookie
88  * @data [in]: loopback data
89  */
91 {
92  struct loopback_result *ret = cookie.ptr;
93 
94  if (ret == NULL)
95  return;
96  ret->data = data;
97  ret->result = result;
98 }
99 
100 /*
101  * Thread main function for creating UROM worker context
102  *
103  * @context [in]: Thread context
104  * @return: NULL (dummy return because of pthread requirement)
105  */
106 static void *worker_main(void *context)
107 {
108  uint8_t ret;
109  uint64_t id;
110  int pthread_ret;
111  struct doca_pe *pe;
112  doca_error_t result, tmp_result;
113  enum doca_ctx_states state;
114  const uint64_t nb_tasks = 2;
115  struct doca_urom_worker *worker;
116  struct loopback_result lb_res = {0};
117  struct worker_ctx *ctx = (struct worker_ctx *)context;
118  union doca_data cookie;
119  char *env[] = {"UCX_LOG_LEVEL=debug"};
120
121  /* Create worker PE */
 /* NOTE(review): extraction dropped original line 122 here; from the error
  * handling below it was presumably `result = doca_pe_create(&pe);` —
  * confirm against the original sample source. */
123  if (result != DOCA_SUCCESS) {
124  worker_force_quit = true;
125  DOCA_LOG_ERR("Failed to create PE");
126  goto worker_exit;
127  }
128
129  /* Create and start worker context */
 /* NOTE(review): extraction dropped original line 130 — presumably the head
  * of the call `result = start_urom_worker(pe,` (see prototype in
  * urom_common.c), whose remaining arguments follow below. */
131  ctx->service,
132  ctx->worker_id,
133  &ctx->gid,
134  nb_tasks,
135  NULL,
136  env,
137  1,
138  ctx->plugins,
139  &worker);
140  if (result != DOCA_SUCCESS) {
141  DOCA_LOG_ERR("Start UROM worker failed, returned error: %s", doca_error_get_descr(result));
142  worker_force_quit = true;
143  goto pe_destroy;
144  }
145
146  ret = 0;
147  /* Progress till worker state changes to running or error happened */
148  do {
149  ret += doca_pe_progress(pe);
 /* NOTE(review): extraction dropped original line 150 — presumably
  * `result = doca_ctx_get_state(doca_urom_worker_as_ctx(worker), &state);`
  * given the loop condition reads both `state` and `result`. */
151  } while (state == DOCA_CTX_STATE_STARTING && result == DOCA_SUCCESS && !worker_force_quit);
152
153  /* Verify that worker state is running */
154  if (ret == 0 || state != DOCA_CTX_STATE_RUNNING) {
155  DOCA_LOG_ERR("Bad worker state");
 /* NOTE(review): extraction dropped original line 156 — presumably an error
  * assignment such as `result = DOCA_ERROR_BAD_STATE;` — confirm. */
157  goto err_exit;
158  }
159
160  /* Get worker id */
161  result = doca_urom_worker_get_id(worker, &id);
162  if (result != DOCA_SUCCESS)
163  goto err_exit;
164
165  DOCA_LOG_INFO("Worker id is %lu", id);
166
167  /* Run graph loopback task */
168  cookie.ptr = &lb_res;
 /* NOTE(review): extraction dropped original line 169 — presumably
  * `result = urom_graph_task_loopback(worker, cookie, id,
  * worker_graph_loopback_finished_cb);` (prototype in worker_graph.c). */
170  if (result != DOCA_SUCCESS)
171  goto err_exit;
172
173  /* Wait for task completion */
174  do {
175  ret = doca_pe_progress(pe);
176  } while (ret == 0 && !worker_force_quit);
177
178  if (lb_res.result != DOCA_SUCCESS) {
179  DOCA_LOG_ERR("Loopback Failed, result [%s]", doca_error_get_descr(lb_res.result));
180  worker_force_quit = true;
181  result = lb_res.result;
182  goto worker_destroy;
183  }
184
185  /* Check if data was updated accordingly in the task callback function */
186  if (lb_res.data != id) {
187  DOCA_LOG_ERR("Loopback data is wrong, should be [%lu] and received [%lu]", id, lb_res.data);
188  worker_force_quit = true;
 /* NOTE(review): extraction dropped original line 189 — presumably an error
  * assignment (e.g. `result = DOCA_ERROR_IO_FAILED;`) — confirm. */
190  goto worker_destroy;
191  }
192
193  DOCA_LOG_INFO("Worker id %lu received loopback data %lu", id, lb_res.data);
194
195  pthread_ret = pthread_mutex_lock(&mutex);
196  if (pthread_ret != 0) {
197  DOCA_LOG_ERR("Failed to lock resource, error=%d", errno);
198  goto err_exit;
199  }
 /* NOTE(review): extraction dropped original line 200 — presumably
  * `nb_running_workers++;` under the mutex, since multi_workers_bootstrap
  * waits for nb_running_workers to reach actual_workers. */
201  pthread_ret = pthread_mutex_unlock(&mutex);
202  if (pthread_ret != 0) {
203  DOCA_LOG_ERR("Failed to unlock resource, error=%d", errno);
204  goto err_exit;
205  }
206
207  /* Wait till triggering sample teardown (polling; worker_force_quit is set
  * by the signal handler or by any failing thread) */
208  while (!worker_force_quit)
209  sleep(1);
210
211  goto worker_destroy;
212
213 err_exit:
214  worker_force_quit = true;
215 worker_destroy:
 /* Teardown path: request stop, progress the PE until the context is IDLE,
  * then destroy worker and PE; first error encountered is preserved via
  * DOCA_ERROR_PROPAGATE. */
216  tmp_result = doca_ctx_stop(doca_urom_worker_as_ctx(worker));
217  if (tmp_result != DOCA_SUCCESS && tmp_result != DOCA_ERROR_IN_PROGRESS) {
218  DOCA_LOG_ERR("Failed to request stop UROM worker");
219  DOCA_ERROR_PROPAGATE(result, tmp_result);
220  }
221
222  ret = 0;
223  do {
224  ret += doca_pe_progress(pe);
225  tmp_result = doca_ctx_get_state(doca_urom_worker_as_ctx(worker), &state);
226  } while (state != DOCA_CTX_STATE_IDLE && tmp_result == DOCA_SUCCESS);
227
228  if (ret == 0 || state != DOCA_CTX_STATE_IDLE) {
229  DOCA_LOG_ERR("Failed to stop worker context");
230  goto pe_destroy;
231  }
232
233  tmp_result = doca_urom_worker_destroy(worker);
234  if (tmp_result != DOCA_SUCCESS) {
235  DOCA_LOG_ERR("Failed to destroy UROM worker returned error: %s", doca_error_get_descr(tmp_result));
236  DOCA_ERROR_PROPAGATE(result, tmp_result);
237  }
238
239 pe_destroy:
240  tmp_result = doca_pe_destroy(pe);
241  if (tmp_result != DOCA_SUCCESS) {
242  DOCA_LOG_ERR("Failed to destroy PE");
243  DOCA_ERROR_PROPAGATE(result, tmp_result);
244  }
245
246 worker_exit:
 /* Publish this thread's status into main's status_arr, bump the exit
  * counter under the mutex, then free the heap-allocated thread context. */
247  *ctx->exit_status = result;
248  pthread_ret = pthread_mutex_lock(&mutex);
249  if (pthread_ret != 0) {
250  DOCA_LOG_ERR("Failed to lock resource, error=%d", errno);
251  goto exit;
252  }
253  nb_exit_workers++;
254  pthread_ret = pthread_mutex_unlock(&mutex);
255  if (pthread_ret != 0)
256  DOCA_LOG_ERR("Failed to unlock resource, error=%d", errno);
257 exit:
258  free(ctx);
259  return NULL;
260 }
261 
262 /*
263  * Worker odd ids query task completion
264  *
265  * @task [in]: worker ids query task
266  * @task_user_data [in]: doca_data from the task
267  * @ctx_user_data [in]: doca_data from the context
268  */
269 static void odd_gid_task_cb(struct doca_urom_service_get_workers_by_gid_task *task,
270  union doca_data task_user_data,
271  union doca_data ctx_user_data)
272 {
273  (void)ctx_user_data;
274 
275  size_t i;
276  size_t worker_counts;
277  const uint64_t *ids;
279  uint64_t *is_failure = task_user_data.ptr;
280 
281  if (task == NULL) {
282  *is_failure = 1;
283  return;
284  }
285 
287  if (result != DOCA_SUCCESS) {
288  DOCA_LOG_ERR("Service workers query by gid 1 failed");
289  *is_failure = 1;
290  }
291 
294 
295  for (i = 0; i < worker_counts; i++) {
296  DOCA_LOG_DBG("Worker #%lu id is %lu", i, ids[i]);
297  if (ids[0] % 2 != 1) {
298  DOCA_LOG_ERR("Wrong worker id exists in workers ids list, should be odd number");
299  *is_failure = 1;
300  return;
301  }
302  }
303  DOCA_LOG_INFO("Worker odd ids query finished successfully");
304  *is_failure = 0;
306 }
307 
308 /*
309  * Worker even ids query task completion
310  *
311  * @task [in]: worker ids query task
312  * @task_user_data [in]: doca_data from the task
313  * @ctx_user_data [in]: doca_data from the context
314  */
315 static void even_gid_task_cb(struct doca_urom_service_get_workers_by_gid_task *task,
316  union doca_data task_user_data,
317  union doca_data ctx_user_data)
318 {
319  (void)ctx_user_data;
320 
321  size_t i;
322  size_t worker_counts;
323  const uint64_t *ids;
325  uint64_t *is_failure = task_user_data.ptr;
326 
327  if (task == NULL) {
328  *is_failure = 1;
329  return;
330  }
331 
333  if (result != DOCA_SUCCESS) {
334  DOCA_LOG_ERR("Service workers query by gid 0 failed");
335  *is_failure = 1;
336  }
337 
340 
341  for (i = 0; i < worker_counts; i++) {
342  DOCA_LOG_DBG("Worker #%lu id is %lu", i, ids[i]);
343  if (ids[0] % 2 != 0) {
344  DOCA_LOG_ERR("Wrong worker id exists in workers ids list, should be even number");
345  *is_failure = 1;
346  return;
347  }
348  }
349  DOCA_LOG_INFO("Worker even ids query finished successfully");
350  *is_failure = 0;
352 }
353 
354 /*
355  * Run multi_workers_bootstrap sample
356  *
357  * @device_name [in]: DOCA UROM device
358  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise.
359  */
/* NOTE(review): extraction dropped original line 360 — per this file's
 * declaration index the signature is
 * `doca_error_t multi_workers_bootstrap(char *device_name)`. */
361 {
362  struct doca_pe *pe;
363  union doca_data data;
364  struct doca_dev *dev;
365  doca_cpu_set_t cpuset;
366  struct worker_ctx *ctx;
367  uint64_t is_failure = 0;
368  const int nb_workers = 4;
369  pthread_t ids[nb_workers];
370  int ret, idx, actual_workers = 0;
371  doca_error_t status_arr[nb_workers];
372  size_t i, plugins_count = 0;
373  char *plugin_name = "worker_graph";
374  doca_error_t result, tmp_result;
375  struct doca_urom_service *service;
376  const struct doca_urom_service_plugin_info *plugins, *graph_info = NULL;
377  struct doca_urom_service_get_workers_by_gid_task *odd_gid_task, *even_gid_task;
378
379  data.ptr = &is_failure;
 /* NOTE(review): this memset is only meaningful if DOCA_SUCCESS == 0
  * (it fills bytes, not doca_error_t elements) — confirm the enum value. */
380  memset(status_arr, DOCA_SUCCESS, sizeof(status_arr));
381
382  if (pthread_mutex_init(&mutex, NULL) != 0) {
383  DOCA_LOG_ERR("Failed to initiate UROM worker lock, error=%d", errno);
384  return DOCA_ERROR_BAD_STATE;
385  }
386
387  result = open_doca_device_with_ibdev_name((uint8_t *)device_name, strlen(device_name), NULL, &dev);
388  if (result != DOCA_SUCCESS)
389  goto mutex_free;
390
 /* NOTE(review): extraction dropped original line 391 — presumably
  * `result = doca_pe_create(&pe);` given the cleanup label pe_cleanup. */
392  if (result != DOCA_SUCCESS)
393  goto close_dev;
394
395  result = start_urom_service(pe, dev, nb_workers, &service);
396  if (result != DOCA_SUCCESS)
397  goto pe_cleanup;
398
399  result = doca_urom_service_get_plugins_list(service, &plugins, &plugins_count);
400  if (result != DOCA_SUCCESS || plugins_count == 0)
401  goto service_stop;
402
 /* Find the "worker_graph" plugin among the DPU-side plugins */
403  for (i = 0; i < plugins_count; i++) {
404  if (strcmp(plugin_name, plugins[i].plugin_name) == 0) {
405  graph_info = &plugins[i];
406  break;
407  }
408  }
409
410  if (graph_info == NULL) {
411  DOCA_LOG_ERR("Failed to match graph plugin");
 /* NOTE(review): extraction dropped original line 412 — presumably an error
  * assignment (e.g. `result = DOCA_ERROR_INVALID_VALUE;`) — confirm. */
413  goto service_stop;
414  }
415
416  result = urom_graph_init(graph_info->id, graph_info->version);
417  if (result != DOCA_SUCCESS)
418  goto service_stop;
419
 /* Require the first 8 CPUs to be in the service's allowed cpuset
  * (hard-coded bound; bails out silently if any is missing) */
420  doca_urom_service_get_cpuset(service, &cpuset);
421  for (idx = 0; idx < 8; idx++) {
422  if (!doca_cpu_is_set(idx, &cpuset))
423  goto service_stop;
424  }
425
426  signal(SIGINT, signal_handler);
427  signal(SIGTERM, signal_handler);
428
429  /* Create workers threads */
430  for (idx = 0; idx < nb_workers && !worker_force_quit; idx++) {
431  ctx = malloc(sizeof(*ctx));
432  if (ctx == NULL) {
433  DOCA_LOG_ERR("Failed to create worker context");
434  worker_force_quit = true;
 /* NOTE(review): extraction dropped original line 435 — presumably
  * `result = DOCA_ERROR_NO_MEMORY;` — confirm. */
436  goto progress;
437  }
438
439  ctx->service = service;
440  ctx->worker_id = idx;
441  /* Split the workers to two groups according to modulo of two */
442  ctx->gid = idx % 2;
443  ctx->plugins = graph_info->id;
444  ctx->exit_status = &status_arr[idx];
445  if (pthread_create(&ids[idx], NULL, worker_main, ctx) != 0) {
446  worker_force_quit = true;
 /* NOTE(review): extraction dropped original line 447 — presumably an error
  * assignment — confirm. */
448  goto progress;
449  }
450  actual_workers++;
451  }
452
453 progress:
454  /* Handling workers requests for bootstrap */
455  do {
 /* NOTE(review): extraction dropped original line 456 — presumably a
  * `doca_pe_progress(pe);` call driving the service. */
457  } while (nb_running_workers != actual_workers && !worker_force_quit);
 /* NOTE(review): extraction dropped original line 458 — presumably
  * `if (worker_force_quit)` guarding this goto — confirm. */
459  goto teardown;
460
461  /* Query worker ids by group id 1 */
462  DOCA_LOG_INFO("Start service workers query task with gid 1 for odd ids");
 /* NOTE(review): extraction dropped original line 463 — presumably
  * `result = doca_urom_service_get_workers_by_gid_task_allocate_init(service,
  * 1, odd_gid_task_cb, &odd_gid_task);` — confirm. */
464  if (result != DOCA_SUCCESS) {
465  DOCA_LOG_ERR("Failed to allocate service query task");
466  goto teardown;
467  }
 /* NOTE(review): extraction dropped original lines 468-469 — presumably
  * setting `data` as task user data (doca_task_set_user_data) and
  * submitting via doca_task_submit — confirm. */
470  if (result != DOCA_SUCCESS) {
471  DOCA_LOG_ERR("Failed to submit service query task");
472  goto teardown;
473  }
474
475  do {
476  ret = doca_pe_progress(pe);
477  } while (ret == 0 && !is_failure && !worker_force_quit);
478
479  if (is_failure) {
480  DOCA_LOG_ERR("Worker odd ids query finished with errors");
481  goto teardown;
482  }
483
484  /* Query worker ids by group id 0 */
485  DOCA_LOG_INFO("Start service workers query task with gid 0 for even ids");
 /* NOTE(review): extraction dropped original line 486 — presumably the
  * gid-0 allocate_init with even_gid_task_cb into even_gid_task — confirm. */
487  if (result != DOCA_SUCCESS) {
488  DOCA_LOG_ERR("Failed to allocate service query task");
489  goto teardown;
490  }
 /* NOTE(review): extraction dropped original lines 491-492 — presumably
  * set-user-data and submit for even_gid_task — confirm. */
493  if (result != DOCA_SUCCESS) {
494  DOCA_LOG_ERR("Failed to submit service query task");
495  goto teardown;
496  }
497
498  do {
499  ret = doca_pe_progress(pe);
500  } while (ret == 0 && !is_failure && !worker_force_quit);
501
502  if (is_failure) {
503  DOCA_LOG_ERR("Worker even ids query finished with errors");
504  goto teardown;
505  }
506
507 teardown:
508  /* Handling workers requests for teardown */
509  do {
 /* NOTE(review): extraction dropped original line 510 — presumably a
  * `doca_pe_progress(pe);` call. */
511  } while (nb_exit_workers != actual_workers);
512
513  /* Wait all threads to exit */
514  for (idx = 0; idx < actual_workers; idx++) {
515  pthread_join(ids[idx], NULL);
516  DOCA_ERROR_PROPAGATE(result, status_arr[idx]);
517  }
518
519 service_stop:
520  tmp_result = doca_ctx_stop(doca_urom_service_as_ctx(service));
521  if (tmp_result != DOCA_SUCCESS) {
522  DOCA_LOG_ERR("Failed to stop UROM service");
523  DOCA_ERROR_PROPAGATE(result, tmp_result);
524  }
525
526  tmp_result = doca_urom_service_destroy(service);
527  if (tmp_result != DOCA_SUCCESS) {
528  DOCA_LOG_ERR("Failed to destroy UROM service");
529  DOCA_ERROR_PROPAGATE(result, tmp_result);
530  }
531 pe_cleanup:
532  tmp_result = doca_pe_destroy(pe);
533  if (tmp_result != DOCA_SUCCESS) {
534  DOCA_LOG_ERR("Failed to destroy PE");
535  DOCA_ERROR_PROPAGATE(result, tmp_result);
536  }
537
538 close_dev:
539  tmp_result = doca_dev_close(dev);
540  if (tmp_result != DOCA_SUCCESS) {
541  DOCA_LOG_ERR("Failed to close device");
542  DOCA_ERROR_PROPAGATE(result, tmp_result);
543  }
544
545 mutex_free:
546  if (pthread_mutex_destroy(&mutex) != 0)
547  DOCA_LOG_ERR("Failed to destroy UROM worker lock, error=%d", errno);
548
549  return result;
550 }
#define NULL
Definition: __stddef_null.h:26
int32_t result
doca_error_t open_doca_device_with_ibdev_name(const uint8_t *value, size_t val_size, tasks_check func, struct doca_dev **retval)
Definition: common.c:84
uint64_t cookie
static struct doca_pe * pe
DOCA_STABLE doca_error_t doca_ctx_get_state(const struct doca_ctx *ctx, enum doca_ctx_states *state)
Get context state.
DOCA_STABLE doca_error_t doca_ctx_stop(struct doca_ctx *ctx)
Stops the context allowing reconfiguration.
doca_ctx_states
This enum defines the states of a context.
Definition: doca_ctx.h:83
@ DOCA_CTX_STATE_STARTING
Definition: doca_ctx.h:93
@ DOCA_CTX_STATE_IDLE
Definition: doca_ctx.h:88
@ DOCA_CTX_STATE_RUNNING
Definition: doca_ctx.h:98
DOCA_STABLE doca_error_t doca_dev_close(struct doca_dev *dev)
Destroy allocated local device instance.
#define DOCA_ERROR_PROPAGATE(r, t)
Save the first encountered doca_error_t.
Definition: doca_error.h:83
enum doca_error doca_error_t
DOCA API return codes.
DOCA_STABLE const char * doca_error_get_descr(doca_error_t error)
Returns the description string of an error code.
@ DOCA_ERROR_INVALID_VALUE
Definition: doca_error.h:44
@ DOCA_ERROR_BAD_STATE
Definition: doca_error.h:56
@ DOCA_ERROR_IO_FAILED
Definition: doca_error.h:55
@ DOCA_SUCCESS
Definition: doca_error.h:38
@ DOCA_ERROR_NO_MEMORY
Definition: doca_error.h:45
@ DOCA_ERROR_IN_PROGRESS
Definition: doca_error.h:64
#define DOCA_LOG_ERR(format,...)
Generates an ERROR application log message.
Definition: doca_log.h:466
#define DOCA_LOG_INFO(format,...)
Generates an INFO application log message.
Definition: doca_log.h:486
#define DOCA_LOG_DBG(format,...)
Generates a DEBUG application log message.
Definition: doca_log.h:496
DOCA_STABLE doca_error_t doca_pe_destroy(struct doca_pe *pe)
Destroy doca progress engine.
DOCA_STABLE doca_error_t doca_task_get_status(const struct doca_task *task)
Get task status.
DOCA_STABLE doca_error_t doca_task_submit(struct doca_task *task)
Submit a task to a progress engine.
DOCA_STABLE uint8_t doca_pe_progress(struct doca_pe *pe)
Run the progress engine.
DOCA_STABLE void doca_task_set_user_data(struct doca_task *task, union doca_data user_data)
Set user data to a task.
DOCA_STABLE doca_error_t doca_pe_create(struct doca_pe **pe)
Creates DOCA progress engine.
ucs_cpu_set_t doca_cpu_set_t
DOCA CPU set structure.
Definition: doca_urom.h:98
DOCA_EXPERIMENTAL doca_error_t doca_urom_service_get_plugins_list(struct doca_urom_service *service_ctx, const struct doca_urom_service_plugin_info **plugins, size_t *plugins_count)
This method gets the list of supported plugins on service's DPU side.
DOCA_EXPERIMENTAL doca_error_t doca_urom_service_get_cpuset(struct doca_urom_service *service_ctx, doca_cpu_set_t *cpuset)
Get the allowed CPU set for the service.
DOCA_EXPERIMENTAL struct doca_ctx * doca_urom_service_as_ctx(struct doca_urom_service *service_ctx)
Convert service_ctx instance into a generalized context for use with DOCA core objects.
DOCA_EXPERIMENTAL doca_error_t doca_urom_service_destroy(struct doca_urom_service *service_ctx)
This method destroys a UROM Service context.
#define doca_cpu_is_set(_cpu, _cpusetp)
Check if specific bit in DOCA CPU is set.
Definition: doca_urom.h:119
DOCA_EXPERIMENTAL doca_error_t doca_urom_worker_destroy(struct doca_urom_worker *worker_ctx)
This method destroys a UROM Worker context.
DOCA_EXPERIMENTAL doca_error_t doca_urom_worker_get_id(struct doca_urom_worker *worker_ctx, uint64_t *worker_id)
This method gets the Worker context ID to be used to identify the Worker. Worker IDs enable an applic...
DOCA_EXPERIMENTAL struct doca_ctx * doca_urom_worker_as_ctx(struct doca_urom_worker *worker_ctx)
DOCA_EXPERIMENTAL struct doca_task * doca_urom_service_get_workers_by_gid_task_as_task(struct doca_urom_service_get_workers_by_gid_task *task)
This method converts a service get workers task to doca_task.
DOCA_EXPERIMENTAL doca_error_t doca_urom_service_get_workers_by_gid_task_allocate_init(struct doca_urom_service *service_ctx, uint32_t gid, doca_urom_service_get_workers_by_gid_task_completion_cb_t cb, struct doca_urom_service_get_workers_by_gid_task **task)
Allocate Service get workers by gid task and set task attributes.
DOCA_EXPERIMENTAL doca_error_t doca_urom_service_get_workers_by_gid_task_release(struct doca_urom_service_get_workers_by_gid_task *task)
Release Service get workers by gid task.
DOCA_EXPERIMENTAL const uint64_t * doca_urom_service_get_workers_by_gid_task_get_worker_ids(struct doca_urom_service_get_workers_by_gid_task *task)
Get Service get workers task ids array.
DOCA_EXPERIMENTAL size_t doca_urom_service_get_workers_by_gid_task_get_workers_count(struct doca_urom_service_get_workers_by_gid_task *task)
Get the number of workers returned for the requested gid.
struct doca_urom_service * service
Convenience type for representing opaque data.
Definition: doca_types.h:56
void * ptr
Definition: doca_types.h:57
struct upf_accel_ctx * ctx
doca_error_t start_urom_service(struct doca_pe *pe, struct doca_dev *dev, uint64_t nb_workers, struct doca_urom_service **service)
Definition: urom_common.c:95
doca_error_t start_urom_worker(struct doca_pe *pe, struct doca_urom_service *service, uint64_t worker_id, uint32_t *gid, uint64_t nb_tasks, doca_cpu_set_t *cpuset, char **env, size_t env_count, uint64_t plugins, struct doca_urom_worker **worker)
Definition: urom_common.c:148
static void even_gid_task_cb(struct doca_urom_service_get_workers_by_gid_task *task, union doca_data task_user_data, union doca_data ctx_user_data)
static bool worker_force_quit
static pthread_mutex_t mutex
DOCA_LOG_REGISTER(UROM_MULTI_WORKERS_BOOTS::SAMPLE)
static int nb_running_workers
static void worker_graph_loopback_finished_cb(doca_error_t result, union doca_data cookie, uint64_t data)
static void * worker_main(void *context)
static int nb_exit_workers
doca_error_t multi_workers_bootstrap(char *device_name)
static void odd_gid_task_cb(struct doca_urom_service_get_workers_by_gid_task *task, union doca_data task_user_data, union doca_data ctx_user_data)
static void signal_handler(int signum)
doca_error_t urom_graph_init(uint64_t plugin_id, uint64_t version)
Definition: worker_graph.c:194
doca_error_t urom_graph_task_loopback(struct doca_urom_worker *worker_ctx, union doca_data cookie, uint64_t data, urom_graph_loopback_finished cb)
Definition: worker_graph.c:135