/*
 * NVIDIA DOCA SDK — Data Center on a Chip framework, documentation extract.
 * File: ip_frag_dp.c
 */
1 /*
2  * Copyright (c) 2025 NVIDIA CORPORATION AND AFFILIATES. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without modification, are permitted
5  * provided that the following conditions are met:
6  * * Redistributions of source code must retain the above copyright notice, this list of
7  * conditions and the following disclaimer.
8  * * Redistributions in binary form must reproduce the above copyright notice, this list of
9  * conditions and the following disclaimer in the documentation and/or other materials
10  * provided with the distribution.
11  * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
12  * to endorse or promote products derived from this software without specific prior written
13  * permission.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
17  * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
20  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
22  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  *
24  */
25 
26 #include "ip_frag_dp.h"
27 #include <flow_common.h>
28 
29 #include <doca_log.h>
30 #include <doca_flow.h>
31 #include <dpdk_utils.h>
32 #include <packet_parser.h>
33 
34 #include <rte_lcore.h>
35 #include <rte_malloc.h>
36 #include <rte_ethdev.h>
37 #include <rte_ip_frag.h>
38 #include <rte_cycles.h>
39 #include <rte_mempool.h>
40 
41 #include <stdbool.h>
42 
43 #define IP_FRAG_MAX_PKT_BURST 32
44 #define IP_FRAG_BURST_PREFETCH (IP_FRAG_MAX_PKT_BURST / 8)
45 #define IP_FRAG_FLUSH_THRESHOLD 16
46 
47 #define IP_FRAG_TBL_BUCKET_SIZE 4
48 
49 DOCA_LOG_REGISTER(IP_FRAG::DP);
50 
52  uint64_t frags_rx; /* Fragments received that need reassembly */
53  uint64_t whole; /* Whole packets (either reassembled or not fragmented) */
54  uint64_t mtu_fits_rx; /* Packets received that are within the MTU size and dont require fragmentation */
55  uint64_t mtu_exceed_rx; /* Packets received that exceed the MTU size and require fragmentation */
56  uint64_t frags_gen; /* Fragments generated from packets that exceed the MTU size */
57  uint64_t err; /* Errors */
58 };
59 
61  const struct ip_frag_config *cfg; /* Application config */
62  uint16_t queue_id; /* Queue id */
63  struct ip_frag_sw_counters sw_counters[IP_FRAG_PORT_NUM]; /* SW counters */
64  struct rte_eth_dev_tx_buffer *tx_buffer; /* TX buffer */
65  uint64_t tx_buffer_err; /* TX buffer error counter */
66  struct rte_ip_frag_tbl *frag_tbl; /* Fragmentation table */
67  struct rte_mempool *indirect_pool; /* Indirect memory pool */
68  struct rte_ip_frag_death_row death_row; /* Fragmentation table expired fragments death row */
69 } __rte_aligned(RTE_CACHE_LINE_SIZE);
70 
/* Global stop flag polled by the worker thread main loops; set elsewhere (e.g. a signal handler) to terminate them */
bool force_stop = false;
72 
73 /*
74  * Drop the packet and increase error counters.
75  *
76  * @wt_data [in]: worker thread data
77  * @rx_port_id [in]: incoming packet port id
78  * @pkt [in]: packet to drop
79  */
80 static void ip_frag_pkt_err_drop(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, struct rte_mbuf *pkt)
81 {
82  wt_data->sw_counters[rx_port_id].err++;
83  rte_pktmbuf_free(pkt);
84 }
85 
86 /*
87  * Parse the packet.
88  *
89  * @pkt_type [out]: incoming packet type
90  * @pkt [in]: pointer to the pkt
91  * @parse_ctx [out]: pointer to the parser context
92  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
93  */
95  struct rte_mbuf *pkt,
96  struct tun_parser_ctx *parse_ctx)
97 {
98  uint8_t *data_beg = rte_pktmbuf_mtod(pkt, uint8_t *);
99  uint8_t *data_end = data_beg + rte_pktmbuf_data_len(pkt);
100 
101  switch (*pkt_type) {
103  return tunnel_parse(data_beg, data_end, parse_ctx);
105  return plain_parse(data_beg, data_end, &parse_ctx->inner);
107  return unknown_parse(data_beg, data_end, parse_ctx, pkt_type);
108  default:
109  assert(0);
111  }
112 }
113 
114 /*
115  * Prepare packet mbuf for reassembly.
116  *
117  * @pkt [in]: packet
118  * @l2_len [in]: L2 header length
119  * @l3_len [in]: L3 header length
120  * @flags [in]: mbuf flags to set
121  */
static void ip_frag_pkt_reassemble_prepare(struct rte_mbuf *pkt, size_t l2_len, size_t l3_len, uint64_t flags)
{
	/* The rte_ip_frag reassembly library locates the IP header via l2_len/l3_len */
	pkt->l2_len = l2_len;
	pkt->l3_len = l3_len;
	/* Callers pass the outer/inner "modified" dynflag here so the fixup stage later
	 * knows which headers of the reassembled packet need patching */
	pkt->ol_flags |= flags;
}
128 
129 /*
130  * Calculate and set IPv4 header checksum
131  *
132  * @hdr [in]: IPv4 header
133  */
static void ip_frag_ipv4_hdr_cksum(struct rte_ipv4_hdr *hdr)
{
	/* The checksum field must be zero while the checksum over the header is computed */
	hdr->hdr_checksum = 0;
	hdr->hdr_checksum = rte_ipv4_cksum(hdr);
}
139 
140 /*
141  * Calculate and set any required network-layer checksums for the parsed headers
142  *
143  * @ctx [in]: pointer to the parser network-layer context
144  */
146 {
147  if (ctx->ip_version == DOCA_FLOW_PROTO_IPV4)
148  ip_frag_ipv4_hdr_cksum(ctx->ipv4_hdr);
149 }
150 
151 /*
152  * Calculate and set any required network-layer checksums for the headers
153  *
154  * @wt_data [in]: worker thread data
155  * @pkt [in]: packet
156  * @l2_len [in]: length of L2 header
157  * @l3_len [in]: length of L3 header
158  * @ipv4_hdr [in]: header to calculate the checksum for
159  */
160 static void ip_frag_ipv4_cksum_handle(struct ip_frag_wt_data *wt_data,
161  struct rte_mbuf *pkt,
162  uint64_t l2_len,
163  uint64_t l3_len,
164  struct rte_ipv4_hdr *ipv4_hdr)
165 {
166  if (wt_data->cfg->hw_cksum) {
167  pkt->l2_len = l2_len;
168  pkt->l3_len = l3_len;
169  pkt->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM;
170  } else {
172  }
173 }
174 
175 /*
176  * Calculate and set any required network-layer checksums for the parsed headers
177  *
178  * @wt_data [in]: worker thread data
179  * @pkt [in]: packet
180  * @link_ctx [in]: pointer to the parser link-layer context
181  * @network_ctx [in]: pointer to the parser network-layer context
182  */
183 static void ip_frag_network_cksum_handle(struct ip_frag_wt_data *wt_data,
184  struct rte_mbuf *pkt,
185  struct link_parser_ctx *link_ctx,
186  struct network_parser_ctx *network_ctx)
187 {
188  if (network_ctx->ip_version != DOCA_FLOW_PROTO_IPV4)
189  return;
190  ip_frag_ipv4_cksum_handle(wt_data, pkt, link_ctx->len, network_ctx->len, network_ctx->ipv4_hdr);
191 }
192 
193 /*
194  * Handle UDP checksum
195  *
196  * @wt_data [in]: worker thread data
197  * @pkt [in]: packet
198  * @link_ctx [in]: pointer to the parser link-layer context
199  * @network_ctx [in]: pointer to the parser network-layer context
200  * @transport_ctx [in]: pointer to the parser transport-layer context
201  */
202 static void ip_frag_udp_cksum_handle(struct ip_frag_wt_data *wt_data,
203  struct rte_mbuf *pkt,
204  struct link_parser_ctx *link_ctx,
205  struct network_parser_ctx *network_ctx,
206  struct transport_parser_ctx *transport_ctx)
207 {
208  if (network_ctx->ip_version == DOCA_FLOW_PROTO_IPV4) {
209  /* UDP checksum is optional according to the spec */
210  transport_ctx->udp_hdr->dgram_cksum = 0;
211  } else {
212  if (wt_data->cfg->hw_cksum) {
213  pkt->l2_len = link_ctx->len;
214  pkt->l3_len = network_ctx->len;
215  transport_ctx->udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(network_ctx->ipv6.hdr, pkt->ol_flags);
216  pkt->ol_flags |= RTE_MBUF_F_TX_IPV6 | RTE_MBUF_F_TX_UDP_CKSUM;
217  } else {
218  /* UDP checksum can be omitted for tunnel headers IETF RFC 6935 */
219  transport_ctx->udp_hdr->dgram_cksum = 0;
220  }
221  }
222 }
223 
224 /*
225  * Fixup the packet headers after reassembly. This involves fixing any length fields that may have become outdated and
226  * recalculating the necessary checksums (potentially zeroing them out when allowed by the spec)
227  *
228  * @wt_data [in]: worker thread data
229  * @pkt_type [in]: incoming packet type
230  * @pkt [in]: packet
231  * @parse_ctx [in]: pointer to the parser context
232  */
233 static void ip_frag_pkt_fixup(struct ip_frag_wt_data *wt_data,
234  enum parser_pkt_type pkt_type,
235  struct rte_mbuf *pkt,
236  struct tun_parser_ctx *parse_ctx)
237 {
238  assert(pkt_type == PARSER_PKT_TYPE_TUNNELED || pkt_type == PARSER_PKT_TYPE_PLAIN);
239 
240  if (pkt->ol_flags & wt_data->cfg->mbuf_flag_inner_modified) {
241  if (pkt_type == PARSER_PKT_TYPE_TUNNELED) {
242  /* Payload has been modified, need to fix the encapsulation accordingly going from inner to
243  * outer protocols since changing inner data may affect outer's checksums. Fix GTPU payload
244  * length first (which includes optional fields)... */
245  parse_ctx->gtp_ctx.gtp_hdr->plen =
246  rte_cpu_to_be_16(rte_pktmbuf_pkt_len(pkt) -
247  (parse_ctx->link_ctx.len + parse_ctx->network_ctx.len +
248  parse_ctx->transport_ctx.len + sizeof(*parse_ctx->gtp_ctx.gtp_hdr)));
249 
250  /* ...then fix UDP total length... */
251  parse_ctx->transport_ctx.udp_hdr->dgram_len = rte_cpu_to_be_16(
252  rte_pktmbuf_pkt_len(pkt) - (parse_ctx->link_ctx.len + parse_ctx->network_ctx.len));
253 
254  if (parse_ctx->network_ctx.ip_version == DOCA_FLOW_PROTO_IPV4) {
255  /* ...and fix the IP total length which requires recalculating header checksum in case
256  * of IPv4... */
257  parse_ctx->network_ctx.ipv4_hdr->total_length =
258  rte_cpu_to_be_16(rte_pktmbuf_pkt_len(pkt) - parse_ctx->link_ctx.len);
260  pkt,
261  &parse_ctx->link_ctx,
262  &parse_ctx->network_ctx);
263  } else {
264  /* ...or just IP payload length field in case of IPv6.. */
265  parse_ctx->network_ctx.ipv6.hdr->payload_len =
266  rte_cpu_to_be_16(rte_pktmbuf_pkt_len(pkt) - parse_ctx->link_ctx.len -
267  sizeof(*parse_ctx->network_ctx.ipv6.hdr));
268  }
269 
270  /* ...either recalculate or zero-out encapsulation UDP checksum... */
271  ip_frag_udp_cksum_handle(wt_data,
272  pkt,
273  &parse_ctx->link_ctx,
274  &parse_ctx->network_ctx,
275  &parse_ctx->transport_ctx);
276 
277  /* ...fix payload network-level header checksum, if necessary */
279  } else {
281  pkt,
282  &parse_ctx->inner.link_ctx,
283  &parse_ctx->inner.network_ctx);
284  }
285  } else if (pkt->ol_flags & wt_data->cfg->mbuf_flag_outer_modified) {
286  ip_frag_network_cksum_handle(wt_data, pkt, &parse_ctx->link_ctx, &parse_ctx->network_ctx);
287  }
288 }
289 
290 /*
291  * Flatten chained mbuf to a single contiguous mbuf segment
292  *
293  * @pkt [in]: chained mbuf head
294  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
295  */
296 static doca_error_t ip_frag_pkt_flatten(struct rte_mbuf *pkt)
297 {
298  uint16_t tail_size = rte_pktmbuf_pkt_len(pkt) - rte_pktmbuf_data_len(pkt);
299  struct rte_mbuf *tail = pkt->next;
300  struct rte_mbuf *tmp;
301  uint16_t seg_len;
302  char *dst;
303 
304  if (tail_size > rte_pktmbuf_tailroom(pkt)) {
305  DOCA_LOG_DBG("Resulting packet size %u doesn't fit into tailroom size %u",
306  tail_size,
307  rte_pktmbuf_tailroom(pkt));
308  return DOCA_ERROR_TOO_BIG;
309  }
310 
311  pkt->next = NULL;
312  pkt->nb_segs = 1;
313  pkt->pkt_len = pkt->data_len;
314 
315  while (tail) {
316  seg_len = rte_pktmbuf_data_len(tail);
317  dst = rte_pktmbuf_append(pkt, seg_len);
318  assert(dst); /* already verified that tail fits */
319  memcpy(dst, rte_pktmbuf_mtod(tail, void *), seg_len);
320 
321  tmp = tail->next;
322  rte_pktmbuf_free_seg(tail);
323  tail = tmp;
324  }
325 
326  return DOCA_SUCCESS;
327 }
328 
329 /*
330  * Push a packet with fragmented outer IP header to the frag table.
331  *
332  * @wt_data [in]: worker thread data
333  * @pkt [in]: packet
334  * @parse_ctx [in]: pointer to the parser context
335  * @rx_ts [in]: burst reception timestamp
336  * @return: fully reassembled packet or NULL pointer if more frags are expected
337  */
338 static struct rte_mbuf *ip_frag_pkt_reassemble_push_outer(struct ip_frag_wt_data *wt_data,
339  struct rte_mbuf *pkt,
340  struct tun_parser_ctx *parse_ctx,
341  uint64_t rx_ts)
342 {
344  parse_ctx->link_ctx.len,
345  parse_ctx->network_ctx.len,
346  wt_data->cfg->mbuf_flag_outer_modified);
347 
348  return parse_ctx->network_ctx.ip_version == DOCA_FLOW_PROTO_IPV4 ?
349  rte_ipv4_frag_reassemble_packet(wt_data->frag_tbl,
350  &wt_data->death_row,
351  pkt,
352  rx_ts,
353  parse_ctx->network_ctx.ipv4_hdr) :
354  rte_ipv6_frag_reassemble_packet(wt_data->frag_tbl,
355  &wt_data->death_row,
356  pkt,
357  rx_ts,
358  parse_ctx->network_ctx.ipv6.hdr,
359  parse_ctx->network_ctx.ipv6.frag_ext);
360 }
361 
362 /*
363  * Push a packet with fragmented inner IP header to the frag table.
364  *
365  * @wt_data [in]: worker thread data
366  * @pkt_type [in]: incoming packet type
367  * @pkt [in]: packet
368  * @parse_ctx [in]: pointer to the parser context
369  * @rx_ts [in]: burst reception timestamp
370  * @return: fully reassembled packet or NULL pointer if more frags are expected
371  */
372 static struct rte_mbuf *ip_frag_pkt_reassemble_push_inner(struct ip_frag_wt_data *wt_data,
373  enum parser_pkt_type pkt_type,
374  struct rte_mbuf *pkt,
375  struct tun_parser_ctx *parse_ctx,
376  uint64_t rx_ts)
377 {
379  pkt_type == PARSER_PKT_TYPE_PLAIN ? parse_ctx->inner.link_ctx.len :
380  /* For tunneled packets we treat the
381  whole encapsulation as L2 for the
382  purpose of reassembly library. */
383  parse_ctx->len - parse_ctx->inner.len,
384  parse_ctx->inner.network_ctx.len,
385  wt_data->cfg->mbuf_flag_inner_modified);
386 
387  return parse_ctx->inner.network_ctx.ip_version == DOCA_FLOW_PROTO_IPV4 ?
388  rte_ipv4_frag_reassemble_packet(wt_data->frag_tbl,
389  &wt_data->death_row,
390  pkt,
391  rx_ts,
392  parse_ctx->inner.network_ctx.ipv4_hdr) :
393  rte_ipv6_frag_reassemble_packet(wt_data->frag_tbl,
394  &wt_data->death_row,
395  pkt,
396  rx_ts,
397  parse_ctx->inner.network_ctx.ipv6.hdr,
398  parse_ctx->inner.network_ctx.ipv6.frag_ext);
399 }
400 /*
401  * Set necessary mbuf fields and push the packet to the frag table.
402  *
403  * @wt_data [in]: worker thread data
404  * @rx_port_id [in]: receive port id
405  * @pkt_type [in]: incoming packet type
406  * @pkt [in]: packet
407  * @parse_ctx [in]: pointer to the parser context
408  * @rx_ts [in]: burst reception timestamp
409  * @whole_pkt [out]: resulting reassembled packet
410  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
411  */
413  uint16_t rx_port_id,
414  enum parser_pkt_type pkt_type,
415  struct rte_mbuf *pkt,
416  struct tun_parser_ctx *parse_ctx,
417  uint64_t rx_ts,
418  struct rte_mbuf **whole_pkt)
419 {
420  struct rte_mbuf *res;
421  doca_error_t ret;
422 
423  assert(pkt_type == PARSER_PKT_TYPE_TUNNELED || pkt_type == PARSER_PKT_TYPE_PLAIN);
424 
425  if (parse_ctx->network_ctx.frag)
426  res = ip_frag_pkt_reassemble_push_outer(wt_data, pkt, parse_ctx, rx_ts);
427  else if (parse_ctx->inner.network_ctx.frag)
428  res = ip_frag_pkt_reassemble_push_inner(wt_data, pkt_type, pkt, parse_ctx, rx_ts);
429  else
431 
432  if (!res)
433  return DOCA_ERROR_AGAIN;
434 
435  if (!wt_data->cfg->mbuf_chain && !rte_pktmbuf_is_contiguous(res)) {
436  ret = ip_frag_pkt_flatten(res);
437  if (ret != DOCA_SUCCESS) {
438  ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
439  return ret;
440  }
441  }
442 
443  *whole_pkt = res;
444  return DOCA_SUCCESS;
445 }
446 
447 /*
448  * Reassemble the packet.
449  *
450  * @wt_data [in]: worker thread data
451  * @rx_port_id [in]: receive port id
452  * @tx_port_id [in]: outgoing packet port id
453  * @pkt_type [in]: incoming packet type
454  * @pkt [in]: packet
455  * @rx_ts [in]: burst reception timestamp
456  */
static void ip_frag_pkt_reassemble(struct ip_frag_wt_data *wt_data,
				   uint16_t rx_port_id,
				   uint16_t tx_port_id,
				   enum parser_pkt_type pkt_type,
				   struct rte_mbuf *pkt,
				   uint64_t rx_ts)
{
	struct tun_parser_ctx parse_ctx;
	enum parser_pkt_type inferred_pkt_type;
	doca_error_t ret;
	bool reparse;

	do {
		reparse = false;
		/* For fragmented packet parser can't correctly deduce rx type on the first pass so reset it on
		 * each reparse iteration. */
		inferred_pkt_type = pkt_type;
		memset(&parse_ctx, 0, sizeof(parse_ctx));

		ret = ip_frag_pkt_parse(&inferred_pkt_type, pkt, &parse_ctx);
		switch (ret) {
		case DOCA_SUCCESS:
			/* Whole packet (not fragmented, or completed below): fix up headers and queue for tx */
			wt_data->sw_counters[rx_port_id].whole++;
			ip_frag_pkt_fixup(wt_data, inferred_pkt_type, pkt, &parse_ctx);
			rte_eth_tx_buffer(tx_port_id, wt_data->queue_id, wt_data->tx_buffer, pkt);
			break;

		case DOCA_ERROR_AGAIN:
			/* Fragment: push into the reassembly table. On SUCCESS the table returned a fully
			 * reassembled packet (via &pkt), which must be re-parsed from scratch, hence the loop. */
			wt_data->sw_counters[rx_port_id].frags_rx++;
			ret = ip_frag_pkt_reassemble_push(wt_data,
							  rx_port_id,
							  inferred_pkt_type,
							  pkt,
							  &parse_ctx,
							  rx_ts,
							  &pkt);
			if (ret == DOCA_SUCCESS) {
				reparse = true;
			} else if (ret != DOCA_ERROR_AGAIN) {
				/* AGAIN means "waiting for more fragments" and is not an error here */
				DOCA_LOG_ERR("Unexpected packet fragmentation");
				ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
			}
			break;

		default:
			ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
			DOCA_LOG_DBG("Failed to parse packet status %u", ret);
			break;
		}
	} while (reparse);
}
508 
509 /*
510  * Reassemble the packet burst buffering any resulting packets.
511  *
512  * @wt_data [in]: worker thread data
513  * @rx_port_id [in]: receive port id
514  * @tx_port_id [in]: send port id
515  * @pkt_type [in]: incoming packet type
516  * @pkts [in]: packet burst
517  * @pkts_cnt [in]: number of packets in the burst
518  * @rx_ts [in]: burst reception timestamp
519  */
520 static void ip_frag_pkts_reassemble(struct ip_frag_wt_data *wt_data,
521  uint16_t rx_port_id,
522  uint16_t tx_port_id,
523  enum parser_pkt_type pkt_type,
524  struct rte_mbuf *pkts[],
525  int pkts_cnt,
526  uint64_t rx_ts)
527 {
528  int i; /* prefetch calculation can yield negative values */
529 
530  for (i = 0; i < IP_FRAG_BURST_PREFETCH && i < pkts_cnt; i++)
531  rte_prefetch0(rte_pktmbuf_mtod(pkts[i], void *));
532 
533  for (i = 0; i < (pkts_cnt - IP_FRAG_BURST_PREFETCH); i++) {
534  rte_prefetch0(rte_pktmbuf_mtod(pkts[i + IP_FRAG_BURST_PREFETCH], void *));
535  ip_frag_pkt_reassemble(wt_data, rx_port_id, tx_port_id, pkt_type, pkts[i], rx_ts);
536  }
537 
538  for (; i < pkts_cnt; i++)
539  ip_frag_pkt_reassemble(wt_data, rx_port_id, tx_port_id, pkt_type, pkts[i], rx_ts);
540 }
541 
542 /*
543  * Receive a burst of packets on rx port, reassemble any fragments and send resulting packets on the tx port.
544  *
545  * @wt_data [in]: worker thread data
546  * @rx_port_id [in]: receive port id
547  * @tx_port_id [in]: send port id
548  * @pkt_type [in]: incoming packet type
549  */
550 static void ip_frag_wt_reassemble(struct ip_frag_wt_data *wt_data,
551  uint16_t rx_port_id,
552  uint16_t tx_port_id,
553  enum parser_pkt_type pkt_type)
554 {
555  struct rte_mbuf *pkts[IP_FRAG_MAX_PKT_BURST];
556  uint16_t pkts_cnt;
557 
558  pkts_cnt = rte_eth_rx_burst(rx_port_id, wt_data->queue_id, pkts, IP_FRAG_MAX_PKT_BURST);
559  if (likely(pkts_cnt)) {
560  ip_frag_pkts_reassemble(wt_data, rx_port_id, tx_port_id, pkt_type, pkts, pkts_cnt, rte_rdtsc());
561  rte_eth_tx_buffer_flush(tx_port_id, wt_data->queue_id, wt_data->tx_buffer);
562  } else {
563  rte_ip_frag_table_del_expired_entries(wt_data->frag_tbl, &wt_data->death_row, rte_rdtsc());
564  }
565  rte_ip_frag_free_death_row(&wt_data->death_row, IP_FRAG_BURST_PREFETCH);
566 }
567 
568 static int32_t ip_frag_mbuf_fragment(struct ip_frag_wt_data *wt_data,
569  struct conn_parser_ctx *parse_ctx,
570  struct rte_mbuf *pkt_in,
571  struct rte_mbuf **pkts_out,
572  uint16_t pkts_out_max,
573  uint16_t mtu,
574  struct rte_mempool *direct_pool,
575  struct rte_mempool *indirect_pool)
576 {
577  if (parse_ctx->network_ctx.ip_version == DOCA_FLOW_PROTO_IPV4)
578  return wt_data->cfg->mbuf_chain ?
579  rte_ipv4_fragment_packet(pkt_in, pkts_out, pkts_out_max, mtu, direct_pool, indirect_pool) :
580  rte_ipv4_fragment_copy_nonseg_packet(pkt_in, pkts_out, pkts_out_max, mtu, direct_pool);
581  else
582  return wt_data->cfg->mbuf_chain ? rte_ipv6_fragment_packet(pkt_in,
583  pkts_out,
584  pkts_out_max,
585  mtu,
586  direct_pool,
587  indirect_pool) :
588  -EOPNOTSUPP;
589 }
590 
591 /*
592  * Fragment the packet, if necessary, and buffer resulting packets.
593  *
594  * @wt_data [in]: worker thread data
595  * @rx_port_id [in]: receive port id
596  * @tx_port_id [in]: outgoing packet port id
597  * @pkt [in]: packet
598  */
599 static void ip_frag_pkt_fragment(struct ip_frag_wt_data *wt_data,
600  uint16_t rx_port_id,
601  uint16_t tx_port_id,
602  struct rte_mbuf *pkt)
603 {
604  struct rte_eth_dev_tx_buffer *tx_buffer = wt_data->tx_buffer;
605  uint8_t eth_hdr_copy[RTE_PKTMBUF_HEADROOM];
606  struct conn_parser_ctx parse_ctx;
607  size_t eth_hdr_len;
608  void *eth_hdr_new;
609  doca_error_t ret;
610  int num_frags;
611  int i;
612 
613  memset(&parse_ctx, 0, sizeof(parse_ctx));
614  /* We only fragment the outer header and don't care about parsing encapsulation, so always treat the packet as
615  * non-encapsulated. */
616  ret = plain_parse(rte_pktmbuf_mtod(pkt, uint8_t *),
617  rte_pktmbuf_mtod(pkt, uint8_t *) + rte_pktmbuf_data_len(pkt),
618  &parse_ctx);
619  if (ret != DOCA_SUCCESS) {
620  ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
621  DOCA_LOG_DBG("Failed to parse packet status %u", ret);
622  return;
623  }
624 
625  if (rte_pktmbuf_pkt_len(pkt) <= wt_data->cfg->mtu) {
626  wt_data->sw_counters[rx_port_id].mtu_fits_rx++;
627  rte_eth_tx_buffer(tx_port_id, wt_data->queue_id, tx_buffer, pkt);
628  return;
629  }
630 
631  wt_data->sw_counters[rx_port_id].mtu_exceed_rx++;
632  eth_hdr_len = parse_ctx.link_ctx.len;
633  if (sizeof(eth_hdr_copy) < eth_hdr_len) {
634  ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
635  DOCA_LOG_ERR("Ethernet header size %lu too big", eth_hdr_len);
636  return;
637  }
638  memcpy(eth_hdr_copy, parse_ctx.link_ctx.eth, eth_hdr_len);
639  rte_pktmbuf_adj(pkt, eth_hdr_len);
640 
641  num_frags = ip_frag_mbuf_fragment(wt_data,
642  &parse_ctx,
643  pkt,
644  &tx_buffer->pkts[tx_buffer->length],
645  tx_buffer->size - tx_buffer->length,
646  wt_data->cfg->mtu - eth_hdr_len,
647  pkt->pool,
648  wt_data->indirect_pool);
649  if (num_frags < 0) {
650  ip_frag_pkt_err_drop(wt_data, rx_port_id, pkt);
651  DOCA_LOG_ERR("RTE fragmentation failed with code: %d", -num_frags);
652  return;
653  }
654  rte_pktmbuf_free(pkt);
655 
656  for (i = tx_buffer->length; i < tx_buffer->length + num_frags; i++) {
657  pkt = tx_buffer->pkts[i];
660  pkt,
661  eth_hdr_len,
662  rte_ipv4_hdr_len(rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *)),
663  rte_pktmbuf_mtod(pkt, struct rte_ipv4_hdr *));
664 
665  eth_hdr_new = rte_pktmbuf_prepend(pkt, eth_hdr_len);
666  assert(eth_hdr_new);
667  memcpy(eth_hdr_new, eth_hdr_copy, eth_hdr_len);
668  }
669 
670  tx_buffer->length += num_frags;
671  wt_data->sw_counters[rx_port_id].frags_gen += num_frags;
672 }
673 
674 /*
 * Fragment the packet burst, buffering any resulting packets. Flush the tx buffer whenever its
 * remaining capacity drops below the flush threshold.
677  *
678  * @wt_data [in]: worker thread data
679  * @rx_port_id [in]: receive port id
680  * @tx_port_id [in]: outgoing packet port id
681  * @pkts [in]: packet burst
682  * @pkts_cnt [in]: number of packets in the burst
683  */
684 static void ip_frag_pkts_fragment(struct ip_frag_wt_data *wt_data,
685  uint16_t rx_port_id,
686  uint16_t tx_port_id,
687  struct rte_mbuf *pkts[],
688  int pkts_cnt)
689 {
690  struct rte_eth_dev_tx_buffer *tx_buffer = wt_data->tx_buffer;
691  int i; /* prefetch calculation can yield negative values */
692 
693  for (i = 0; i < IP_FRAG_BURST_PREFETCH && i < pkts_cnt; i++)
694  rte_prefetch0(rte_pktmbuf_mtod(pkts[i], void *));
695 
696  for (i = 0; i < (pkts_cnt - IP_FRAG_BURST_PREFETCH); i++) {
697  rte_prefetch0(rte_pktmbuf_mtod(pkts[i + IP_FRAG_BURST_PREFETCH], void *));
698  ip_frag_pkt_fragment(wt_data, rx_port_id, tx_port_id, pkts[i]);
699  if (tx_buffer->size - tx_buffer->length < IP_FRAG_FLUSH_THRESHOLD)
700  rte_eth_tx_buffer_flush(tx_port_id, wt_data->queue_id, tx_buffer);
701  }
702 
703  for (; i < pkts_cnt; i++) {
704  ip_frag_pkt_fragment(wt_data, rx_port_id, tx_port_id, pkts[i]);
705  if (tx_buffer->size - tx_buffer->length < IP_FRAG_FLUSH_THRESHOLD)
706  rte_eth_tx_buffer_flush(tx_port_id, wt_data->queue_id, tx_buffer);
707  }
708 }
709 
710 /*
711  * Receive a burst of packets on rx port, fragment any larger than MTU, and send resulting packets on the tx port.
712  *
713  * @wt_data [in]: worker thread data
714  * @rx_port_id [in]: receive port id
715  * @tx_port_id [in]: send port id
716  */
717 static void ip_frag_wt_fragment(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id)
718 {
719  struct rte_mbuf *pkts[IP_FRAG_MAX_PKT_BURST];
720  uint16_t pkts_cnt;
721 
722  pkts_cnt = rte_eth_rx_burst(rx_port_id, wt_data->queue_id, pkts, IP_FRAG_MAX_PKT_BURST);
723  ip_frag_pkts_fragment(wt_data, rx_port_id, tx_port_id, pkts, pkts_cnt);
724  rte_eth_tx_buffer_flush(tx_port_id, wt_data->queue_id, wt_data->tx_buffer);
725 }
726 
727 /*
728  * Worker thread main run loop
729  *
730  * @param [in]: Array of thread data structures
731  * @return: 0 on success and system error code otherwise
732  */
733 static int ip_frag_wt_thread_main(void *param)
734 {
735  struct ip_frag_wt_data *wt_data_arr = param;
736  struct ip_frag_wt_data *wt_data = &wt_data_arr[rte_lcore_id()];
737 
738  while (!force_stop) {
739  switch (wt_data->cfg->mode) {
740  case IP_FRAG_MODE_BIDIR:
741  ip_frag_wt_reassemble(wt_data,
746  break;
748  ip_frag_wt_reassemble(wt_data,
752  ip_frag_wt_reassemble(wt_data,
758  break;
759  default:
760  DOCA_LOG_ERR("Unsupported application mode: %u", wt_data->cfg->mode);
761  return EINVAL;
762  };
763  }
764 
765  return 0;
766 }
767 
768 /*
769  * Allocate and initialize ip_frag mbuf fragmentation flags
770  *
771  * @cfg [in]: application config
772  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
773  */
775 {
776  static const struct rte_mbuf_dynflag flag_outer_desc = {
777  .name = "ip_frag outer",
778  };
779  static const struct rte_mbuf_dynflag flag_inner_desc = {
780  .name = "ip_frag inner",
781  };
782  int flag_outer;
783  int flag_inner;
784 
785  flag_outer = rte_mbuf_dynflag_register(&flag_outer_desc);
786  if (flag_outer < 0) {
787  DOCA_LOG_ERR("Failed to register mbuf outer fragmentation flag with code: %d", -flag_outer);
788  return DOCA_ERROR_NO_MEMORY;
789  }
790 
791  flag_inner = rte_mbuf_dynflag_register(&flag_inner_desc);
792  if (flag_inner < 0) {
793  DOCA_LOG_ERR("Failed to register mbuf inner fragmentation flag with code: %d", -flag_inner);
794  return DOCA_ERROR_NO_MEMORY;
795  }
796 
797  cfg->mbuf_flag_outer_modified = RTE_BIT64(flag_outer);
798  cfg->mbuf_flag_inner_modified = RTE_BIT64(flag_inner);
799  return DOCA_SUCCESS;
800 }
801 
802 /*
803  * Initialize indirect fragmentation mempools
804  *
805  * @nb_queues [in]: number of device queues
806  * @indirect_pools [out]: Per-socket array of indirect fragmentation mempools
807  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
808  */
809 static doca_error_t ip_frag_indirect_pool_init(uint16_t nb_queues, struct rte_mempool *indirect_pools[])
810 {
811  char mempool_name[RTE_MEMPOOL_NAMESIZE];
812  unsigned socket;
813  unsigned lcore;
814 
815  RTE_LCORE_FOREACH_WORKER(lcore)
816  {
817  socket = rte_lcore_to_socket_id(lcore);
818 
819  if (!indirect_pools[socket]) {
820  snprintf(mempool_name, sizeof(mempool_name), "Indirect mempool %u", socket);
821  indirect_pools[socket] = rte_pktmbuf_pool_create(mempool_name,
822  NUM_MBUFS * nb_queues,
824  0,
825  0,
826  socket);
827  if (!indirect_pools[socket]) {
828  DOCA_LOG_ERR("Failed to allocate indirect mempool for socket %u", socket);
829  return DOCA_ERROR_NO_MEMORY;
830  }
831 
832  DOCA_LOG_DBG("Indirect mempool for socket %u initialized", socket);
833  }
834  }
835 
836  return DOCA_SUCCESS;
837 }
838 
839 /*
840  * Cleanup and free FRAG fastpath data
841  *
842  * @wt_data_arr [in]: worker thread data array
843  */
844 static void ip_frag_wt_data_cleanup(struct ip_frag_wt_data *wt_data_arr)
845 {
846  struct ip_frag_wt_data *wt_data;
847  unsigned lcore;
848 
849  RTE_LCORE_FOREACH_WORKER(lcore)
850  {
851  wt_data = &wt_data_arr[lcore];
852 
853  if (wt_data->frag_tbl)
854  rte_ip_frag_table_destroy(wt_data->frag_tbl);
855  rte_free(wt_data->tx_buffer);
856  }
857 
858  rte_free(wt_data_arr);
859 }
860 
861 /*
862  * Allocate and initialize ip_frag worker thread data
863  *
864  * @cfg [in]: application config
865  * @indirect_pools [in]: Per-socket array of indirect fragmentation mempools
866  * @wt_data_arr_out [out]: worker thread data array
867  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
868  */
870  struct rte_mempool *indirect_pools[],
871  struct ip_frag_wt_data **wt_data_arr_out)
872 {
873  struct ip_frag_wt_data *wt_data_arr;
874  struct ip_frag_wt_data *wt_data;
875  uint16_t queue_id = 0;
876  doca_error_t ret;
877  unsigned lcore;
878 
879  wt_data_arr = rte_calloc("Worker data", RTE_MAX_LCORE, sizeof(*wt_data_arr), _Alignof(typeof(*wt_data_arr)));
880  if (!wt_data_arr) {
881  DOCA_LOG_ERR("Failed to allocate worker thread data array");
882  return DOCA_ERROR_NO_MEMORY;
883  }
884 
885  RTE_LCORE_FOREACH(lcore)
886  {
887  wt_data = &wt_data_arr[lcore];
888  wt_data->cfg = cfg;
889  wt_data->indirect_pool = indirect_pools[rte_lcore_to_socket_id(lcore)];
890  wt_data->queue_id = queue_id++;
891 
892  wt_data->tx_buffer = rte_zmalloc_socket("TX buffer",
893  RTE_ETH_TX_BUFFER_SIZE(IP_FRAG_MAX_PKT_BURST),
894  RTE_CACHE_LINE_SIZE,
895  rte_lcore_to_socket_id(lcore));
896  if (!wt_data->tx_buffer) {
897  DOCA_LOG_ERR("Failed to allocate worker thread tx buffer");
898  ret = DOCA_ERROR_NO_MEMORY;
899  goto cleanup;
900  }
901  rte_eth_tx_buffer_init(wt_data->tx_buffer, IP_FRAG_MAX_PKT_BURST);
902  rte_eth_tx_buffer_set_err_callback(wt_data->tx_buffer,
903  rte_eth_tx_buffer_count_callback,
904  &wt_data->tx_buffer_err);
905 
906  wt_data->frag_tbl =
907  rte_ip_frag_table_create(cfg->frag_tbl_size,
910  ((rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S) * cfg->frag_tbl_timeout,
911  rte_lcore_to_socket_id(lcore));
912  if (!wt_data->frag_tbl) {
913  DOCA_LOG_ERR("Failed to allocate worker thread fragmentation table");
914  ret = DOCA_ERROR_NO_MEMORY;
915  goto cleanup;
916  }
917 
918  DOCA_LOG_DBG("Worker thread %u data initialized", lcore);
919  }
920 
921  *wt_data_arr_out = wt_data_arr;
922  return DOCA_SUCCESS;
923 
924 cleanup:
925  ip_frag_wt_data_cleanup(wt_data_arr);
926  return ret;
927 }
928 
929 /*
930  * Print SW debug counters of each worker
931  *
932  * @ctx [in]: Ip_frag context
933  * @wt_data_arr [in]: worker thread data array
934  */
935 static void ip_frag_sw_counters_print(struct ip_frag_ctx *ctx, struct ip_frag_wt_data *wt_data_arr)
936 {
937  struct ip_frag_sw_counters sw_sum_port = {0};
938  struct ip_frag_sw_counters sw_sum = {0};
 /* NOTE(review): elided listing line 939 presumably declares the
  * 'struct ip_frag_sw_counters *sw_counters' pointer assigned below — confirm. */
940  struct ip_frag_wt_data *wt_data;
941  unsigned lcore;
942  int port_id;
943 
944  DOCA_LOG_INFO("//////////////////// SW COUNTERS ////////////////////");
945 
 /* One section per port: each worker lcore's counters, then a per-port total */
946  for (port_id = 0; port_id < ctx->num_ports; port_id++) {
947  DOCA_LOG_INFO("== Port %d:", port_id);
 /* sw_sum_port is reused across ports, so reset it each iteration */
948  memset(&sw_sum_port, 0, sizeof(sw_sum_port));
949 
950  RTE_LCORE_FOREACH(lcore)
951  {
952  wt_data = &wt_data_arr[lcore];
953  sw_counters = &wt_data->sw_counters[port_id];
954 
 /* NOTE(review): the DOCA_LOG_INFO( opener (line 955) and the individual
  * counter arguments (lines 958-962) are elided in this listing. */
956  "Core sw %3u frags_rx=%-8lu whole=%-8lu mtu_fits_rx=%-8lu mtu_exceed_rx=%-8lu frags_gen=%-8lu err=%-8lu",
957  lcore,
963  sw_counters->err);
964 
 /* Fold this worker's counters into the per-port sum */
965  sw_sum_port.frags_rx += sw_counters->frags_rx;
966  sw_sum_port.whole += sw_counters->whole;
967  sw_sum_port.mtu_fits_rx += sw_counters->mtu_fits_rx;
968  sw_sum_port.mtu_exceed_rx += sw_counters->mtu_exceed_rx;
969  sw_sum_port.frags_gen += sw_counters->frags_gen;
970  sw_sum_port.err += sw_counters->err;
971  }
972 
 /* NOTE(review): the DOCA_LOG_INFO( opener (line 973) is elided here. */
974  "TOTAL sw port %d frags_rx=%-8lu whole=%-8lu mtu_fits_rx=%-8lu mtu_exceed_rx=%-8lu frags_gen=%-8lu err=%-8lu",
975  port_id,
976  sw_sum_port.frags_rx,
977  sw_sum_port.whole,
978  sw_sum_port.mtu_fits_rx,
979  sw_sum_port.mtu_exceed_rx,
980  sw_sum_port.frags_gen,
981  sw_sum_port.err);
982 
 /* Accumulate the per-port totals into the grand total across all ports */
983  sw_sum.frags_rx += sw_sum_port.frags_rx;
984  sw_sum.whole += sw_sum_port.whole;
985  sw_sum.mtu_fits_rx += sw_sum_port.mtu_fits_rx;
986  sw_sum.mtu_exceed_rx += sw_sum_port.mtu_exceed_rx;
987  sw_sum.frags_gen += sw_sum_port.frags_gen;
988  sw_sum.err += sw_sum_port.err;
989  }
990 
991  DOCA_LOG_INFO("== Total:");
 /* NOTE(review): the DOCA_LOG_INFO( opener (line 992) is elided here. */
993  "TOTAL sw frags_rx=%-8lu whole=%-8lu mtu_fits_rx=%-8lu mtu_exceed_rx=%-8lu frags_gen=%-8lu err=%-8lu",
994  sw_sum.frags_rx,
995  sw_sum.whole,
996  sw_sum.mtu_fits_rx,
997  sw_sum.mtu_exceed_rx,
998  sw_sum.frags_gen,
999  sw_sum.err);
1000 }
1001 
1002 /*
1003  * Print TX buffer errors counter of each worker
1004  *
1005  * @wt_data_arr [in]: worker thread data array
1006  */
1007 static void ip_frag_tx_buffer_error_print(struct ip_frag_wt_data *wt_data_arr)
1008 {
1009  struct ip_frag_wt_data *wt_data;
1010  uint64_t sum = 0;
1011  unsigned lcore;
1012 
1013  DOCA_LOG_INFO("//////////////////// TX BUFFER ERROR ////////////////////");
1014 
1015  RTE_LCORE_FOREACH(lcore)
1016  {
1017  wt_data = &wt_data_arr[lcore];
1018 
1019  DOCA_LOG_INFO("Core tx_buffer %3u err=%lu", lcore, wt_data->tx_buffer_err);
1020 
1021  sum += wt_data->tx_buffer_err;
1022  }
1023 
1024  DOCA_LOG_INFO("TOTAL tx_buffer err=%lu", sum);
1025 }
1026 
1027 /*
1028  * Print SW debug counters of each worker
1029  *
1030  * @wt_data_arr [in]: worker thread data array
1031  */
1032 static void ip_frag_tbl_stats_print(struct ip_frag_wt_data *wt_data_arr)
1033 {
1034  struct ip_frag_wt_data *wt_data;
1035  unsigned lcore;
1036 
1037  DOCA_LOG_INFO("//////////////////// FRAG TABLE STATS ////////////////////");
1038 
1039  RTE_LCORE_FOREACH(lcore)
1040  {
1041  wt_data = &wt_data_arr[lcore];
1042 
1043  DOCA_LOG_INFO("Core %3u:", lcore);
1044  rte_ip_frag_table_statistics_dump(stdout, wt_data->frag_tbl);
1045  }
1046 }
1047 /*
1048  * Print debug counters of each worker thread
1049  *
1050  * @ctx [in]: Ip_frag context
1051  * @wt_data_arr [in]: worker thread data array
1052  */
1053 static void ip_frag_debug_counters_print(struct ip_frag_ctx *ctx, struct ip_frag_wt_data *wt_data_arr)
1054 {
 /* Empty INFO messages act as blank separator lines between dump sections */
1055  DOCA_LOG_INFO("");
 /* Dump registered mbuf dynamic fields/flags (goes straight to stdout) */
1056  rte_mbuf_dyn_dump(stdout);
1057  DOCA_LOG_INFO("");
1058  ip_frag_tbl_stats_print(wt_data_arr);
1059  DOCA_LOG_INFO("");
1060  ip_frag_tx_buffer_error_print(wt_data_arr);
1061  DOCA_LOG_INFO("");
1062  ip_frag_sw_counters_print(ctx, wt_data_arr);
1063  DOCA_LOG_INFO("");
1064 }
1065 
1066 /*
1067  * Create a flow pipe
1068  *
1069  * @pipe_cfg [in]: Ip_Frag pipe configuration
1070  * @pipe [out]: pointer to store the created pipe at
1071  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
1072  */
1073 static doca_error_t ip_frag_pipe_create(struct ip_frag_pipe_cfg *pipe_cfg, struct doca_flow_pipe **pipe)
1074 {
1075  struct doca_flow_pipe_cfg *cfg;
 /* NOTE(review): elided listing line 1076 presumably declares 'doca_error_t result'. */
1077 
1078  result = doca_flow_pipe_cfg_create(&cfg, pipe_cfg->port);
1079  if (result != DOCA_SUCCESS) {
 /* cfg was not created; nothing to destroy, so return directly */
1080  DOCA_LOG_ERR("Failed to create doca_flow_pipe_cfg: %s", doca_error_get_descr(result));
1081  return result;
1082  }
1083 
1084  result = set_flow_pipe_cfg(cfg, pipe_cfg->name, DOCA_FLOW_PIPE_BASIC, pipe_cfg->is_root);
1085  if (result != DOCA_SUCCESS) {
1086  DOCA_LOG_ERR("Failed to set doca_flow_pipe_cfg: %s", doca_error_get_descr(result));
1087  goto destroy_pipe_cfg;
1088  }
1089 
 /* NOTE(review): elided line 1090 — per the error message below, presumably the
  * doca_flow_pipe_cfg_set_domain() call whose result is checked here. */
1091  if (result != DOCA_SUCCESS) {
1092  DOCA_LOG_ERR("Failed to set doca_flow_pipe_cfg domain: %s", doca_error_get_descr(result));
1093  goto destroy_pipe_cfg;
1094  }
1095 
 /* NOTE(review): elided line 1096 — presumably the doca_flow_pipe_cfg_set_nr_entries() call. */
1097  if (result != DOCA_SUCCESS) {
1098  DOCA_LOG_ERR("Failed to set doca_flow_pipe_cfg num_entries: %s", doca_error_get_descr(result));
1099  goto destroy_pipe_cfg;
1100  }
1101 
 /* match/match_mask are optional: only configured when the caller supplies a match */
1102  if (pipe_cfg->match != NULL) {
1103  result = doca_flow_pipe_cfg_set_match(cfg, pipe_cfg->match, pipe_cfg->match_mask);
1104  if (result != DOCA_SUCCESS) {
1105  DOCA_LOG_ERR("Failed to set doca_flow_pipe_cfg match: %s", doca_error_get_descr(result));
1106  goto destroy_pipe_cfg;
1107  }
1108  }
1109 
1110  result = doca_flow_pipe_create(cfg, pipe_cfg->fwd, pipe_cfg->fwd_miss, pipe);
1111  if (result != DOCA_SUCCESS) {
1112  DOCA_LOG_ERR("Failed to create IP_FRAG pipe: %s", doca_error_get_descr(result));
1113  goto destroy_pipe_cfg;
1114  }
1115 
 /* NOTE(review): elided lines 1116-1117 — presumably the 'destroy_pipe_cfg:' label and
  * the doca_flow_pipe_cfg_destroy(cfg) cleanup shared by success and error paths. */
1118  return result;
1119 }
1120 
1121 /*
1122  * Create RSS flow pipe
1123  *
1124  * @ctx [in]: Ip_frag context
1125  * @port_id [in]: port to create pipe at
1126  * @pipe_name [in]: name of the created pipe
1127  * @is_root [in]: flag indicating root pipe
1128  * @flags [in]: pipe RSS flags
1129  * @pipe_miss [in]: forward miss pipe
1130  * @pipe_out [out]: pointer to store the created pipe at
1131  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
1132  */
 /* NOTE(review): the opening signature line (1133) is elided; per the cross-reference
  * index it reads 'static doca_error_t ip_frag_rss_pipe_create(struct ip_frag_ctx *ctx,'. */
1134  uint16_t port_id,
1135  char *pipe_name,
1136  bool is_root,
1137  uint32_t flags,
1138  struct doca_flow_pipe *pipe_miss,
1139  struct doca_flow_pipe **pipe_out)
1140 {
1141  uint16_t rss_queues[RTE_MAX_LCORE];
1142  struct doca_flow_match match_pipe = {
 /* NOTE(review): elided lines 1143-1144 — the match initializer fields; confirm against the source. */
1145  };
1146  const int num_of_entries = 1;
 /* Forward hits to RSS over all worker queues, with caller-provided hash flags */
1147  struct doca_flow_fwd fwd = {.type = DOCA_FLOW_FWD_RSS,
 /* NOTE(review): elided line 1148 — an additional .rss initializer field. */
1149  .rss.queues_array = rss_queues,
1150  .rss.nr_queues = ctx->num_queues,
1151  .rss.outer_flags = flags};
1152  struct doca_flow_fwd fwd_miss = {0};
1153  struct ip_frag_pipe_cfg pipe_cfg = {.port_id = port_id,
1154  .port = ctx->ports[port_id],
 /* NOTE(review): elided line 1155 — presumably the .domain initializer. */
1156  .name = pipe_name,
1157  .is_root = is_root,
1158  .num_entries = num_of_entries,
1159  .match = &match_pipe,
1160  .match_mask = &match_pipe,
1161  .fwd = &fwd};
1162  struct entries_status status = {0};
1163  struct doca_flow_pipe *pipe;
1164  doca_error_t ret;
1165  int i;
1166 
 /* Identity queue map: RSS queue i targets queue index i */
1167  for (i = 0; i < ctx->num_queues; ++i)
1168  rss_queues[i] = i;
1169 
 /* Optional miss path: misses are forwarded to the caller-provided pipe */
1170  if (pipe_miss) {
 /* NOTE(review): elided line 1171 — presumably 'fwd_miss.type = DOCA_FLOW_FWD_PIPE;'. */
1172  fwd_miss.next_pipe = pipe_miss;
1173  pipe_cfg.fwd_miss = &fwd_miss;
1174  }
1175 
1176  ret = ip_frag_pipe_create(&pipe_cfg, &pipe);
1177  if (ret != DOCA_SUCCESS) {
1178  DOCA_LOG_ERR("Failed to create rss pipe: %s", doca_error_get_descr(ret));
1179  return ret;
1180  }
1181 
 /* NOTE(review): on the failure paths below the created pipe is not destroyed
  * here — presumably released with the port teardown; confirm. */
1182  ret = doca_flow_pipe_add_entry(0, pipe, NULL, NULL, NULL, NULL, 0, &status, NULL);
1183  if (ret != DOCA_SUCCESS) {
1184  DOCA_LOG_ERR("Failed to add rss entry: %s", doca_error_get_descr(ret));
1185  return ret;
1186  }
1187 
1188  ret = doca_flow_entries_process(pipe_cfg.port, 0, DEFAULT_TIMEOUT_US, pipe_cfg.num_entries);
1189  if (ret != DOCA_SUCCESS) {
1190  DOCA_LOG_ERR("Failed to process entries on port %u: %s", port_id, doca_error_get_descr(ret));
1191  return ret;
1192  }
1193 
 /* Entry must be fully processed (not just queued) before the pipe is usable */
1194  if (status.nb_processed != num_of_entries || status.failure) {
1195  DOCA_LOG_ERR("Failed to process port %u entries", port_id);
1196  return DOCA_ERROR_BAD_STATE;
1197  }
1198 
1199  *pipe_out = pipe;
1200  return DOCA_SUCCESS;
1201 }
1202 
1203 /*
1204  * Create RSS pipe for each port
1205  *
1206  * @ctx [in] Ip_Frag context
1207  * @return: DOCA_SUCCESS on success and DOCA_ERROR otherwise
1208  */
 /* NOTE(review): elided line 1209 — per the cross-reference index the signature is
  * 'static doca_error_t ip_frag_rss_pipes_create(struct ip_frag_ctx *ctx)'. */
1210 {
1211  doca_error_t ret;
1212  int port_id;
1213 
 /* Per port: create the IPv4 RSS pipe first (non-root), then the root IPv6
  * pipe whose miss path falls through to the IPv4 pipe. */
1214  for (port_id = 0; port_id < ctx->num_ports; port_id++) {
 /* NOTE(review): elided lines 1215/1219 — the 'ret = ip_frag_rss_pipe_create(ctx,'
  * opener and the RSS flags argument (presumably DOCA_FLOW_RSS_IPV4). */
1216  port_id,
1217  "RSS_IPV4_PIPE",
1218  false,
1220  NULL,
1221  &ctx->pipes[port_id][IP_FRAG_RSS_PIPE_IPV4]);
1222  if (ret != DOCA_SUCCESS) {
1223  DOCA_LOG_ERR("Failed to create RSS IPv4 pipe: %s", doca_error_get_descr(ret));
1224  return ret;
1225  }
1226 
 /* NOTE(review): elided lines 1227/1231 — call opener and RSS flags
  * (presumably DOCA_FLOW_RSS_IPV6). The IPv6 pipe is the root pipe. */
1228  port_id,
1229  "RSS_IPV6_PIPE",
1230  true,
1232  ctx->pipes[port_id][IP_FRAG_RSS_PIPE_IPV4],
1233  &ctx->pipes[port_id][IP_FRAG_RSS_PIPE_IPV6]);
1234  if (ret != DOCA_SUCCESS) {
1235  DOCA_LOG_ERR("Failed to create RSS IPv6 pipe: %s", doca_error_get_descr(ret));
1236  return ret;
1237  }
1238  }
1239 
1240  return DOCA_SUCCESS;
1241 }
1242 
 /* NOTE(review): elided line 1243 — per the cross-reference index the signature is
  * 'doca_error_t ip_frag(struct ip_frag_config *cfg, struct application_dpdk_config *dpdk_cfg)'.
  * Entry point: initializes DOCA Flow, per-worker data, ports and pipes, runs the
  * data path on all lcores, then prints debug counters and tears everything down. */
1244 {
1245  struct rte_mempool *indirect_pools[RTE_MAX_NUMA_NODES] = {NULL};
1246  uint32_t nr_shared_resources[SHARED_RESOURCE_NUM_VALUES] = {0};
1247  struct ip_frag_ctx ctx = {
1248  .num_ports = dpdk_cfg->port_config.nb_ports,
1249  .num_queues = dpdk_cfg->port_config.nb_queues,
1250  };
1251  uint32_t actions_mem_size[RTE_MAX_ETHPORTS];
1252  struct ip_frag_wt_data *wt_data_arr;
1253  struct flow_resources resource = {0};
1254  doca_error_t ret;
1255 
1256  ret = init_doca_flow(ctx.num_queues, "vnf,hws", &resource, nr_shared_resources);
1257  if (ret != DOCA_SUCCESS) {
1258  DOCA_LOG_ERR("Failed to init DOCA Flow: %s", doca_error_get_descr(ret));
1259  return ret;
1260  }
1261 
 /* NOTE(review): elided line 1262 — presumably 'ret = ip_frag_mbuf_flags_init(cfg);'. */
1263  if (ret != DOCA_SUCCESS)
1264  goto cleanup_doca_flow;
1265 
1266  ret = ip_frag_indirect_pool_init(ctx.num_queues, indirect_pools);
1267  if (ret != DOCA_SUCCESS)
1268  goto cleanup_doca_flow;
1269 
1270  ret = ip_frag_wt_data_init(cfg, indirect_pools, &wt_data_arr);
1271  if (ret != DOCA_SUCCESS)
1272  goto cleanup_doca_flow;
1273 
 /* NOTE(review): elided line 1274 — presumably the init_doca_flow_ports(...) call
  * whose result is checked here. */
1275  if (ret != DOCA_SUCCESS) {
1276  DOCA_LOG_ERR("Failed to init DOCA ports: %s", doca_error_get_descr(ret));
1277  goto cleanup_wt_data;
1278  }
1279 
1280  ret = ip_frag_rss_pipes_create(&ctx);
1281  if (ret != DOCA_SUCCESS) {
 /* NOTE(review): message is missing a word — likely meant "Failed to create pipes". */
1282  DOCA_LOG_ERR("Failed to pipes: %s", doca_error_get_descr(ret));
1283  goto cleanup_ports;
1284  }
1285 
1286  DOCA_LOG_INFO("Initialization finished, starting data path");
 /* CALL_MAIN: the main lcore runs the worker loop alongside the other lcores */
1287  if (rte_eal_mp_remote_launch(ip_frag_wt_thread_main, wt_data_arr, CALL_MAIN)) {
1288  DOCA_LOG_ERR("Failed to launch worker threads");
 /* NOTE(review): 'ret' still holds DOCA_SUCCESS here, so the launch failure is
  * only reflected in the return value if the elided line 1295 overwrites it. */
1289  goto cleanup_ports;
1290  }
1291  rte_eal_mp_wait_lcore();
1292 
1293  ip_frag_debug_counters_print(&ctx, wt_data_arr);
1294 cleanup_ports:
 /* NOTE(review): elided line 1295 — presumably 'ret = stop_doca_flow_ports(...)'. */
1296  if (ret != DOCA_SUCCESS)
1297  DOCA_LOG_ERR("Failed to stop doca flow ports: %s", doca_error_get_descr(ret));
1298 cleanup_wt_data:
1299  ip_frag_wt_data_cleanup(wt_data_arr);
1300 cleanup_doca_flow:
 /* NOTE(review): elided line 1301 — presumably the doca_flow_destroy() call. */
1302  return ret;
1303 }
#define NULL
Definition: __stddef_null.h:26
int32_t result
struct doca_flow_port * init_doca_flow(uint16_t port_id, uint8_t rxq_num)
Definition: flow.c:37
static void cleanup(struct cache_invalidate_sample_state *state)
#define likely(x)
Definition: utils.h:38
#define MBUF_CACHE_SIZE
Definition: dpdk_utils.h:46
#define NUM_MBUFS
Definition: dpdk_utils.h:45
static doca_error_t destroy_pipe_cfg(struct doca_flow_pipe_cfg *cfg)
static struct doca_flow_fwd fwd_miss
Definition: flow_parser.c:110
static uint16_t * rss_queues
Definition: flow_parser.c:114
static struct doca_flow_fwd fwd
Definition: flow_parser.c:109
#define DEFAULT_TIMEOUT_US
Definition: flow_skeleton.c:36
enum doca_error doca_error_t
DOCA API return codes.
DOCA_STABLE const char * doca_error_get_descr(doca_error_t error)
Returns the description string of an error code.
@ DOCA_ERROR_INVALID_VALUE
Definition: doca_error.h:44
@ DOCA_ERROR_TOO_BIG
Definition: doca_error.h:65
@ DOCA_ERROR_BAD_STATE
Definition: doca_error.h:56
@ DOCA_ERROR_AGAIN
Definition: doca_error.h:43
@ DOCA_SUCCESS
Definition: doca_error.h:38
@ DOCA_ERROR_NO_MEMORY
Definition: doca_error.h:45
#define DOCA_FLOW_PROTO_IPV4
Definition: doca_flow_net.h:39
DOCA_STABLE doca_error_t doca_flow_pipe_cfg_destroy(struct doca_flow_pipe_cfg *cfg)
Destroy DOCA Flow pipe configuration struct.
DOCA_STABLE doca_error_t doca_flow_pipe_cfg_create(struct doca_flow_pipe_cfg **cfg, struct doca_flow_port *port)
Create DOCA Flow pipe configuration struct.
DOCA_STABLE doca_error_t doca_flow_entries_process(struct doca_flow_port *port, uint16_t pipe_queue, uint64_t timeout, uint32_t max_processed_entries)
Process entries in queue.
DOCA_EXPERIMENTAL doca_error_t doca_flow_pipe_cfg_set_match(struct doca_flow_pipe_cfg *cfg, const struct doca_flow_match *match, const struct doca_flow_match *match_mask)
Set pipe's match and match mask.
DOCA_EXPERIMENTAL doca_error_t doca_flow_pipe_create(const struct doca_flow_pipe_cfg *cfg, const struct doca_flow_fwd *fwd, const struct doca_flow_fwd *fwd_miss, struct doca_flow_pipe **pipe)
Create one new pipe.
DOCA_EXPERIMENTAL doca_error_t doca_flow_pipe_add_entry(uint16_t pipe_queue, struct doca_flow_pipe *pipe, const struct doca_flow_match *match, const struct doca_flow_actions *actions, const struct doca_flow_monitor *monitor, const struct doca_flow_fwd *fwd, uint32_t flags, void *usr_ctx, struct doca_flow_pipe_entry **entry)
Add one new entry to a pipe.
DOCA_STABLE void doca_flow_destroy(void)
Destroy the doca flow.
DOCA_STABLE doca_error_t doca_flow_pipe_cfg_set_nr_entries(struct doca_flow_pipe_cfg *cfg, uint32_t nr_entries)
Set pipe's maximum number of flow rules.
DOCA_STABLE doca_error_t doca_flow_pipe_cfg_set_domain(struct doca_flow_pipe_cfg *cfg, enum doca_flow_pipe_domain domain)
Set pipe's domain.
@ DOCA_FLOW_RSS_IPV4
Definition: doca_flow.h:764
@ DOCA_FLOW_RSS_IPV6
Definition: doca_flow.h:766
@ DOCA_FLOW_PIPE_BASIC
Definition: doca_flow.h:221
@ DOCA_FLOW_L3_META_IPV4
Definition: doca_flow.h:296
@ DOCA_FLOW_L3_META_IPV6
Definition: doca_flow.h:298
@ DOCA_FLOW_RESOURCE_TYPE_NON_SHARED
Definition: doca_flow.h:615
@ DOCA_FLOW_FWD_PIPE
Definition: doca_flow.h:746
@ DOCA_FLOW_FWD_RSS
Definition: doca_flow.h:742
@ DOCA_FLOW_PIPE_DOMAIN_DEFAULT
Definition: doca_flow.h:241
#define DOCA_LOG_ERR(format,...)
Generates an ERROR application log message.
Definition: doca_log.h:466
#define DOCA_LOG_INFO(format,...)
Generates an INFO application log message.
Definition: doca_log.h:486
#define DOCA_LOG_DBG(format,...)
Generates a DEBUG application log message.
Definition: doca_log.h:496
static void ip_frag_ipv4_hdr_cksum(struct rte_ipv4_hdr *hdr)
Definition: ip_frag_dp.c:134
static void ip_frag_pkt_fragment(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id, struct rte_mbuf *pkt)
Definition: ip_frag_dp.c:599
static doca_error_t ip_frag_pkt_parse(enum parser_pkt_type *pkt_type, struct rte_mbuf *pkt, struct tun_parser_ctx *parse_ctx)
Definition: ip_frag_dp.c:94
static doca_error_t ip_frag_wt_data_init(const struct ip_frag_config *cfg, struct rte_mempool *indirect_pools[], struct ip_frag_wt_data **wt_data_arr_out)
Definition: ip_frag_dp.c:869
DOCA_LOG_REGISTER(IP_FRAG::DP)
#define IP_FRAG_MAX_PKT_BURST
Definition: ip_frag_dp.c:43
static void ip_frag_debug_counters_print(struct ip_frag_ctx *ctx, struct ip_frag_wt_data *wt_data_arr)
Definition: ip_frag_dp.c:1053
#define IP_FRAG_BURST_PREFETCH
Definition: ip_frag_dp.c:44
static doca_error_t ip_frag_pipe_create(struct ip_frag_pipe_cfg *pipe_cfg, struct doca_flow_pipe **pipe)
Definition: ip_frag_dp.c:1073
static void ip_frag_network_cksum_handle(struct ip_frag_wt_data *wt_data, struct rte_mbuf *pkt, struct link_parser_ctx *link_ctx, struct network_parser_ctx *network_ctx)
Definition: ip_frag_dp.c:183
static void ip_frag_ipv4_cksum_handle(struct ip_frag_wt_data *wt_data, struct rte_mbuf *pkt, uint64_t l2_len, uint64_t l3_len, struct rte_ipv4_hdr *ipv4_hdr)
Definition: ip_frag_dp.c:160
static void ip_frag_pkt_fixup(struct ip_frag_wt_data *wt_data, enum parser_pkt_type pkt_type, struct rte_mbuf *pkt, struct tun_parser_ctx *parse_ctx)
Definition: ip_frag_dp.c:233
static void ip_frag_pkt_err_drop(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, struct rte_mbuf *pkt)
Definition: ip_frag_dp.c:80
static doca_error_t ip_frag_rss_pipes_create(struct ip_frag_ctx *ctx)
Definition: ip_frag_dp.c:1209
static void ip_frag_wt_reassemble(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id, enum parser_pkt_type pkt_type)
Definition: ip_frag_dp.c:550
static void ip_frag_wt_data_cleanup(struct ip_frag_wt_data *wt_data_arr)
Definition: ip_frag_dp.c:844
static int32_t ip_frag_mbuf_fragment(struct ip_frag_wt_data *wt_data, struct conn_parser_ctx *parse_ctx, struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t pkts_out_max, uint16_t mtu, struct rte_mempool *direct_pool, struct rte_mempool *indirect_pool)
Definition: ip_frag_dp.c:568
static void ip_frag_wt_fragment(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id)
Definition: ip_frag_dp.c:717
struct rte_eth_dev_tx_buffer * tx_buffer
Definition: ip_frag_dp.c:3
static doca_error_t ip_frag_pkt_flatten(struct rte_mbuf *pkt)
Definition: ip_frag_dp.c:296
static struct rte_mbuf * ip_frag_pkt_reassemble_push_inner(struct ip_frag_wt_data *wt_data, enum parser_pkt_type pkt_type, struct rte_mbuf *pkt, struct tun_parser_ctx *parse_ctx, uint64_t rx_ts)
Definition: ip_frag_dp.c:372
struct ip_frag_wt_data __rte_aligned(RTE_CACHE_LINE_SIZE)
static void ip_frag_network_cksum(struct network_parser_ctx *ctx)
Definition: ip_frag_dp.c:145
struct rte_mempool * indirect_pool
Definition: ip_frag_dp.c:6
doca_error_t ip_frag(struct ip_frag_config *cfg, struct application_dpdk_config *dpdk_cfg)
Definition: ip_frag_dp.c:1243
static struct rte_mbuf * ip_frag_pkt_reassemble_push_outer(struct ip_frag_wt_data *wt_data, struct rte_mbuf *pkt, struct tun_parser_ctx *parse_ctx, uint64_t rx_ts)
Definition: ip_frag_dp.c:338
static void ip_frag_tx_buffer_error_print(struct ip_frag_wt_data *wt_data_arr)
Definition: ip_frag_dp.c:1007
static void ip_frag_tbl_stats_print(struct ip_frag_wt_data *wt_data_arr)
Definition: ip_frag_dp.c:1032
static void ip_frag_sw_counters_print(struct ip_frag_ctx *ctx, struct ip_frag_wt_data *wt_data_arr)
Definition: ip_frag_dp.c:935
static doca_error_t ip_frag_rss_pipe_create(struct ip_frag_ctx *ctx, uint16_t port_id, char *pipe_name, bool is_root, uint32_t flags, struct doca_flow_pipe *pipe_miss, struct doca_flow_pipe **pipe_out)
Definition: ip_frag_dp.c:1133
const struct ip_frag_config * cfg
Definition: ip_frag_dp.c:0
static void ip_frag_pkts_fragment(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id, struct rte_mbuf *pkts[], int pkts_cnt)
Definition: ip_frag_dp.c:684
static int ip_frag_wt_thread_main(void *param)
Definition: ip_frag_dp.c:733
#define IP_FRAG_TBL_BUCKET_SIZE
Definition: ip_frag_dp.c:47
bool force_stop
Definition: ip_frag_dp.c:71
static doca_error_t ip_frag_pkt_reassemble_push(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, enum parser_pkt_type pkt_type, struct rte_mbuf *pkt, struct tun_parser_ctx *parse_ctx, uint64_t rx_ts, struct rte_mbuf **whole_pkt)
Definition: ip_frag_dp.c:412
static doca_error_t ip_frag_mbuf_flags_init(struct ip_frag_config *cfg)
Definition: ip_frag_dp.c:774
static void ip_frag_udp_cksum_handle(struct ip_frag_wt_data *wt_data, struct rte_mbuf *pkt, struct link_parser_ctx *link_ctx, struct network_parser_ctx *network_ctx, struct transport_parser_ctx *transport_ctx)
Definition: ip_frag_dp.c:202
uint16_t queue_id
Definition: ip_frag_dp.c:1
static void ip_frag_pkts_reassemble(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id, enum parser_pkt_type pkt_type, struct rte_mbuf *pkts[], int pkts_cnt, uint64_t rx_ts)
Definition: ip_frag_dp.c:520
static void ip_frag_pkt_reassemble(struct ip_frag_wt_data *wt_data, uint16_t rx_port_id, uint16_t tx_port_id, enum parser_pkt_type pkt_type, struct rte_mbuf *pkt, uint64_t rx_ts)
Definition: ip_frag_dp.c:457
#define IP_FRAG_FLUSH_THRESHOLD
Definition: ip_frag_dp.c:45
static void ip_frag_pkt_reassemble_prepare(struct rte_mbuf *pkt, size_t l2_len, size_t l3_len, uint64_t flags)
Definition: ip_frag_dp.c:122
static doca_error_t ip_frag_indirect_pool_init(uint16_t nb_queues, struct rte_mempool *indirect_pools[])
Definition: ip_frag_dp.c:809
struct ip_frag_sw_counters sw_counters[IP_FRAG_PORT_NUM]
Definition: ip_frag_dp.c:2
@ IP_FRAG_RSS_PIPE_IPV6
Definition: ip_frag_dp.h:52
@ IP_FRAG_RSS_PIPE_IPV4
Definition: ip_frag_dp.h:51
@ IP_FRAG_MODE_MULTIPORT
Definition: ip_frag_dp.h:39
@ IP_FRAG_MODE_BIDIR
Definition: ip_frag_dp.h:38
@ IP_FRAG_PORT_FRAGMENT_1
Definition: ip_frag_dp.h:46
@ IP_FRAG_PORT_NUM
Definition: ip_frag_dp.h:47
@ IP_FRAG_PORT_REASSEMBLE_0
Definition: ip_frag_dp.h:43
@ IP_FRAG_PORT_REASSEMBLE_1
Definition: ip_frag_dp.h:45
@ IP_FRAG_PORT_FRAGMENT_0
Definition: ip_frag_dp.h:44
doca_error_t unknown_parse(uint8_t *data, uint8_t *data_end, struct tun_parser_ctx *ctx, enum parser_pkt_type *parser_pkt_type)
doca_error_t tunnel_parse(uint8_t *data, uint8_t *data_end, struct tun_parser_ctx *ctx)
doca_error_t plain_parse(uint8_t *data, uint8_t *data_end, struct conn_parser_ctx *ctx)
parser_pkt_type
Definition: packet_parser.h:35
@ PARSER_PKT_TYPE_PLAIN
Definition: packet_parser.h:37
@ PARSER_PKT_TYPE_TUNNELED
Definition: packet_parser.h:36
@ PARSER_PKT_TYPE_UNKNOWN
Definition: packet_parser.h:38
doca_error_t stop_doca_flow_ports(int nb_ports, struct doca_flow_port *ports[])
Definition: flow_common.c:240
doca_error_t init_doca_flow_ports(int nb_ports, struct doca_flow_port *ports[], bool is_hairpin, struct doca_dev *dev_arr[], uint32_t actions_mem_size[])
Definition: flow_common.c:296
doca_error_t set_flow_pipe_cfg(struct doca_flow_pipe_cfg *cfg, const char *name, enum doca_flow_pipe_type type, bool is_root)
Definition: flow_common.c:305
#define SHARED_RESOURCE_NUM_VALUES
Definition: flow_common.h:59
struct application_port_config port_config
Definition: dpdk_utils.h:70
struct link_parser_ctx link_ctx
Definition: packet_parser.h:80
struct network_parser_ctx network_ctx
Definition: packet_parser.h:81
forwarding configuration
Definition: doca_flow.h:779
struct doca_flow_pipe * next_pipe
Definition: doca_flow.h:800
enum doca_flow_fwd_type type
Definition: doca_flow.h:780
doca flow matcher information
Definition: doca_flow.h:491
struct doca_flow_parser_meta parser_meta
Definition: doca_flow.h:496
uint32_t flags
Definition: doca_flow.h:492
enum doca_flow_l3_meta outer_l3_type
Definition: doca_flow.h:382
user context struct that will be used in entries process callback
Definition: flow_common.h:78
struct rte_gtp_hdr * gtp_hdr
Definition: packet_parser.h:73
uint32_t frag_tbl_size
Definition: ip_frag_dp.h:64
enum ip_frag_mode mode
Definition: ip_frag_dp.h:57
uint64_t mbuf_flag_inner_modified
Definition: ip_frag_dp.h:59
uint32_t frag_tbl_timeout
Definition: ip_frag_dp.h:63
uint16_t mtu
Definition: ip_frag_dp.h:60
uint64_t mbuf_flag_outer_modified
Definition: ip_frag_dp.h:58
struct doca_flow_match * match_mask
Definition: ip_frag_dp.h:75
struct doca_flow_fwd * fwd_miss
Definition: ip_frag_dp.h:77
uint32_t num_entries
Definition: ip_frag_dp.h:73
struct doca_flow_match * match
Definition: ip_frag_dp.h:74
struct doca_flow_port * port
Definition: ip_frag_dp.h:69
enum doca_flow_pipe_domain domain
Definition: ip_frag_dp.h:70
uint16_t port_id
Definition: ip_frag_dp.h:68
struct doca_flow_fwd * fwd
Definition: ip_frag_dp.h:76
uint64_t mtu_exceed_rx
Definition: ip_frag_dp.c:55
uint64_t frags_gen
Definition: ip_frag_dp.c:56
uint64_t mtu_fits_rx
Definition: ip_frag_dp.c:54
struct rte_ip_frag_death_row death_row
Definition: ip_frag_dp.c:68
struct rte_eth_dev_tx_buffer * tx_buffer
Definition: ip_frag_dp.c:64
struct rte_ip_frag_tbl * frag_tbl
Definition: ip_frag_dp.c:66
uint64_t tx_buffer_err
Definition: ip_frag_dp.c:65
const struct ip_frag_config * cfg
Definition: ip_frag_dp.c:61
uint16_t queue_id
Definition: ip_frag_dp.c:62
struct ip_frag_sw_counters sw_counters[IP_FRAG_PORT_NUM]
Definition: ip_frag_dp.c:63
struct rte_mempool * indirect_pool
Definition: ip_frag_dp.c:67
struct network_parser_ctx::@2::@4 ipv6
struct rte_ipv4_hdr * ipv4_hdr
Definition: packet_parser.h:54
struct rte_udp_hdr * udp_hdr
Definition: packet_parser.h:66
struct link_parser_ctx link_ctx
Definition: packet_parser.h:87
struct transport_parser_ctx transport_ctx
Definition: packet_parser.h:89
struct conn_parser_ctx inner
Definition: packet_parser.h:91
struct network_parser_ctx network_ctx
Definition: packet_parser.h:88
struct gtp_parser_ctx gtp_ctx
Definition: packet_parser.h:90
uint16_t num_queues
Definition: upf_accel.h:339
struct doca_flow_pipe * pipes[UPF_ACCEL_PORTS_MAX][UPF_ACCEL_PIPE_NUM]
Definition: upf_accel.h:343
struct doca_flow_port * ports[UPF_ACCEL_PORTS_MAX]
Definition: upf_accel.h:344
uint16_t num_ports
Definition: upf_accel.h:338
struct doca_dev * dev_arr[UPF_ACCEL_PORTS_MAX]
Definition: upf_accel.h:345
static uint32_t actions_mem_size[FLOW_SWITCH_PORTS_MAX]
Definition: switch_core.c:43
struct upf_accel_ctx * ctx