DPDK  23.11.4
rte_ethdev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 
180 #define RTE_ETHDEV_LOG(level, ...) \
181  rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182 
183 struct rte_mbuf;
184 
201 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202 
218 
232 
246 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247  for (rte_eth_iterator_init(iter, devargs), \
248  id = rte_eth_iterator_next(iter); \
249  id != RTE_MAX_ETHPORTS; \
250  id = rte_eth_iterator_next(iter))
251 
262  uint64_t ipackets;
263  uint64_t opackets;
264  uint64_t ibytes;
265  uint64_t obytes;
270  uint64_t imissed;
271  uint64_t ierrors;
272  uint64_t oerrors;
273  uint64_t rx_nombuf;
274  /* Queue stats are limited to max 256 queues */
276  uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278  uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280  uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282  uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284  uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 };
286 
290 #define RTE_ETH_LINK_SPEED_AUTONEG 0
291 #define RTE_ETH_LINK_SPEED_FIXED RTE_BIT32(0)
292 #define RTE_ETH_LINK_SPEED_10M_HD RTE_BIT32(1)
293 #define RTE_ETH_LINK_SPEED_10M RTE_BIT32(2)
294 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
295 #define RTE_ETH_LINK_SPEED_100M RTE_BIT32(4)
296 #define RTE_ETH_LINK_SPEED_1G RTE_BIT32(5)
297 #define RTE_ETH_LINK_SPEED_2_5G RTE_BIT32(6)
298 #define RTE_ETH_LINK_SPEED_5G RTE_BIT32(7)
299 #define RTE_ETH_LINK_SPEED_10G RTE_BIT32(8)
300 #define RTE_ETH_LINK_SPEED_20G RTE_BIT32(9)
301 #define RTE_ETH_LINK_SPEED_25G RTE_BIT32(10)
302 #define RTE_ETH_LINK_SPEED_40G RTE_BIT32(11)
303 #define RTE_ETH_LINK_SPEED_50G RTE_BIT32(12)
304 #define RTE_ETH_LINK_SPEED_56G RTE_BIT32(13)
305 #define RTE_ETH_LINK_SPEED_100G RTE_BIT32(14)
306 #define RTE_ETH_LINK_SPEED_200G RTE_BIT32(15)
307 #define RTE_ETH_LINK_SPEED_400G RTE_BIT32(16)
313 #define RTE_ETH_SPEED_NUM_NONE 0
314 #define RTE_ETH_SPEED_NUM_10M 10
315 #define RTE_ETH_SPEED_NUM_100M 100
316 #define RTE_ETH_SPEED_NUM_1G 1000
317 #define RTE_ETH_SPEED_NUM_2_5G 2500
318 #define RTE_ETH_SPEED_NUM_5G 5000
319 #define RTE_ETH_SPEED_NUM_10G 10000
320 #define RTE_ETH_SPEED_NUM_20G 20000
321 #define RTE_ETH_SPEED_NUM_25G 25000
322 #define RTE_ETH_SPEED_NUM_40G 40000
323 #define RTE_ETH_SPEED_NUM_50G 50000
324 #define RTE_ETH_SPEED_NUM_56G 56000
325 #define RTE_ETH_SPEED_NUM_100G 100000
326 #define RTE_ETH_SPEED_NUM_200G 200000
327 #define RTE_ETH_SPEED_NUM_400G 400000
328 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX
334 struct rte_eth_link {
335  union {
336  RTE_ATOMIC(uint64_t) val64;
337  __extension__
338  struct {
339  uint32_t link_speed;
340  uint16_t link_duplex : 1;
341  uint16_t link_autoneg : 1;
342  uint16_t link_status : 1;
343  };
344  };
345 };
346 
350 #define RTE_ETH_LINK_HALF_DUPLEX 0
351 #define RTE_ETH_LINK_FULL_DUPLEX 1
352 #define RTE_ETH_LINK_DOWN 0
353 #define RTE_ETH_LINK_UP 1
354 #define RTE_ETH_LINK_FIXED 0
355 #define RTE_ETH_LINK_AUTONEG 1
356 #define RTE_ETH_LINK_MAX_STR_LEN 40
364  uint8_t pthresh;
365  uint8_t hthresh;
366  uint8_t wthresh;
367 };
368 
372 #define RTE_ETH_MQ_RX_RSS_FLAG RTE_BIT32(0)
373 #define RTE_ETH_MQ_RX_DCB_FLAG RTE_BIT32(1)
374 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)
384 
391 
401 };
402 
412 };
413 
420  uint32_t mtu;
428  uint64_t offloads;
429 
430  uint64_t reserved_64s[2];
431  void *reserved_ptrs[2];
432 };
433 
439  RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
442  RTE_ETH_VLAN_TYPE_MAX,
443 };
444 
450  uint64_t ids[64];
451 };
452 
474  RTE_ETH_HASH_FUNCTION_MAX,
475 };
476 
477 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
478 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
479 
497  uint8_t *rss_key;
498  uint8_t rss_key_len;
503  uint64_t rss_hf;
505 };
506 
507 /*
508  * A packet can be identified by hardware as different flow types. Different
509  * NIC hardware may support different flow types.
510  * Basically, the NIC hardware identifies the flow type as deep protocol as
511  * possible, and exclusively. For example, if a packet is identified as
512  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
513  * though it is an actual IPV4 packet.
514  */
515 #define RTE_ETH_FLOW_UNKNOWN 0
516 #define RTE_ETH_FLOW_RAW 1
517 #define RTE_ETH_FLOW_IPV4 2
518 #define RTE_ETH_FLOW_FRAG_IPV4 3
519 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP 4
520 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP 5
521 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP 6
522 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER 7
523 #define RTE_ETH_FLOW_IPV6 8
524 #define RTE_ETH_FLOW_FRAG_IPV6 9
525 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP 10
526 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP 11
527 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP 12
528 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
529 #define RTE_ETH_FLOW_L2_PAYLOAD 14
530 #define RTE_ETH_FLOW_IPV6_EX 15
531 #define RTE_ETH_FLOW_IPV6_TCP_EX 16
532 #define RTE_ETH_FLOW_IPV6_UDP_EX 17
534 #define RTE_ETH_FLOW_PORT 18
535 #define RTE_ETH_FLOW_VXLAN 19
536 #define RTE_ETH_FLOW_GENEVE 20
537 #define RTE_ETH_FLOW_NVGRE 21
538 #define RTE_ETH_FLOW_VXLAN_GPE 22
539 #define RTE_ETH_FLOW_GTPU 23
540 #define RTE_ETH_FLOW_MAX 24
541 
542 /*
543  * Below macros are defined for RSS offload types, they can be used to
544  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
545  */
546 #define RTE_ETH_RSS_IPV4 RTE_BIT64(2)
547 #define RTE_ETH_RSS_FRAG_IPV4 RTE_BIT64(3)
548 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP RTE_BIT64(4)
549 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP RTE_BIT64(5)
550 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP RTE_BIT64(6)
551 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
552 #define RTE_ETH_RSS_IPV6 RTE_BIT64(8)
553 #define RTE_ETH_RSS_FRAG_IPV6 RTE_BIT64(9)
554 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP RTE_BIT64(10)
555 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP RTE_BIT64(11)
556 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP RTE_BIT64(12)
557 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
558 #define RTE_ETH_RSS_L2_PAYLOAD RTE_BIT64(14)
559 #define RTE_ETH_RSS_IPV6_EX RTE_BIT64(15)
560 #define RTE_ETH_RSS_IPV6_TCP_EX RTE_BIT64(16)
561 #define RTE_ETH_RSS_IPV6_UDP_EX RTE_BIT64(17)
562 #define RTE_ETH_RSS_PORT RTE_BIT64(18)
563 #define RTE_ETH_RSS_VXLAN RTE_BIT64(19)
564 #define RTE_ETH_RSS_GENEVE RTE_BIT64(20)
565 #define RTE_ETH_RSS_NVGRE RTE_BIT64(21)
566 #define RTE_ETH_RSS_GTPU RTE_BIT64(23)
567 #define RTE_ETH_RSS_ETH RTE_BIT64(24)
568 #define RTE_ETH_RSS_S_VLAN RTE_BIT64(25)
569 #define RTE_ETH_RSS_C_VLAN RTE_BIT64(26)
570 #define RTE_ETH_RSS_ESP RTE_BIT64(27)
571 #define RTE_ETH_RSS_AH RTE_BIT64(28)
572 #define RTE_ETH_RSS_L2TPV3 RTE_BIT64(29)
573 #define RTE_ETH_RSS_PFCP RTE_BIT64(30)
574 #define RTE_ETH_RSS_PPPOE RTE_BIT64(31)
575 #define RTE_ETH_RSS_ECPRI RTE_BIT64(32)
576 #define RTE_ETH_RSS_MPLS RTE_BIT64(33)
577 #define RTE_ETH_RSS_IPV4_CHKSUM RTE_BIT64(34)
578 
591 #define RTE_ETH_RSS_L4_CHKSUM RTE_BIT64(35)
592 
593 #define RTE_ETH_RSS_L2TPV2 RTE_BIT64(36)
594 
595 /*
596  * We use the following macros to combine with above RTE_ETH_RSS_* for
597  * more specific input set selection. These bits are defined starting
598  * from the high end of the 64 bits.
599  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
600  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
601  * the same level are used simultaneously, it is the same case as none of
602  * them are added.
603  */
604 #define RTE_ETH_RSS_L3_SRC_ONLY RTE_BIT64(63)
605 #define RTE_ETH_RSS_L3_DST_ONLY RTE_BIT64(62)
606 #define RTE_ETH_RSS_L4_SRC_ONLY RTE_BIT64(61)
607 #define RTE_ETH_RSS_L4_DST_ONLY RTE_BIT64(60)
608 #define RTE_ETH_RSS_L2_SRC_ONLY RTE_BIT64(59)
609 #define RTE_ETH_RSS_L2_DST_ONLY RTE_BIT64(58)
610 
611 /*
612  * Only select IPV6 address prefix as RSS input set according to
613  * https://tools.ietf.org/html/rfc6052
614  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
615  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
616  */
617 #define RTE_ETH_RSS_L3_PRE32 RTE_BIT64(57)
618 #define RTE_ETH_RSS_L3_PRE40 RTE_BIT64(56)
619 #define RTE_ETH_RSS_L3_PRE48 RTE_BIT64(55)
620 #define RTE_ETH_RSS_L3_PRE56 RTE_BIT64(54)
621 #define RTE_ETH_RSS_L3_PRE64 RTE_BIT64(53)
622 #define RTE_ETH_RSS_L3_PRE96 RTE_BIT64(52)
623 
624 /*
625  * Use the following macros to combine with the above layers
626  * to choose inner and outer layers or both for RSS computation.
627  * Bits 50 and 51 are reserved for this.
628  */
629 
637 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT (UINT64_C(0) << 50)
638 
643 #define RTE_ETH_RSS_LEVEL_OUTERMOST (UINT64_C(1) << 50)
644 
649 #define RTE_ETH_RSS_LEVEL_INNERMOST (UINT64_C(2) << 50)
650 #define RTE_ETH_RSS_LEVEL_MASK (UINT64_C(3) << 50)
651 
652 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
653 
664 static inline uint64_t
665 rte_eth_rss_hf_refine(uint64_t rss_hf)
666 {
667  if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
668  rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
669 
670  if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
671  rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
672 
673  return rss_hf;
674 }
675 
676 #define RTE_ETH_RSS_IPV6_PRE32 ( \
677  RTE_ETH_RSS_IPV6 | \
678  RTE_ETH_RSS_L3_PRE32)
679 
680 #define RTE_ETH_RSS_IPV6_PRE40 ( \
681  RTE_ETH_RSS_IPV6 | \
682  RTE_ETH_RSS_L3_PRE40)
683 
684 #define RTE_ETH_RSS_IPV6_PRE48 ( \
685  RTE_ETH_RSS_IPV6 | \
686  RTE_ETH_RSS_L3_PRE48)
687 
688 #define RTE_ETH_RSS_IPV6_PRE56 ( \
689  RTE_ETH_RSS_IPV6 | \
690  RTE_ETH_RSS_L3_PRE56)
691 
692 #define RTE_ETH_RSS_IPV6_PRE64 ( \
693  RTE_ETH_RSS_IPV6 | \
694  RTE_ETH_RSS_L3_PRE64)
695 
696 #define RTE_ETH_RSS_IPV6_PRE96 ( \
697  RTE_ETH_RSS_IPV6 | \
698  RTE_ETH_RSS_L3_PRE96)
699 
700 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
701  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
702  RTE_ETH_RSS_L3_PRE32)
703 
704 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
705  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
706  RTE_ETH_RSS_L3_PRE40)
707 
708 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
709  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
710  RTE_ETH_RSS_L3_PRE48)
711 
712 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
713  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
714  RTE_ETH_RSS_L3_PRE56)
715 
716 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
717  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
718  RTE_ETH_RSS_L3_PRE64)
719 
720 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
721  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
722  RTE_ETH_RSS_L3_PRE96)
723 
724 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
725  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
726  RTE_ETH_RSS_L3_PRE32)
727 
728 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
729  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
730  RTE_ETH_RSS_L3_PRE40)
731 
732 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
733  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
734  RTE_ETH_RSS_L3_PRE48)
735 
736 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
737  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
738  RTE_ETH_RSS_L3_PRE56)
739 
740 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
741  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
742  RTE_ETH_RSS_L3_PRE64)
743 
744 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
745  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
746  RTE_ETH_RSS_L3_PRE96)
747 
748 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
749  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
750  RTE_ETH_RSS_L3_PRE32)
751 
752 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
753  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
754  RTE_ETH_RSS_L3_PRE40)
755 
756 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
757  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
758  RTE_ETH_RSS_L3_PRE48)
759 
760 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
761  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
762  RTE_ETH_RSS_L3_PRE56)
763 
764 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
765  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
766  RTE_ETH_RSS_L3_PRE64)
767 
768 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
769  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
770  RTE_ETH_RSS_L3_PRE96)
771 
772 #define RTE_ETH_RSS_IP ( \
773  RTE_ETH_RSS_IPV4 | \
774  RTE_ETH_RSS_FRAG_IPV4 | \
775  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
776  RTE_ETH_RSS_IPV6 | \
777  RTE_ETH_RSS_FRAG_IPV6 | \
778  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
779  RTE_ETH_RSS_IPV6_EX)
780 
781 #define RTE_ETH_RSS_UDP ( \
782  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
783  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
784  RTE_ETH_RSS_IPV6_UDP_EX)
785 
786 #define RTE_ETH_RSS_TCP ( \
787  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
788  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
789  RTE_ETH_RSS_IPV6_TCP_EX)
790 
791 #define RTE_ETH_RSS_SCTP ( \
792  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
793  RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
794 
795 #define RTE_ETH_RSS_TUNNEL ( \
796  RTE_ETH_RSS_VXLAN | \
797  RTE_ETH_RSS_GENEVE | \
798  RTE_ETH_RSS_NVGRE)
799 
800 #define RTE_ETH_RSS_VLAN ( \
801  RTE_ETH_RSS_S_VLAN | \
802  RTE_ETH_RSS_C_VLAN)
803 
805 #define RTE_ETH_RSS_PROTO_MASK ( \
806  RTE_ETH_RSS_IPV4 | \
807  RTE_ETH_RSS_FRAG_IPV4 | \
808  RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
809  RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
810  RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
811  RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
812  RTE_ETH_RSS_IPV6 | \
813  RTE_ETH_RSS_FRAG_IPV6 | \
814  RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
815  RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
816  RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
817  RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
818  RTE_ETH_RSS_L2_PAYLOAD | \
819  RTE_ETH_RSS_IPV6_EX | \
820  RTE_ETH_RSS_IPV6_TCP_EX | \
821  RTE_ETH_RSS_IPV6_UDP_EX | \
822  RTE_ETH_RSS_PORT | \
823  RTE_ETH_RSS_VXLAN | \
824  RTE_ETH_RSS_GENEVE | \
825  RTE_ETH_RSS_NVGRE | \
826  RTE_ETH_RSS_MPLS)
827 
828 /*
829  * Definitions used for redirection table entry size.
830  * Some RSS RETA sizes may not be supported by some drivers, check the
831  * documentation or the description of relevant functions for more details.
832  */
833 #define RTE_ETH_RSS_RETA_SIZE_64 64
834 #define RTE_ETH_RSS_RETA_SIZE_128 128
835 #define RTE_ETH_RSS_RETA_SIZE_256 256
836 #define RTE_ETH_RSS_RETA_SIZE_512 512
837 #define RTE_ETH_RETA_GROUP_SIZE 64
838 
840 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS 64
841 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
842 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES 128
843 #define RTE_ETH_DCB_NUM_QUEUES 128
847 #define RTE_ETH_DCB_PG_SUPPORT RTE_BIT32(0)
848 #define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)
852 #define RTE_ETH_VLAN_STRIP_OFFLOAD 0x0001
853 #define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
854 #define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
855 #define RTE_ETH_QINQ_STRIP_OFFLOAD 0x0008
857 #define RTE_ETH_VLAN_STRIP_MASK 0x0001
858 #define RTE_ETH_VLAN_FILTER_MASK 0x0002
859 #define RTE_ETH_VLAN_EXTEND_MASK 0x0004
860 #define RTE_ETH_QINQ_STRIP_MASK 0x0008
861 #define RTE_ETH_VLAN_ID_MAX 0x0FFF
864 /* Definitions used for receive MAC address */
865 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR 128
867 /* Definitions used for unicast hash */
868 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128
874 #define RTE_ETH_VMDQ_ACCEPT_UNTAG RTE_BIT32(0)
876 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC RTE_BIT32(1)
878 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC RTE_BIT32(2)
880 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
882 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)
893  uint64_t mask;
895  uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
896 };
897 
904  RTE_ETH_8_TCS = 8
905 };
906 
915  RTE_ETH_64_POOLS = 64
916 };
917 
918 /* This structure may be extended in future. */
919 struct rte_eth_dcb_rx_conf {
920  enum rte_eth_nb_tcs nb_tcs;
922  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
923 };
924 
925 struct rte_eth_vmdq_dcb_tx_conf {
926  enum rte_eth_nb_pools nb_queue_pools;
928  uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
929 };
930 
931 struct rte_eth_dcb_tx_conf {
932  enum rte_eth_nb_tcs nb_tcs;
935 };
936 
937 struct rte_eth_vmdq_tx_conf {
938  enum rte_eth_nb_pools nb_queue_pools;
939 };
940 
955  uint8_t default_pool;
956  uint8_t nb_pool_maps;
957  struct {
958  uint16_t vlan_id;
959  uint64_t pools;
963 };
964 
986  uint8_t default_pool;
988  uint8_t nb_pool_maps;
989  uint32_t rx_mode;
990  struct {
991  uint16_t vlan_id;
992  uint64_t pools;
994 };
995 
1006  uint64_t offloads;
1007 
1008  uint16_t pvid;
1009  __extension__
1010  uint8_t
1016 
1017  uint64_t reserved_64s[2];
1018  void *reserved_ptrs[2];
1019 };
1020 
1082  struct rte_mempool *mp;
1083  uint16_t length;
1084  uint16_t offset;
1096  uint32_t proto_hdr;
1097 };
1098 
1106  /* The settings for buffer split offload. */
1107  struct rte_eth_rxseg_split split;
1108  /* The other features settings should be added here. */
1109 };
1110 
1115  struct rte_eth_thresh rx_thresh;
1116  uint16_t rx_free_thresh;
1117  uint8_t rx_drop_en;
1119  uint16_t rx_nseg;
1126  uint16_t share_group;
1127  uint16_t share_qid;
1133  uint64_t offloads;
1142 
1163  uint16_t rx_nmempool;
1165  uint64_t reserved_64s[2];
1166  void *reserved_ptrs[2];
1167 };
1168 
1173  struct rte_eth_thresh tx_thresh;
1174  uint16_t tx_rs_thresh;
1175  uint16_t tx_free_thresh;
1184  uint64_t offloads;
1185 
1186  uint64_t reserved_64s[2];
1187  void *reserved_ptrs[2];
1188 };
1189 
1202 
1207  uint32_t rte_memory:1;
1208 
1209  uint32_t reserved:30;
1210 };
1211 
1220  uint16_t max_nb_queues;
1222  uint16_t max_rx_2_tx;
1224  uint16_t max_tx_2_rx;
1225  uint16_t max_nb_desc;
1228 };
1229 
1230 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1231 
1239  uint16_t port;
1240  uint16_t queue;
1241 };
1242 
1250  uint32_t peer_count:16;
1261  uint32_t tx_explicit:1;
1262 
1274  uint32_t manual_bind:1;
1275 
1288 
1300  uint32_t use_rte_memory:1;
1301 
1312  uint32_t force_memory:1;
1313 
1314  uint32_t reserved:11;
1316  struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1317 };
1318 
1323  uint16_t nb_max;
1324  uint16_t nb_min;
1325  uint16_t nb_align;
1335  uint16_t nb_seg_max;
1336 
1348  uint16_t nb_mtu_seg_max;
1349 };
1350 
1359 };
1360 
1367  uint32_t high_water;
1368  uint32_t low_water;
1369  uint16_t pause_time;
1370  uint16_t send_xon;
1371  enum rte_eth_fc_mode mode;
1373  uint8_t autoneg;
1374 };
1375 
1382  struct rte_eth_fc_conf fc;
1383  uint8_t priority;
1384 };
1385 
1396  uint8_t tc_max;
1399 };
1400 
1419  enum rte_eth_fc_mode mode;
1421  struct {
1422  uint16_t tx_qid;
1426  uint8_t tc;
1427  } rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1428 
1429  struct {
1430  uint16_t pause_time;
1431  uint16_t rx_qid;
1435  uint8_t tc;
1436  } tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1437 };
1438 
1444  RTE_ETH_TUNNEL_TYPE_NONE = 0,
1445  RTE_ETH_TUNNEL_TYPE_VXLAN,
1446  RTE_ETH_TUNNEL_TYPE_GENEVE,
1447  RTE_ETH_TUNNEL_TYPE_TEREDO,
1448  RTE_ETH_TUNNEL_TYPE_NVGRE,
1449  RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1450  RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1451  RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1452  RTE_ETH_TUNNEL_TYPE_ECPRI,
1453  RTE_ETH_TUNNEL_TYPE_MAX,
1454 };
1455 
1456 /* Deprecated API file for rte_eth_dev_filter_* functions */
1457 #include "rte_eth_ctrl.h"
1458 
1469  uint16_t udp_port;
1470  uint8_t prot_type;
1471 };
1472 
1478  uint32_t lsc:1;
1480  uint32_t rxq:1;
1482  uint32_t rmv:1;
1483 };
1484 
1485 #define rte_intr_conf rte_eth_intr_conf
1486 
1493  uint32_t link_speeds;
1500  struct rte_eth_rxmode rxmode;
1501  struct rte_eth_txmode txmode;
1502  uint32_t lpbk_mode;
1507  struct {
1508  struct rte_eth_rss_conf rss_conf;
1512  struct rte_eth_dcb_rx_conf dcb_rx_conf;
1516  union {
1518  struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1520  struct rte_eth_dcb_tx_conf dcb_tx_conf;
1522  struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1527  struct rte_eth_intr_conf intr_conf;
1528 };
1529 
1533 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP RTE_BIT64(0)
1534 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1535 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1536 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1537 #define RTE_ETH_RX_OFFLOAD_TCP_LRO RTE_BIT64(4)
1538 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP RTE_BIT64(5)
1539 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1540 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP RTE_BIT64(7)
1541 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER RTE_BIT64(9)
1542 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND RTE_BIT64(10)
1543 #define RTE_ETH_RX_OFFLOAD_SCATTER RTE_BIT64(13)
1549 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP RTE_BIT64(14)
1550 #define RTE_ETH_RX_OFFLOAD_SECURITY RTE_BIT64(15)
1551 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC RTE_BIT64(16)
1552 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM RTE_BIT64(17)
1553 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(18)
1554 #define RTE_ETH_RX_OFFLOAD_RSS_HASH RTE_BIT64(19)
1555 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT RTE_BIT64(20)
1556 
1557 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1558  RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1559  RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1560 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1561  RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1562  RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1563  RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1564 
1565 /*
1566  * If new Rx offload capabilities are defined, they also must be
1567  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1568  */
1569 
1573 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT RTE_BIT64(0)
1574 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM RTE_BIT64(1)
1575 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM RTE_BIT64(2)
1576 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM RTE_BIT64(3)
1577 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM RTE_BIT64(4)
1578 #define RTE_ETH_TX_OFFLOAD_TCP_TSO RTE_BIT64(5)
1579 #define RTE_ETH_TX_OFFLOAD_UDP_TSO RTE_BIT64(6)
1580 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)
1581 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT RTE_BIT64(8)
1582 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO RTE_BIT64(9)
1583 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO RTE_BIT64(10)
1584 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO RTE_BIT64(11)
1585 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO RTE_BIT64(12)
1586 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT RTE_BIT64(13)
1591 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE RTE_BIT64(14)
1593 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS RTE_BIT64(15)
1599 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE RTE_BIT64(16)
1600 #define RTE_ETH_TX_OFFLOAD_SECURITY RTE_BIT64(17)
1606 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO RTE_BIT64(18)
1612 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO RTE_BIT64(19)
1614 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM RTE_BIT64(20)
1620 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1621 /*
1622  * If new Tx offload capabilities are defined, they also must be
1623  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1624  */
1625 
1630 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1632 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1642 #define RTE_ETH_DEV_CAPA_RXQ_SHARE RTE_BIT64(2)
1644 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP RTE_BIT64(3)
1646 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1649 /*
1650  * Fallback default preferred Rx/Tx port parameters.
1651  * These are used if an application requests default parameters
1652  * but the PMD does not provide preferred values.
1653  */
1654 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1655 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1656 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1657 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1658 
1665  uint16_t burst_size;
1666  uint16_t ring_size;
1667  uint16_t nb_queues;
1668 };
1669 
1674 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)
1675 
1680  const char *name;
1681  uint16_t domain_id;
1689  uint16_t port_id;
1695  uint16_t rx_domain;
1696 };
1697 
1705  __extension__
1706  uint32_t multi_pools:1;
1707  uint32_t offset_allowed:1;
1708  uint32_t offset_align_log2:4;
1709  uint16_t max_nseg;
1710  uint16_t reserved;
1711 };
1712 
1725 };
1726 
1747 };
1748 
1755  struct rte_device *device;
1756  const char *driver_name;
1757  unsigned int if_index;
1759  uint16_t min_mtu;
1760  uint16_t max_mtu;
1761  const uint32_t *dev_flags;
1763  uint32_t min_rx_bufsize;
1770  uint32_t max_rx_bufsize;
1771  uint32_t max_rx_pktlen;
1774  uint16_t max_rx_queues;
1775  uint16_t max_tx_queues;
1776  uint32_t max_mac_addrs;
1779  uint16_t max_vfs;
1780  uint16_t max_vmdq_pools;
1791  uint16_t reta_size;
1792  uint8_t hash_key_size;
1793  uint32_t rss_algo_capa;
1798  uint16_t vmdq_queue_base;
1799  uint16_t vmdq_queue_num;
1800  uint16_t vmdq_pool_base;
1801  struct rte_eth_desc_lim rx_desc_lim;
1802  struct rte_eth_desc_lim tx_desc_lim;
1803  uint32_t speed_capa;
1805  uint16_t nb_rx_queues;
1806  uint16_t nb_tx_queues;
1819  uint64_t dev_capa;
1827 
1828  uint64_t reserved_64s[2];
1829  void *reserved_ptrs[2];
1830 };
1831 
1833 #define RTE_ETH_QUEUE_STATE_STOPPED 0
1834 #define RTE_ETH_QUEUE_STATE_STARTED 1
1835 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2
1843  struct rte_mempool *mp;
1844  struct rte_eth_rxconf conf;
1845  uint8_t scattered_rx;
1846  uint8_t queue_state;
1847  uint16_t nb_desc;
1848  uint16_t rx_buf_size;
1855  uint8_t avail_thresh;
1857 
1863  struct rte_eth_txconf conf;
1864  uint16_t nb_desc;
1865  uint8_t queue_state;
1867 
1877  struct rte_mbuf **mbuf_ring;
1878  struct rte_mempool *mp;
1879  uint16_t *refill_head;
1880  uint16_t *receive_tail;
1881  uint16_t mbuf_ring_size;
1891 
1892 /* Generic Burst mode flag definition, values can be ORed. */
1893 
1899 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1900 
1906  uint64_t flags;
1908 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024
1910 };
1911 
1913 #define RTE_ETH_XSTATS_NAME_SIZE 64
1914 
1925  uint64_t id;
1926  uint64_t value;
1927 };
1928 
1945 };
1946 
1947 #define RTE_ETH_DCB_NUM_TCS 8
1948 #define RTE_ETH_MAX_VMDQ_POOL 64
1949 
1956  struct {
1957  uint16_t base;
1958  uint16_t nb_queue;
1959  } tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1961  struct {
1962  uint16_t base;
1963  uint16_t nb_queue;
1964  } tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1965 };
1966 
1972  uint8_t nb_tcs;
1974  uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS];
1977 };
1978 
1989 };
1990 
1991 /* Translate from FEC mode to FEC capa */
1992 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1993 
1994 /* This macro indicates FEC capa mask */
1995 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1996 
1997 /* A structure used to get capabilities per link speed */
1998 struct rte_eth_fec_capa {
1999  uint32_t speed;
2000  uint32_t capa;
2001 };
2002 
2003 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2004 
2005 /* Macros to check for valid port */
2006 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2007  if (!rte_eth_dev_is_valid_port(port_id)) { \
2008  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
2009  return retval; \
2010  } \
2011 } while (0)
2012 
2013 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2014  if (!rte_eth_dev_is_valid_port(port_id)) { \
2015  RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
2016  return; \
2017  } \
2018 } while (0)
2019 
2042 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2043  struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2044  void *user_param);
2045 
2066 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2067  struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2068 
2079 };
2080 
2081 struct rte_eth_dev_sriov {
2082  uint8_t active;
2083  uint8_t nb_q_per_pool;
2084  uint16_t def_vmdq_idx;
2085  uint16_t def_pool_q_idx;
2086 };
2087 #define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)
2088 
2089 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2090 
2091 #define RTE_ETH_DEV_NO_OWNER 0
2092 
2093 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2094 
2095 struct rte_eth_dev_owner {
2096  uint64_t id;
2097  char name[RTE_ETH_MAX_OWNER_NAME_LEN];
2098 };
2099 
2105 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE RTE_BIT32(0)
2107 #define RTE_ETH_DEV_INTR_LSC RTE_BIT32(1)
2109 #define RTE_ETH_DEV_BONDING_MEMBER RTE_BIT32(2)
2111 #define RTE_ETH_DEV_INTR_RMV RTE_BIT32(3)
2113 #define RTE_ETH_DEV_REPRESENTOR RTE_BIT32(4)
2115 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR RTE_BIT32(5)
2120 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2134 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2135  const uint64_t owner_id);
2136 
2140 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2141  for (p = rte_eth_find_next_owned_by(0, o); \
2142  (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2143  p = rte_eth_find_next_owned_by(p + 1, o))
2144 
2153 uint16_t rte_eth_find_next(uint16_t port_id);
2154 
2158 #define RTE_ETH_FOREACH_DEV(p) \
2159  RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2160 
2172 uint16_t
2173 rte_eth_find_next_of(uint16_t port_id_start,
2174  const struct rte_device *parent);
2175 
2184 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2185  for (port_id = rte_eth_find_next_of(0, parent); \
2186  port_id < RTE_MAX_ETHPORTS; \
2187  port_id = rte_eth_find_next_of(port_id + 1, parent))
2188 
2200 uint16_t
2201 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2202 
2213 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2214  for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2215  port_id < RTE_MAX_ETHPORTS; \
2216  port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2217 
2228 int rte_eth_dev_owner_new(uint64_t *owner_id);
2229 
2240 int rte_eth_dev_owner_set(const uint16_t port_id,
2241  const struct rte_eth_dev_owner *owner);
2242 
2253 int rte_eth_dev_owner_unset(const uint16_t port_id,
2254  const uint64_t owner_id);
2255 
2264 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2265 
2276 int rte_eth_dev_owner_get(const uint16_t port_id,
2277  struct rte_eth_dev_owner *owner);
2278 
2290 
2300 
2312 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2313 
2322 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2323 
2332 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2333 
2345 __rte_experimental
2346 const char *rte_eth_dev_capability_name(uint64_t capability);
2347 
2387 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2388  uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2389 
2398 int
2399 rte_eth_dev_is_removed(uint16_t port_id);
2400 
2463 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2464  uint16_t nb_rx_desc, unsigned int socket_id,
2465  const struct rte_eth_rxconf *rx_conf,
2466  struct rte_mempool *mb_pool);
2467 
2495 __rte_experimental
2497  (uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2498  const struct rte_eth_hairpin_conf *conf);
2499 
2548 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2549  uint16_t nb_tx_desc, unsigned int socket_id,
2550  const struct rte_eth_txconf *tx_conf);
2551 
2577 __rte_experimental
2579  (uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2580  const struct rte_eth_hairpin_conf *conf);
2581 
2608 __rte_experimental
2609 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2610  size_t len, uint32_t direction);
2611 
2634 __rte_experimental
2635 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2636 
2661 __rte_experimental
2662 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2663 
2679 __rte_experimental
2680 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2681 
2709 __rte_experimental
2710 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2711  uint8_t affinity);
2712 
2725 int rte_eth_dev_socket_id(uint16_t port_id);
2726 
2736 int rte_eth_dev_is_valid_port(uint16_t port_id);
2737 
2754 __rte_experimental
2755 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2756 
2773 __rte_experimental
2774 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2775 
2793 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2794 
2811 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2812 
2830 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2831 
2848 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2849 
2873 int rte_eth_dev_start(uint16_t port_id);
2874 
2888 int rte_eth_dev_stop(uint16_t port_id);
2889 
2902 int rte_eth_dev_set_link_up(uint16_t port_id);
2903 
2913 int rte_eth_dev_set_link_down(uint16_t port_id);
2914 
2925 int rte_eth_dev_close(uint16_t port_id);
2926 
2964 int rte_eth_dev_reset(uint16_t port_id);
2965 
2977 int rte_eth_promiscuous_enable(uint16_t port_id);
2978 
2990 int rte_eth_promiscuous_disable(uint16_t port_id);
2991 
3002 int rte_eth_promiscuous_get(uint16_t port_id);
3003 
3015 int rte_eth_allmulticast_enable(uint16_t port_id);
3016 
3028 int rte_eth_allmulticast_disable(uint16_t port_id);
3029 
3040 int rte_eth_allmulticast_get(uint16_t port_id);
3041 
3059 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3060 
3075 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3076 
3090 __rte_experimental
3091 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3092 
3111 __rte_experimental
3112 int rte_eth_link_to_str(char *str, size_t len,
3113  const struct rte_eth_link *eth_link);
3114 
3132 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3133 
3145 int rte_eth_stats_reset(uint16_t port_id);
3146 
3176 int rte_eth_xstats_get_names(uint16_t port_id,
3177  struct rte_eth_xstat_name *xstats_names,
3178  unsigned int size);
3179 
3213 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3214  unsigned int n);
3215 
3240 int
3242  struct rte_eth_xstat_name *xstats_names, unsigned int size,
3243  uint64_t *ids);
3244 
3269 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3270  uint64_t *values, unsigned int size);
3271 
3291 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3292  uint64_t *id);
3293 
3306 int rte_eth_xstats_reset(uint16_t port_id);
3307 
3327  uint16_t tx_queue_id, uint8_t stat_idx);
3328 
3348  uint16_t rx_queue_id,
3349  uint8_t stat_idx);
3350 
3364 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3365 
3386 __rte_experimental
3387 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3388  unsigned int num);
3389 
3409 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3410 
3426 __rte_experimental
3427 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3428 
3449 int rte_eth_dev_fw_version_get(uint16_t port_id,
3450  char *fw_version, size_t fw_size);
3451 
3491 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3492  uint32_t *ptypes, int num);
3523 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3524  uint32_t *set_ptypes, unsigned int num);
3525 
3538 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3539 
3557 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3558 
3578 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3579 
3598 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3599  int on);
3600 
3617 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3618  enum rte_vlan_type vlan_type,
3619  uint16_t tag_type);
3620 
3638 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3639 
3653 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3654 
3669 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3670 
3696 __rte_experimental
3697 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3698  uint8_t avail_thresh);
3699 
3726 __rte_experimental
3727 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3728  uint8_t *avail_thresh);
3729 
3730 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3731  void *userdata);
3732 
3738  buffer_tx_error_fn error_callback;
3739  void *error_userdata;
3740  uint16_t size;
3741  uint16_t length;
3743  struct rte_mbuf *pkts[];
3744 };
3745 
3752 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3753  (sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3754 
3765 int
3766 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3767 
3792 int
3794  buffer_tx_error_fn callback, void *userdata);
3795 
3818 void
3819 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3820  void *userdata);
3821 
3845 void
3846 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3847  void *userdata);
3848 
3874 int
3875 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3876 
3909 };
3910 
3930 };
3931 
3950  uint64_t metadata;
3951 };
3952 
3990 };
3991 
4016  uint64_t metadata;
4017 };
4018 
4101 };
4102 
4104 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4105  enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4106 
4124 int rte_eth_dev_callback_register(uint16_t port_id,
4125  enum rte_eth_event_type event,
4126  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4127 
4146 int rte_eth_dev_callback_unregister(uint16_t port_id,
4147  enum rte_eth_event_type event,
4148  rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4149 
4171 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4172 
4193 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4194 
4212 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4213 
4235 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4236  int epfd, int op, void *data);
4237 
4252 int
4253 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4254 
4268 int rte_eth_led_on(uint16_t port_id);
4269 
4283 int rte_eth_led_off(uint16_t port_id);
4284 
4313 __rte_experimental
4314 int rte_eth_fec_get_capability(uint16_t port_id,
4315  struct rte_eth_fec_capa *speed_fec_capa,
4316  unsigned int num);
4317 
4338 __rte_experimental
4339 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4340 
4364 __rte_experimental
4365 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4366 
4381 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4382  struct rte_eth_fc_conf *fc_conf);
4383 
4398 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4399  struct rte_eth_fc_conf *fc_conf);
4400 
4417  struct rte_eth_pfc_conf *pfc_conf);
4418 
4437 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4438  uint32_t pool);
4439 
4457 __rte_experimental
4459  struct rte_eth_pfc_queue_info *pfc_queue_info);
4460 
4484 __rte_experimental
4486  struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4487 
4502 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4503  struct rte_ether_addr *mac_addr);
4504 
4523  struct rte_ether_addr *mac_addr);
4524 
4542 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4543  struct rte_eth_rss_reta_entry64 *reta_conf,
4544  uint16_t reta_size);
4545 
4564 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4565  struct rte_eth_rss_reta_entry64 *reta_conf,
4566  uint16_t reta_size);
4567 
4587 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4588  uint8_t on);
4589 
4608 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4609 
4626 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4627  uint32_t tx_rate);
4628 
4643 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4644  struct rte_eth_rss_conf *rss_conf);
4645 
4661 int
4663  struct rte_eth_rss_conf *rss_conf);
4664 
4677 __rte_experimental
4678 const char *
4680 
4705 int
4707  struct rte_eth_udp_tunnel *tunnel_udp);
4708 
4728 int
4730  struct rte_eth_udp_tunnel *tunnel_udp);
4731 
4746 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4747  struct rte_eth_dcb_info *dcb_info);
4748 
4749 struct rte_eth_rxtx_callback;
4750 
4776 const struct rte_eth_rxtx_callback *
4777 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4778  rte_rx_callback_fn fn, void *user_param);
4779 
4806 const struct rte_eth_rxtx_callback *
4807 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4808  rte_rx_callback_fn fn, void *user_param);
4809 
4835 const struct rte_eth_rxtx_callback *
4836 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4837  rte_tx_callback_fn fn, void *user_param);
4838 
4872 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4873  const struct rte_eth_rxtx_callback *user_cb);
4874 
4908 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4909  const struct rte_eth_rxtx_callback *user_cb);
4910 
4930 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4931  struct rte_eth_rxq_info *qinfo);
4932 
4952 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4953  struct rte_eth_txq_info *qinfo);
4954 
4975 __rte_experimental
4977  uint16_t queue_id,
4978  struct rte_eth_recycle_rxq_info *recycle_rxq_info);
4979 
4998 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4999  struct rte_eth_burst_mode *mode);
5000 
5019 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5020  struct rte_eth_burst_mode *mode);
5021 
5042 __rte_experimental
5043 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5044  struct rte_power_monitor_cond *pmc);
5045 
5064 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
5065 
5078 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5079 
5096 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5097 
5114 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5115 
5134 __rte_experimental
5135 int
5137  struct rte_eth_dev_module_info *modinfo);
5138 
5158 __rte_experimental
5159 int
5161  struct rte_dev_eeprom_info *info);
5162 
5182 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5183  struct rte_ether_addr *mc_addr_set,
5184  uint32_t nb_mc_addr);
5185 
5198 int rte_eth_timesync_enable(uint16_t port_id);
5199 
5212 int rte_eth_timesync_disable(uint16_t port_id);
5213 
5233  struct timespec *timestamp, uint32_t flags);
5234 
5251  struct timespec *timestamp);
5252 
5270 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5271 
5287 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5288 
5307 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5308 
5354 __rte_experimental
5355 int
5356 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5357 
5373 int
5374 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5375 
5392 int
5393 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5394 
5412  uint16_t *nb_rx_desc,
5413  uint16_t *nb_tx_desc);
5414 
5429 int
5430 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5431 
5441 void *
5442 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5443 
5459 __rte_experimental
5461  struct rte_eth_hairpin_cap *cap);
5462 
5472  int pf;
5473  __extension__
5474  union {
5475  int vf;
5476  int sf;
5477  };
5478  uint32_t id_base;
5479  uint32_t id_end;
5480  char name[RTE_DEV_NAME_MAX_LEN];
5481 };
5482 
5490  uint16_t controller;
5491  uint16_t pf;
5492  uint32_t nb_ranges_alloc;
5493  uint32_t nb_ranges;
5495 };
5496 
5520 __rte_experimental
5521 int rte_eth_representor_info_get(uint16_t port_id,
5522  struct rte_eth_representor_info *info);
5523 
5525 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5526 
5528 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5529 
5531 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5532 
5572 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5573 
5575 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5577 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5578 
5589  uint32_t timeout_ms;
5591  uint16_t max_frags;
5596  uint16_t flags;
5597 };
5598 
5619 __rte_experimental
5621  struct rte_eth_ip_reassembly_params *capa);
5622 
5644 __rte_experimental
5645 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5646  struct rte_eth_ip_reassembly_params *conf);
5647 
5677 __rte_experimental
5678 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5679  const struct rte_eth_ip_reassembly_params *conf);
5680 
5688 typedef struct {
5695  uint16_t time_spent;
5697  uint16_t nb_frags;
5699 
5718 __rte_experimental
5719 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5720 
5744 __rte_experimental
5745 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5746  uint16_t offset, uint16_t num, FILE *file);
5747 
5771 __rte_experimental
5772 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5773  uint16_t offset, uint16_t num, FILE *file);
5774 
5775 
5776 /* Congestion management */
5777 
5787 };
5788 
5805  uint64_t objs_supported;
5810  uint8_t rsvd[8];
5811 };
5812 
5821  enum rte_eth_cman_obj obj;
5823  enum rte_cman_mode mode;
5824  union {
5831  uint16_t rx_queue;
5838  uint8_t rsvd_obj_params[4];
5839  } obj_param;
5840  union {
5846  struct rte_cman_red_params red;
5853  uint8_t rsvd_mode_params[4];
5854  } mode_param;
5855 };
5856 
5874 __rte_experimental
5875 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5876 
5894 __rte_experimental
5895 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5896 
5913 __rte_experimental
5914 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5915 
5936 __rte_experimental
5937 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5938 
5939 #include <rte_ethdev_core.h>
5940 
5964 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5965  struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5966  void *opaque);
5967 
/**
 * Retrieve a burst of input packets from a receive queue of an Ethernet
 * device.
 *
 * Fast-path inline: resolves the driver's per-queue private data from the
 * rte_eth_fp_ops table, invokes the driver's rx_pkt_burst handler, runs any
 * registered post-Rx callbacks, and emits a trace event.
 *
 * @param port_id   Port identifier (index into rte_eth_fp_ops).
 * @param queue_id  Rx queue index on that port.
 * @param rx_pkts   Output array filled with received packet mbufs.
 * @param nb_pkts   Capacity of @p rx_pkts (maximum packets to retrieve).
 * @return Number of packets actually stored in @p rx_pkts
 *         (0 on validation failure in RTE_ETHDEV_DEBUG_RX builds).
 */
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	uint16_t nb_rx;
	struct rte_eth_fp_ops *p;
	void *qd;

#ifdef RTE_ETHDEV_DEBUG_RX
	/* Bounds-check before indexing the fast-path ops table (debug only). */
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid port_id=%u or queue_id=%u\n",
			port_id, queue_id);
		return 0;
	}
#endif

	/* fetch pointer to queue data */
	p = &rte_eth_fp_ops[port_id];
	qd = p->rxq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	if (qd == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
			queue_id, port_id);
		return 0;
	}
#endif

	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	{
		void *cb;

		/* rte_memory_order_release memory order was used when the
		 * call back was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
		 * not required.
		 */
		cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
				rte_memory_order_relaxed);
		if (unlikely(cb != NULL))
			nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
					rx_pkts, nb_rx, nb_pkts, cb);
	}
#endif

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
	return nb_rx;
}
6110 
6128 static inline int
6129 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6130 {
6131  struct rte_eth_fp_ops *p;
6132  void *qd;
6133 
6134 #ifdef RTE_ETHDEV_DEBUG_RX
6135  if (port_id >= RTE_MAX_ETHPORTS ||
6136  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6137  RTE_ETHDEV_LOG(ERR,
6138  "Invalid port_id=%u or queue_id=%u\n",
6139  port_id, queue_id);
6140  return -EINVAL;
6141  }
6142 #endif
6143 
6144  /* fetch pointer to queue data */
6145  p = &rte_eth_fp_ops[port_id];
6146  qd = p->rxq.data[queue_id];
6147 
6148 #ifdef RTE_ETHDEV_DEBUG_RX
6149  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6150  if (qd == NULL)
6151  return -EINVAL;
6152 #endif
6153 
6154  if (*p->rx_queue_count == NULL)
6155  return -ENOTSUP;
6156  return (int)(*p->rx_queue_count)(qd);
6157 }
6158 
6162 #define RTE_ETH_RX_DESC_AVAIL 0
6163 #define RTE_ETH_RX_DESC_DONE 1
6164 #define RTE_ETH_RX_DESC_UNAVAIL 2
6200 static inline int
6201 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6202  uint16_t offset)
6203 {
6204  struct rte_eth_fp_ops *p;
6205  void *qd;
6206 
6207 #ifdef RTE_ETHDEV_DEBUG_RX
6208  if (port_id >= RTE_MAX_ETHPORTS ||
6209  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6210  RTE_ETHDEV_LOG(ERR,
6211  "Invalid port_id=%u or queue_id=%u\n",
6212  port_id, queue_id);
6213  return -EINVAL;
6214  }
6215 #endif
6216 
6217  /* fetch pointer to queue data */
6218  p = &rte_eth_fp_ops[port_id];
6219  qd = p->rxq.data[queue_id];
6220 
6221 #ifdef RTE_ETHDEV_DEBUG_RX
6222  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6223  if (qd == NULL)
6224  return -ENODEV;
6225 #endif
6226  if (*p->rx_descriptor_status == NULL)
6227  return -ENOTSUP;
6228  return (*p->rx_descriptor_status)(qd, offset);
6229 }
6230 
6234 #define RTE_ETH_TX_DESC_FULL 0
6235 #define RTE_ETH_TX_DESC_DONE 1
6236 #define RTE_ETH_TX_DESC_UNAVAIL 2
6272 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6273  uint16_t queue_id, uint16_t offset)
6274 {
6275  struct rte_eth_fp_ops *p;
6276  void *qd;
6277 
6278 #ifdef RTE_ETHDEV_DEBUG_TX
6279  if (port_id >= RTE_MAX_ETHPORTS ||
6280  queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6281  RTE_ETHDEV_LOG(ERR,
6282  "Invalid port_id=%u or queue_id=%u\n",
6283  port_id, queue_id);
6284  return -EINVAL;
6285  }
6286 #endif
6287 
6288  /* fetch pointer to queue data */
6289  p = &rte_eth_fp_ops[port_id];
6290  qd = p->txq.data[queue_id];
6291 
6292 #ifdef RTE_ETHDEV_DEBUG_TX
6293  RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6294  if (qd == NULL)
6295  return -ENODEV;
6296 #endif
6297  if (*p->tx_descriptor_status == NULL)
6298  return -ENOTSUP;
6299  return (*p->tx_descriptor_status)(qd, offset);
6300 }
6301 
6321 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6322  struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6323 
/**
 * Send a burst of output packets on a transmit queue of an Ethernet device.
 *
 * Fast-path inline: resolves the driver's per-queue private data from the
 * rte_eth_fp_ops table, runs any registered pre-Tx callbacks (which may
 * reduce nb_pkts), then invokes the driver's tx_pkt_burst handler and emits
 * a trace event.
 *
 * @param port_id   Port identifier (index into rte_eth_fp_ops).
 * @param queue_id  Tx queue index on that port.
 * @param tx_pkts   Array of packet mbufs to transmit.
 * @param nb_pkts   Number of packets in @p tx_pkts.
 * @return Number of packets actually accepted by the driver
 *         (0 on validation failure in RTE_ETHDEV_DEBUG_TX builds).
 */
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_fp_ops *p;
	void *qd;

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Bounds-check before indexing the fast-path ops table (debug only). */
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid port_id=%u or queue_id=%u\n",
			port_id, queue_id);
		return 0;
	}
#endif

	/* fetch pointer to queue data */
	p = &rte_eth_fp_ops[port_id];
	qd = p->txq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);

	if (qd == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
			queue_id, port_id);
		return 0;
	}
#endif

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
	{
		void *cb;

		/* rte_memory_order_release memory order was used when the
		 * call back was inserted into the list.
		 * Since there is a clear dependency between loading
		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
		 * not required.
		 */
		cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
				rte_memory_order_relaxed);
		if (unlikely(cb != NULL))
			nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
					tx_pkts, nb_pkts, cb);
	}
#endif

	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
	return nb_pkts;
}
6449 
6503 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6504 
/**
 * Process a burst of packets before transmission (e.g. driver-specific
 * checks/fixups), by dispatching to the driver's tx_pkt_prepare handler.
 *
 * If the driver registers no tx_pkt_prepare handler, all packets are
 * considered ready and nb_pkts is returned unchanged.
 *
 * @param port_id   Port identifier (index into rte_eth_fp_ops).
 * @param queue_id  Tx queue index on that port.
 * @param tx_pkts   Array of packet mbufs to prepare.
 * @param nb_pkts   Number of packets in @p tx_pkts.
 * @return Number of packets successfully prepared. In RTE_ETHDEV_DEBUG_TX
 *         builds, returns 0 and sets rte_errno (ENODEV/EINVAL) on invalid
 *         port or queue.
 */
static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_fp_ops *p;
	void *qd;

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Bounds-check before indexing the fast-path ops table (debug only);
	 * report the failure through rte_errno since 0 is a valid count.
	 */
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid port_id=%u or queue_id=%u\n",
			port_id, queue_id);
		rte_errno = ENODEV;
		return 0;
	}
#endif

	/* fetch pointer to queue data */
	p = &rte_eth_fp_ops[port_id];
	qd = p->txq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
		rte_errno = ENODEV;
		return 0;
	}
	if (qd == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
			queue_id, port_id);
		rte_errno = EINVAL;
		return 0;
	}
#endif

	/* No driver handler: nothing to do, every packet is ready. */
	if (!p->tx_pkt_prepare)
		return nb_pkts;

	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
}
6546 
6547 #else
6548 
6549 /*
6550  * Native NOOP operation for compilation targets which doesn't require any
6551  * preparations steps, and functional NOOP may introduce unnecessary performance
6552  * drop.
6553  *
6554  * Generally this is not a good idea to turn it on globally and didn't should
6555  * be used if behavior of tx_preparation can change.
6556  */
6557 
/**
 * No-op variant of rte_eth_tx_prepare(), compiled when
 * RTE_ETHDEV_TX_PREPARE_NOOP is defined: reports every packet as ready
 * without touching the device.
 */
static inline uint16_t
rte_eth_tx_prepare(__rte_unused uint16_t port_id,
	__rte_unused uint16_t queue_id,
	__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	return nb_pkts;
}
6565 
6566 #endif
6567 
6590 static inline uint16_t
6591 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6592  struct rte_eth_dev_tx_buffer *buffer)
6593 {
6594  uint16_t sent;
6595  uint16_t to_send = buffer->length;
6596 
6597  if (to_send == 0)
6598  return 0;
6599 
6600  sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6601 
6602  buffer->length = 0;
6603 
6604  /* All packets sent, or to be dealt with by callback below */
6605  if (unlikely(sent != to_send))
6606  buffer->error_callback(&buffer->pkts[sent],
6607  (uint16_t)(to_send - sent),
6608  buffer->error_userdata);
6609 
6610  return sent;
6611 }
6612 
6643 static __rte_always_inline uint16_t
6644 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6645  struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6646 {
6647  buffer->pkts[buffer->length++] = tx_pkt;
6648  if (buffer->length < buffer->size)
6649  return 0;
6650 
6651  return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6652 }
6653 
/**
 * Recycle used mbufs from a Tx queue directly into an Rx queue's mbuf ring.
 *
 * Experimental fast path: asks the Tx port's driver to release already
 * transmitted mbufs into the Rx ring described by @p recycle_rxq_info, then
 * asks the Rx port's driver to refill that many Rx descriptors. Returns 0
 * without side effects when either driver lacks the required handler or no
 * mbufs were available to recycle.
 *
 * @param rx_port_id        Port owning the Rx queue to replenish.
 * @param rx_queue_id       Rx queue index on @p rx_port_id.
 * @param tx_port_id        Port owning the Tx queue to harvest mbufs from.
 * @param tx_queue_id       Tx queue index on @p tx_port_id.
 * @param recycle_rxq_info  Rx queue ring description used by the Tx driver
 *                          to place recycled mbufs.
 * @return Number of mbufs recycled (0 on validation failure in debug
 *         builds, missing driver support, or nothing to recycle).
 */
__rte_experimental
static inline uint16_t
rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
	uint16_t tx_port_id, uint16_t tx_queue_id,
	struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
	struct rte_eth_fp_ops *p1, *p2;
	void *qd1, *qd2;
	uint16_t nb_mbufs;

#ifdef RTE_ETHDEV_DEBUG_TX
	/* Validate the Tx side before touching the fast-path table (debug only). */
	if (tx_port_id >= RTE_MAX_ETHPORTS ||
			tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid tx_port_id=%u or tx_queue_id=%u\n",
			tx_port_id, tx_queue_id);
		return 0;
	}
#endif

	/* fetch pointer to Tx queue data */
	p1 = &rte_eth_fp_ops[tx_port_id];
	qd1 = p1->txq.data[tx_queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);

	if (qd1 == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
			tx_queue_id, tx_port_id);
		return 0;
	}
#endif
	/* Tx driver must support mbuf reuse, otherwise nothing to do. */
	if (p1->recycle_tx_mbufs_reuse == NULL)
		return 0;

#ifdef RTE_ETHDEV_DEBUG_RX
	/* Validate the Rx side before touching the fast-path table (debug only). */
	if (rx_port_id >= RTE_MAX_ETHPORTS ||
			rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u\n",
			rx_port_id, rx_queue_id);
		return 0;
	}
#endif

	/* fetch pointer to Rx queue data */
	p2 = &rte_eth_fp_ops[rx_port_id];
	qd2 = p2->rxq.data[rx_queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);

	if (qd2 == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
			rx_queue_id, rx_port_id);
		return 0;
	}
#endif
	/* Rx driver must support descriptor refill, otherwise nothing to do. */
	if (p2->recycle_rx_descriptors_refill == NULL)
		return 0;

	/* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
	 * into Rx mbuf ring.
	 */
	nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);

	/* If no recycling mbufs, return 0. */
	if (nb_mbufs == 0)
		return 0;

	/* Replenish the Rx descriptors with the recycling
	 * into Rx mbuf ring.
	 */
	p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);

	return nb_mbufs;
}
6784 
6813 __rte_experimental
6814 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6815 
6816 #ifdef __cplusplus
6817 }
6818 #endif
6819 
6820 #endif /* _RTE_ETHDEV_H_ */
#define RTE_BIT32(nr)
Definition: rte_bitops.h:40
#define unlikely(x)
rte_cman_mode
Definition: rte_cman.h:20
#define __rte_cache_min_aligned
Definition: rte_common.h:528
#define __rte_unused
Definition: rte_common.h:143
#define __rte_always_inline
Definition: rte_common.h:331
#define rte_errno
Definition: rte_errno.h:29
int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
rte_eth_nb_pools
Definition: rte_ethdev.h:911
@ RTE_ETH_64_POOLS
Definition: rte_ethdev.h:915
@ RTE_ETH_32_POOLS
Definition: rte_ethdev.h:914
@ RTE_ETH_8_POOLS
Definition: rte_ethdev.h:912
@ RTE_ETH_16_POOLS
Definition: rte_ethdev.h:913
rte_eth_event_ipsec_subtype
Definition: rte_ethdev.h:3957
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_END
Definition: rte_ethdev.h:3961
@ RTE_ETH_EVENT_IPSEC_UNKNOWN
Definition: rte_ethdev.h:3963
@ RTE_ETH_EVENT_IPSEC_MAX
Definition: rte_ethdev.h:3989
@ RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
Definition: rte_ethdev.h:3977
@ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
Definition: rte_ethdev.h:3965
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
Definition: rte_ethdev.h:3982
@ RTE_ETH_EVENT_IPSEC_PMD_ERROR_START
Definition: rte_ethdev.h:3959
@ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
Definition: rte_ethdev.h:3972
@ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
Definition: rte_ethdev.h:3967
@ RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
Definition: rte_ethdev.h:3987
int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time)
__rte_experimental int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
int rte_eth_dev_is_removed(uint16_t port_id)
__rte_experimental int rte_eth_dev_hairpin_capability_get(uint16_t port_id, struct rte_eth_hairpin_cap *cap)
int rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
int rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp, uint32_t flags)
static uint64_t rte_eth_rss_hf_refine(uint64_t rss_hf)
Definition: rte_ethdev.h:665
__rte_experimental int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id, uint8_t affinity)
static __rte_always_inline uint16_t rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
Definition: rte_ethdev.h:6644
void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
int rte_eth_dev_set_link_down(uint16_t port_id)
rte_eth_event_macsec_subtype
Definition: rte_ethdev.h:3881
@ RTE_ETH_SUBEVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:3883
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1
Definition: rte_ethdev.h:3893
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48
Definition: rte_ethdev.h:3898
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1
Definition: rte_ethdev.h:3908
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1
Definition: rte_ethdev.h:3903
@ RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1
Definition: rte_ethdev.h:3888
const struct rte_eth_rxtx_callback * rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id, rte_tx_callback_fn fn, void *user_param)
int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue, uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf)
int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id, int epfd, int op, void *data)
rte_eth_event_type
Definition: rte_ethdev.h:4022
@ RTE_ETH_EVENT_RECOVERY_FAILED
Definition: rte_ethdev.h:4099
@ RTE_ETH_EVENT_UNKNOWN
Definition: rte_ethdev.h:4023
@ RTE_ETH_EVENT_VF_MBOX
Definition: rte_ethdev.h:4029
@ RTE_ETH_EVENT_IPSEC
Definition: rte_ethdev.h:4040
@ RTE_ETH_EVENT_INTR_RESET
Definition: rte_ethdev.h:4028
@ RTE_ETH_EVENT_INTR_RMV
Definition: rte_ethdev.h:4031
@ RTE_ETH_EVENT_ERR_RECOVERING
Definition: rte_ethdev.h:4063
@ RTE_ETH_EVENT_MACSEC
Definition: rte_ethdev.h:4030
@ RTE_ETH_EVENT_RECOVERY_SUCCESS
Definition: rte_ethdev.h:4094
@ RTE_ETH_EVENT_DESTROY
Definition: rte_ethdev.h:4039
@ RTE_ETH_EVENT_FLOW_AGED
Definition: rte_ethdev.h:4041
@ RTE_ETH_EVENT_QUEUE_STATE
Definition: rte_ethdev.h:4026
@ RTE_ETH_EVENT_INTR_LSC
Definition: rte_ethdev.h:4024
@ RTE_ETH_EVENT_MAX
Definition: rte_ethdev.h:4100
@ RTE_ETH_EVENT_RX_AVAIL_THRESH
Definition: rte_ethdev.h:4046
@ RTE_ETH_EVENT_NEW
Definition: rte_ethdev.h:4038
int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
__rte_experimental int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
int rte_eth_dev_is_valid_port(uint16_t port_id)
rte_eth_cman_obj
Definition: rte_ethdev.h:5779
@ RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL
Definition: rte_ethdev.h:5786
@ RTE_ETH_CMAN_OBJ_RX_QUEUE
Definition: rte_ethdev.h:5781
#define RTE_ETH_DCB_NUM_USER_PRIORITIES
Definition: rte_ethdev.h:841
__rte_experimental int rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
__rte_experimental int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
int rte_eth_dev_reset(uint16_t port_id)
#define RTE_ETH_BURST_MODE_INFO_SIZE
Definition: rte_ethdev.h:1908
int rte_eth_allmulticast_disable(uint16_t port_id)
int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, unsigned int n)
rte_eth_dev_state
Definition: rte_ethdev.h:2072
@ RTE_ETH_DEV_ATTACHED
Definition: rte_ethdev.h:2076
@ RTE_ETH_DEV_UNUSED
Definition: rte_ethdev.h:2074
@ RTE_ETH_DEV_REMOVED
Definition: rte_ethdev.h:2078
int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
__rte_experimental int rte_eth_dev_count_aggr_ports(uint16_t port_id)
int rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_rss_reta_update(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:6056
rte_eth_fec_mode
Definition: rte_ethdev.h:1983
@ RTE_ETH_FEC_NOFEC
Definition: rte_ethdev.h:1984
@ RTE_ETH_FEC_BASER
Definition: rte_ethdev.h:1986
@ RTE_ETH_FEC_AUTO
Definition: rte_ethdev.h:1985
@ RTE_ETH_FEC_RS
Definition: rte_ethdev.h:1987
@ RTE_ETH_FEC_LLRS
Definition: rte_ethdev.h:1988
int rte_eth_xstats_get_names(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size)
int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
__rte_experimental int rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
rte_eth_err_handle_mode
Definition: rte_ethdev.h:1733
@ RTE_ETH_ERROR_HANDLE_MODE_PASSIVE
Definition: rte_ethdev.h:1740
@ RTE_ETH_ERROR_HANDLE_MODE_NONE
Definition: rte_ethdev.h:1735
@ RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE
Definition: rte_ethdev.h:1746
int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
const struct rte_eth_rxtx_callback * rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
rte_eth_tx_mq_mode
Definition: rte_ethdev.h:407
@ RTE_ETH_MQ_TX_DCB
Definition: rte_ethdev.h:409
@ RTE_ETH_MQ_TX_VMDQ_DCB
Definition: rte_ethdev.h:410
@ RTE_ETH_MQ_TX_VMDQ_ONLY
Definition: rte_ethdev.h:411
@ RTE_ETH_MQ_TX_NONE
Definition: rte_ethdev.h:408
int rte_eth_promiscuous_get(uint16_t port_id)
uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
int rte_eth_led_off(uint16_t port_id)
int rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
int rte_eth_dev_set_link_up(uint16_t port_id)
__rte_experimental const char * rte_eth_dev_capability_name(uint64_t capability)
__rte_experimental int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
void * rte_eth_dev_get_sec_ctx(uint16_t port_id)
int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id, uint8_t stat_idx)
uint16_t rte_eth_find_next(uint16_t port_id)
rte_eth_rx_mq_mode
Definition: rte_ethdev.h:381
@ RTE_ETH_MQ_RX_DCB_RSS
Definition: rte_ethdev.h:390
@ RTE_ETH_MQ_RX_VMDQ_DCB_RSS
Definition: rte_ethdev.h:399
@ RTE_ETH_MQ_RX_DCB
Definition: rte_ethdev.h:388
@ RTE_ETH_MQ_RX_VMDQ_DCB
Definition: rte_ethdev.h:397
@ RTE_ETH_MQ_RX_VMDQ_RSS
Definition: rte_ethdev.h:395
@ RTE_ETH_MQ_RX_NONE
Definition: rte_ethdev.h:383
@ RTE_ETH_MQ_RX_RSS
Definition: rte_ethdev.h:386
@ RTE_ETH_MQ_RX_VMDQ_ONLY
Definition: rte_ethdev.h:393
int rte_eth_allmulticast_get(uint16_t port_id)
int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids, uint64_t *values, unsigned int size)
int rte_eth_allmulticast_enable(uint16_t port_id)
int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
int rte_eth_promiscuous_enable(uint16_t port_id)
rte_eth_representor_type
Definition: rte_ethdev.h:1720
@ RTE_ETH_REPRESENTOR_PF
Definition: rte_ethdev.h:1724
@ RTE_ETH_REPRESENTOR_VF
Definition: rte_ethdev.h:1722
@ RTE_ETH_REPRESENTOR_SF
Definition: rte_ethdev.h:1723
@ RTE_ETH_REPRESENTOR_NONE
Definition: rte_ethdev.h:1721
int rte_eth_timesync_enable(uint16_t port_id)
__rte_experimental int rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
int rte_eth_dev_rss_hash_conf_get(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS
Definition: rte_ethdev.h:840
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id, struct rte_eth_pfc_conf *pfc_conf)
int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
int rte_eth_dev_udp_tunnel_port_add(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter)
__rte_experimental int rte_eth_ip_reassembly_capability_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *capa)
__rte_experimental int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id, int on)
int rte_eth_dev_set_vlan_ether_type(uint16_t port_id, enum rte_vlan_type vlan_type, uint16_t tag_type)
int rte_eth_dev_stop(uint16_t port_id)
int rte_eth_timesync_disable(uint16_t port_id)
__rte_experimental int rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, const struct rte_eth_hairpin_conf *conf)
uint16_t(* rte_rx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
Definition: rte_ethdev.h:2042
int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
static uint16_t rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6506
rte_eth_tunnel_type
Definition: rte_ethdev.h:1443
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, unsigned int socket_id, const struct rte_eth_txconf *tx_conf)
int rte_eth_promiscuous_disable(uint16_t port_id)
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id, const struct rte_eth_rxtx_callback *user_cb)
int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_owner_delete(const uint64_t owner_id)
__rte_experimental int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:6396
int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
__rte_experimental int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
int(* rte_eth_dev_cb_fn)(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg, void *ret_param)
Definition: rte_ethdev.h:4104
int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id)
int rte_eth_dev_get_eeprom_length(uint16_t port_id)
int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *mac_addr)
int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
int rte_eth_xstats_get_names_by_id(uint16_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id, struct rte_eth_pfc_queue_info *pfc_queue_info)
__rte_experimental int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
#define RTE_ETH_MQ_RX_DCB_FLAG
Definition: rte_ethdev.h:373
uint16_t rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id)
int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name, uint64_t *id)
uint16_t rte_eth_dev_count_avail(void)
int rte_eth_dev_callback_unregister(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
__rte_experimental int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
rte_eth_fc_mode
Definition: rte_ethdev.h:1354
@ RTE_ETH_FC_TX_PAUSE
Definition: rte_ethdev.h:1357
@ RTE_ETH_FC_RX_PAUSE
Definition: rte_ethdev.h:1356
@ RTE_ETH_FC_NONE
Definition: rte_ethdev.h:1355
@ RTE_ETH_FC_FULL
Definition: rte_ethdev.h:1358
rte_eth_event_macsec_type
Definition: rte_ethdev.h:3915
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:3921
@ RTE_ETH_EVENT_MACSEC_SA_NOT_VALID
Definition: rte_ethdev.h:3929
@ RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:3923
@ RTE_ETH_EVENT_MACSEC_UNKNOWN
Definition: rte_ethdev.h:3917
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP
Definition: rte_ethdev.h:3925
@ RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR
Definition: rte_ethdev.h:3919
@ RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP
Definition: rte_ethdev.h:3927
int rte_eth_led_on(uint16_t port_id)
const char * rte_eth_dev_tx_offload_name(uint64_t offload)
int rte_eth_dev_rss_reta_query(uint16_t port_id, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
__rte_experimental int rte_eth_ip_reassembly_conf_set(uint16_t port_id, const struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id, uint8_t avail_thresh)
int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *ptypes, int num)
__rte_experimental int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
int rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
int rte_eth_dev_set_mc_addr_list(uint16_t port_id, struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs)
int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_burst_mode *mode)
int rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer, buffer_tx_error_fn callback, void *userdata)
int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr, uint32_t pool)
int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on)
int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner)
uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex)
__rte_experimental int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
__rte_experimental int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id, struct rte_eth_pfc_queue_conf *pfc_queue_conf)
__rte_experimental int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports, size_t len, uint32_t direction)
int rte_eth_dev_get_vlan_offload(uint16_t port_id)
#define RTE_ETH_MQ_RX_RSS_FLAG
Definition: rte_ethdev.h:372
int rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_rxq_info *qinfo)
int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
int rte_eth_dev_callback_register(uint16_t port_id, enum rte_eth_event_type event, rte_eth_dev_cb_fn cb_fn, void *cb_arg)
int rte_eth_dev_close(uint16_t port_id)
void rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
__rte_experimental int rte_eth_ip_reassembly_conf_get(uint16_t port_id, struct rte_eth_ip_reassembly_params *conf)
__rte_experimental int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id, struct rte_power_monitor_cond *pmc)
int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
int rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id, struct rte_eth_udp_tunnel *tunnel_udp)
int rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
#define RTE_ETH_MQ_RX_VMDQ_FLAG
Definition: rte_ethdev.h:374
__rte_experimental int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id, uint16_t offset, uint16_t num, FILE *file)
__rte_experimental int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
void rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent, void *userdata)
__rte_experimental int rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc, const struct rte_eth_hairpin_conf *conf)
__rte_experimental const char * rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
static uint16_t rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id, struct rte_eth_dev_tx_buffer *buffer)
Definition: rte_ethdev.h:6591
int rte_eth_dev_socket_id(uint16_t port_id)
int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc)
static int rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6201
__rte_experimental int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
static int rte_eth_tx_descriptor_status(uint16_t port_id, uint16_t queue_id, uint16_t offset)
Definition: rte_ethdev.h:6272
const char * rte_eth_dev_rx_offload_name(uint64_t offload)
int rte_eth_dev_get_dcb_info(uint16_t port_id, struct rte_eth_dcb_info *dcb_info)
static __rte_experimental uint16_t rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id, uint16_t tx_port_id, uint16_t tx_queue_id, struct rte_eth_recycle_rxq_info *recycle_rxq_info)
Definition: rte_ethdev.h:6709
int rte_eth_dev_rss_hash_update(uint16_t port_id, struct rte_eth_rss_conf *rss_conf)
int rte_eth_dev_owner_new(uint64_t *owner_id)
int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask, uint32_t *set_ptypes, unsigned int num)
int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time)
__rte_experimental int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id, uint8_t *avail_thresh)
int rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *mac_addr)
const struct rte_eth_rxtx_callback * rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id, rte_rx_callback_fn fn, void *user_param)
int rte_eth_xstats_reset(uint16_t port_id)
__rte_experimental const char * rte_eth_link_speed_to_str(uint32_t link_speed)
int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, uint32_t tx_rate)
int rte_eth_timesync_read_tx_timestamp(uint16_t port_id, struct timespec *timestamp)
uint16_t rte_eth_find_next_of(uint16_t port_id_start, const struct rte_device *parent)
rte_vlan_type
Definition: rte_ethdev.h:438
@ RTE_ETH_VLAN_TYPE_OUTER
Definition: rte_ethdev.h:441
@ RTE_ETH_VLAN_TYPE_INNER
Definition: rte_ethdev.h:440
__rte_experimental int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma, unsigned int num)
uint16_t(* rte_tx_callback_fn)(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param)
Definition: rte_ethdev.h:2066
rte_eth_hash_function
Definition: rte_ethdev.h:456
@ RTE_ETH_HASH_FUNCTION_DEFAULT
Definition: rte_ethdev.h:458
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT
Definition: rte_ethdev.h:473
@ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR
Definition: rte_ethdev.h:460
@ RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ
Definition: rte_ethdev.h:466
@ RTE_ETH_HASH_FUNCTION_TOEPLITZ
Definition: rte_ethdev.h:459
uint16_t rte_eth_dev_count_total(void)
#define RTE_ETH_XSTATS_NAME_SIZE
Definition: rte_ethdev.h:1913
int rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
int rte_eth_stats_reset(uint16_t port_id)
int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id, uint8_t stat_idx)
static int rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
Definition: rte_ethdev.h:6129
__rte_experimental int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config)
rte_eth_nb_tcs
Definition: rte_ethdev.h:902
@ RTE_ETH_4_TCS
Definition: rte_ethdev.h:903
@ RTE_ETH_8_TCS
Definition: rte_ethdev.h:904
__rte_experimental int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
__rte_experimental int rte_eth_representor_info_get(uint16_t port_id, struct rte_eth_representor_info *info)
int rte_eth_dev_start(uint16_t port_id)
__rte_experimental int rte_eth_fec_get_capability(uint16_t port_id, struct rte_eth_fec_capa *speed_fec_capa, unsigned int num)
char info[RTE_ETH_BURST_MODE_INFO_SIZE]
Definition: rte_ethdev.h:1909
uint8_t rsvd_mode_params[4]
Definition: rte_ethdev.h:5853
enum rte_eth_cman_obj obj
Definition: rte_ethdev.h:5821
struct rte_cman_red_params red
Definition: rte_ethdev.h:5846
uint8_t rsvd_obj_params[4]
Definition: rte_ethdev.h:5838
enum rte_cman_mode mode
Definition: rte_ethdev.h:5823
uint8_t rsvd[8]
Definition: rte_ethdev.h:5810
uint64_t modes_supported
Definition: rte_ethdev.h:5800
uint64_t objs_supported
Definition: rte_ethdev.h:5805
struct rte_eth_intr_conf intr_conf
Definition: rte_ethdev.h:1527
struct rte_eth_vmdq_rx_conf vmdq_rx_conf
Definition: rte_ethdev.h:1514
struct rte_eth_txmode txmode
Definition: rte_ethdev.h:1501
struct rte_eth_rxmode rxmode
Definition: rte_ethdev.h:1500
struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf
Definition: rte_ethdev.h:1510
struct rte_eth_conf::@126 rx_adv_conf
uint32_t lpbk_mode
Definition: rte_ethdev.h:1502
uint32_t dcb_capability_en
Definition: rte_ethdev.h:1526
union rte_eth_conf::@127 tx_adv_conf
struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf
Definition: rte_ethdev.h:1518
uint32_t link_speeds
Definition: rte_ethdev.h:1493
struct rte_eth_rss_conf rss_conf
Definition: rte_ethdev.h:1508
struct rte_eth_dcb_tx_conf dcb_tx_conf
Definition: rte_ethdev.h:1520
struct rte_eth_dcb_rx_conf dcb_rx_conf
Definition: rte_ethdev.h:1512
struct rte_eth_vmdq_tx_conf vmdq_tx_conf
Definition: rte_ethdev.h:1522
uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]
Definition: rte_ethdev.h:1974
uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:1973
struct rte_eth_dcb_tc_queue_mapping tc_queue
Definition: rte_ethdev.h:1976
struct rte_eth_dcb_tc_queue_mapping::@129 tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
struct rte_eth_dcb_tc_queue_mapping::@128 tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS]
uint16_t nb_mtu_seg_max
Definition: rte_ethdev.h:1348
uint16_t nb_seg_max
Definition: rte_ethdev.h:1335
uint16_t nb_align
Definition: rte_ethdev.h:1325
uint32_t max_rx_bufsize
Definition: rte_ethdev.h:1770
uint32_t max_hash_mac_addrs
Definition: rte_ethdev.h:1778
struct rte_eth_desc_lim rx_desc_lim
Definition: rte_ethdev.h:1801
unsigned int if_index
Definition: rte_ethdev.h:1757
uint16_t max_rx_queues
Definition: rte_ethdev.h:1774
uint64_t dev_capa
Definition: rte_ethdev.h:1819
uint16_t vmdq_queue_num
Definition: rte_ethdev.h:1799
uint32_t min_rx_bufsize
Definition: rte_ethdev.h:1763
uint16_t max_tx_queues
Definition: rte_ethdev.h:1775
struct rte_eth_txconf default_txconf
Definition: rte_ethdev.h:1797
uint16_t max_vmdq_pools
Definition: rte_ethdev.h:1780
struct rte_device * device
Definition: rte_ethdev.h:1755
struct rte_eth_rxconf default_rxconf
Definition: rte_ethdev.h:1796
uint16_t nb_tx_queues
Definition: rte_ethdev.h:1806
enum rte_eth_err_handle_mode err_handle_mode
Definition: rte_ethdev.h:1826
uint32_t max_rx_pktlen
Definition: rte_ethdev.h:1771
uint16_t max_mtu
Definition: rte_ethdev.h:1760
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:1773
uint16_t vmdq_queue_base
Definition: rte_ethdev.h:1798
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1829
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1828
uint64_t tx_queue_offload_capa
Definition: rte_ethdev.h:1789
uint16_t vmdq_pool_base
Definition: rte_ethdev.h:1800
uint16_t min_mtu
Definition: rte_ethdev.h:1759
uint16_t reta_size
Definition: rte_ethdev.h:1791
struct rte_eth_desc_lim tx_desc_lim
Definition: rte_ethdev.h:1802
uint64_t flow_type_rss_offloads
Definition: rte_ethdev.h:1795
uint16_t max_rx_mempools
Definition: rte_ethdev.h:1813
uint16_t max_vfs
Definition: rte_ethdev.h:1779
struct rte_eth_dev_portconf default_txportconf
Definition: rte_ethdev.h:1817
uint64_t tx_offload_capa
Definition: rte_ethdev.h:1785
const char * driver_name
Definition: rte_ethdev.h:1756
uint8_t hash_key_size
Definition: rte_ethdev.h:1792
uint32_t speed_capa
Definition: rte_ethdev.h:1803
struct rte_eth_dev_portconf default_rxportconf
Definition: rte_ethdev.h:1815
struct rte_eth_switch_info switch_info
Definition: rte_ethdev.h:1824
struct rte_eth_rxseg_capa rx_seg_capa
Definition: rte_ethdev.h:1781
uint64_t rx_queue_offload_capa
Definition: rte_ethdev.h:1787
uint64_t rx_offload_capa
Definition: rte_ethdev.h:1783
uint16_t nb_rx_queues
Definition: rte_ethdev.h:1805
uint32_t max_mac_addrs
Definition: rte_ethdev.h:1776
const uint32_t * dev_flags
Definition: rte_ethdev.h:1761
struct rte_mbuf * pkts[]
Definition: rte_ethdev.h:3743
enum rte_eth_event_ipsec_subtype subtype
Definition: rte_ethdev.h:3998
enum rte_eth_event_macsec_type type
Definition: rte_ethdev.h:3938
enum rte_eth_event_macsec_subtype subtype
Definition: rte_ethdev.h:3940
uint32_t low_water
Definition: rte_ethdev.h:1368
uint16_t send_xon
Definition: rte_ethdev.h:1370
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1371
uint32_t high_water
Definition: rte_ethdev.h:1367
uint16_t pause_time
Definition: rte_ethdev.h:1369
uint8_t mac_ctrl_frame_fwd
Definition: rte_ethdev.h:1372
uint16_t max_nb_queues
Definition: rte_ethdev.h:1220
struct rte_eth_hairpin_queue_cap tx_cap
Definition: rte_ethdev.h:1227
struct rte_eth_hairpin_queue_cap rx_cap
Definition: rte_ethdev.h:1226
uint32_t use_locked_device_memory
Definition: rte_ethdev.h:1287
struct rte_eth_fc_conf fc
Definition: rte_ethdev.h:1382
enum rte_eth_fc_mode mode
Definition: rte_ethdev.h:1419
enum rte_eth_fc_mode mode_capa
Definition: rte_ethdev.h:1398
struct rte_mempool * mp
Definition: rte_ethdev.h:1878
struct rte_mbuf ** mbuf_ring
Definition: rte_ethdev.h:1877
struct rte_eth_representor_range ranges[]
Definition: rte_ethdev.h:5494
enum rte_eth_representor_type type
Definition: rte_ethdev.h:5470
char name[RTE_DEV_NAME_MAX_LEN]
Definition: rte_ethdev.h:5480
uint8_t * rss_key
Definition: rte_ethdev.h:497
uint8_t rss_key_len
Definition: rte_ethdev.h:498
enum rte_eth_hash_function algorithm
Definition: rte_ethdev.h:504
uint64_t rss_hf
Definition: rte_ethdev.h:503
uint16_t reta[RTE_ETH_RETA_GROUP_SIZE]
Definition: rte_ethdev.h:895
struct rte_eth_thresh rx_thresh
Definition: rte_ethdev.h:1115
uint64_t offloads
Definition: rte_ethdev.h:1133
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1166
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1165
uint8_t rx_deferred_start
Definition: rte_ethdev.h:1118
uint16_t share_group
Definition: rte_ethdev.h:1126
uint8_t rx_drop_en
Definition: rte_ethdev.h:1117
uint16_t share_qid
Definition: rte_ethdev.h:1127
union rte_eth_rxseg * rx_seg
Definition: rte_ethdev.h:1141
struct rte_mempool ** rx_mempools
Definition: rte_ethdev.h:1162
uint16_t rx_nseg
Definition: rte_ethdev.h:1119
uint16_t rx_free_thresh
Definition: rte_ethdev.h:1116
uint32_t mtu
Definition: rte_ethdev.h:420
uint32_t max_lro_pkt_size
Definition: rte_ethdev.h:422
uint64_t offloads
Definition: rte_ethdev.h:428
void * reserved_ptrs[2]
Definition: rte_ethdev.h:431
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:430
enum rte_eth_rx_mq_mode mq_mode
Definition: rte_ethdev.h:419
struct rte_eth_rxconf conf
Definition: rte_ethdev.h:1844
uint8_t scattered_rx
Definition: rte_ethdev.h:1845
struct rte_mempool * mp
Definition: rte_ethdev.h:1843
uint8_t queue_state
Definition: rte_ethdev.h:1846
uint8_t avail_thresh
Definition: rte_ethdev.h:1855
uint16_t nb_desc
Definition: rte_ethdev.h:1847
uint16_t rx_buf_size
Definition: rte_ethdev.h:1848
__extension__ uint32_t multi_pools
Definition: rte_ethdev.h:1706
uint32_t offset_allowed
Definition: rte_ethdev.h:1707
uint32_t offset_align_log2
Definition: rte_ethdev.h:1708
struct rte_mempool * mp
Definition: rte_ethdev.h:1082
uint64_t imissed
Definition: rte_ethdev.h:270
uint64_t obytes
Definition: rte_ethdev.h:265
uint64_t opackets
Definition: rte_ethdev.h:263
uint64_t rx_nombuf
Definition: rte_ethdev.h:273
uint64_t ibytes
Definition: rte_ethdev.h:264
uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:280
uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:278
uint64_t ierrors
Definition: rte_ethdev.h:271
uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:284
uint64_t ipackets
Definition: rte_ethdev.h:262
uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:276
uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS]
Definition: rte_ethdev.h:282
uint64_t oerrors
Definition: rte_ethdev.h:272
const char * name
Definition: rte_ethdev.h:1680
uint8_t hthresh
Definition: rte_ethdev.h:365
uint8_t pthresh
Definition: rte_ethdev.h:364
uint8_t wthresh
Definition: rte_ethdev.h:366
uint8_t tx_deferred_start
Definition: rte_ethdev.h:1178
uint64_t offloads
Definition: rte_ethdev.h:1184
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1187
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1186
struct rte_eth_thresh tx_thresh
Definition: rte_ethdev.h:1173
uint16_t tx_rs_thresh
Definition: rte_ethdev.h:1174
uint16_t tx_free_thresh
Definition: rte_ethdev.h:1175
uint64_t offloads
Definition: rte_ethdev.h:1006
__extension__ uint8_t hw_vlan_insert_pvid
Definition: rte_ethdev.h:1015
void * reserved_ptrs[2]
Definition: rte_ethdev.h:1018
__extension__ uint8_t hw_vlan_reject_tagged
Definition: rte_ethdev.h:1011
uint64_t reserved_64s[2]
Definition: rte_ethdev.h:1017
__extension__ uint8_t hw_vlan_reject_untagged
Definition: rte_ethdev.h:1013
enum rte_eth_tx_mq_mode mq_mode
Definition: rte_ethdev.h:1000
struct rte_eth_txconf conf
Definition: rte_ethdev.h:1863
uint8_t queue_state
Definition: rte_ethdev.h:1865
uint16_t nb_desc
Definition: rte_ethdev.h:1864
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:953
struct rte_eth_vmdq_dcb_conf::@122 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]
Definition: rte_ethdev.h:962
uint8_t enable_default_pool
Definition: rte_ethdev.h:954
enum rte_eth_nb_pools nb_queue_pools
Definition: rte_ethdev.h:984
struct rte_eth_vmdq_rx_conf::@123 pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]
uint8_t enable_default_pool
Definition: rte_ethdev.h:985
uint8_t enable_loop_back
Definition: rte_ethdev.h:987
char name[RTE_ETH_XSTATS_NAME_SIZE]
Definition: rte_ethdev.h:1944
uint64_t value
Definition: rte_ethdev.h:1926
uint64_t id
Definition: rte_ethdev.h:1925