diff --git a/SOURCES/openvswitch-3.3.0.patch b/SOURCES/openvswitch-3.3.0.patch index 0c2a8b0..85ea2ab 100644 --- a/SOURCES/openvswitch-3.3.0.patch +++ b/SOURCES/openvswitch-3.3.0.patch @@ -11858,7 +11858,7 @@ index 272a6ffc7f..6b35c56da0 100644 name: ${{ steps.get_keys.outputs.logs }} path: | diff --git a/dpdk/.mailmap b/dpdk/.mailmap -index ab0742a382..f2883144f3 100644 +index ab0742a382..ff5b0821ba 100644 --- a/dpdk/.mailmap +++ b/dpdk/.mailmap @@ -2,7 +2,7 @@ Aakash Sasidharan @@ -11898,15 +11898,24 @@ index ab0742a382..f2883144f3 100644 Alexander Solganik Alexander V Gutkin Alexandre Ferrieux -@@ -224,6 +228,7 @@ Cheng Liu +@@ -173,6 +177,7 @@ Bert van Leeuwen + Bhagyada Modali + Bharat Mota + Bill Hong ++Bill Xiang + Billy McFall + Billy O'Mahony + Bing Zhao +@@ -224,6 +229,8 @@ Cheng Liu Cheng Peng Chengwen Feng Chenmin Sun +Chenming Chang ++Chenxingyu Wang Chenxu Di Chenyu Huang Cheryl Houser -@@ -367,6 +372,7 @@ Elad Persiko +@@ -367,6 +374,7 @@ Elad Persiko Elena Agostini Eli Britstein Elza Mathew @@ -11914,7 +11923,7 @@ index ab0742a382..f2883144f3 100644 Emma Finn Emma Kenny Emmanuel Roullit -@@ -401,9 +407,11 @@ Fengtian Guo +@@ -401,9 +409,11 @@ Fengtian Guo Ferdinand Thiessen Ferruh Yigit Fidaullah Noonari @@ -11926,7 +11935,7 @@ index ab0742a382..f2883144f3 100644 Forrest Shi Francesco Mancino Francesco Santoro -@@ -483,6 +491,7 @@ Hanoch Haim +@@ -483,6 +493,7 @@ Hanoch Haim Hanumanth Pothula Hao Chen Hao Wu @@ -11934,7 +11943,7 @@ index ab0742a382..f2883144f3 100644 Hari Kumar Vemula Harini Ramakrishnan Hariprasad Govindharajan -@@ -518,6 +527,8 @@ Hiral Shah +@@ -518,6 +529,8 @@ Hiral Shah Hiroki Shirokura Hiroshi Shimamoto Hiroyuki Mikita @@ -11943,7 +11952,7 @@ index ab0742a382..f2883144f3 100644 Hongbo Zheng Hongjun Ni Hongzhi Guo -@@ -654,6 +665,7 @@ Jin Yu +@@ -654,6 +667,7 @@ Jin Yu Jiri Slaby Job Abraham Jochen Behrens @@ -11951,7 +11960,7 @@ index ab0742a382..f2883144f3 100644 Joey Xing Johan Faltstrom Johan Källström -@@ -675,7 +687,7 @@ John Ousterhout +@@ -675,7 +689,7 @@ John Ousterhout John Romein John W. 
Linville Jonas Pfefferle @@ -11960,7 +11969,15 @@ index ab0742a382..f2883144f3 100644 Jonathan Tsai Jon DeVree Jon Loeliger -@@ -707,6 +719,7 @@ Junjie Wan +@@ -698,6 +712,7 @@ Julien Aube + Julien Castets + Julien Courtat + Julien Cretin ++Julien Hascoet + Julien Massonneau + Julien Meunier + Július Milan +@@ -707,6 +722,7 @@ Junjie Wan Jun Qiu Jun W Zhou Junxiao Shi @@ -11968,7 +11985,7 @@ index ab0742a382..f2883144f3 100644 Jun Yang Junyu Jiang Juraj Linkeš -@@ -721,7 +734,7 @@ Kamalakshitha Aligeri +@@ -721,7 +737,7 @@ Kamalakshitha Aligeri Kamil Bednarczyk Kamil Chalupnik Kamil Rytarowski @@ -11977,7 +11994,16 @@ index ab0742a382..f2883144f3 100644 Kanaka Durga Kotamarthy Karen Kelly Karen Sornek -@@ -785,9 +798,11 @@ Leszek Zygo +@@ -754,7 +770,7 @@ Kirill Rybalchenko + Kishore Padmanabha + Klaus Degner + Kommula Shiva Shankar +-Konstantin Ananyev ++Konstantin Ananyev + Krishna Murthy + Krzysztof Galazka + Krzysztof Kanas +@@ -785,9 +801,11 @@ Leszek Zygo Levend Sayar Lev Faerman Lewei Yang @@ -11989,7 +12015,15 @@ index ab0742a382..f2883144f3 100644 Liang Xu Liang Zhang Li Feng -@@ -853,7 +868,6 @@ Manish Chopra +@@ -844,6 +862,7 @@ Mahesh Adulla + Mahipal Challa + Mah Yock Gen + Mairtin o Loingsigh ++Malcolm Bumgardner + Mallesham Jatharakonda + Mallesh Koujalagi + Malvika Gupta +@@ -853,7 +872,6 @@ Manish Chopra Manish Kurup Manish Tomar Mao Jiang @@ -11997,7 +12031,7 @@ index ab0742a382..f2883144f3 100644 Marcel Apfelbaum Marcel Cornu Marcelo Ricardo Leitner -@@ -870,6 +884,7 @@ Marcin Wojtas +@@ -870,6 +888,7 @@ Marcin Wojtas Marcin Zapolski Marco Varlese Marc Sune @@ -12005,7 +12039,7 @@ index ab0742a382..f2883144f3 100644 Maria Lingemark Mario Carrillo Mário Kuka -@@ -891,8 +906,9 @@ Martin Klozik +@@ -891,8 +910,9 @@ Martin Klozik Martin Spinler Martin Weiser Martyna Szapar-Mudlaw @@ -12016,7 +12050,7 @@ index ab0742a382..f2883144f3 100644 Matan Azrad Matej Vido Mateusz Kowalski -@@ -936,6 +952,7 @@ Michael Santana +@@ -936,6 +956,7 @@ Michael Santana Michael Savisko Michael Shamis Michael S. 
Tsirkin @@ -12024,9 +12058,19 @@ index ab0742a382..f2883144f3 100644 Michael Wildt Michal Berger Michal Jastrzebski -@@ -1009,7 +1026,8 @@ Nemanja Marjanovic +@@ -950,6 +971,7 @@ Michal Swiatkowski + Michal Wilczynski + Michel Machado + Miguel Bernal Marin ++Mihai Brodschi + Mihai Pogonaru + Mike Baucom + Mike Pattrick +@@ -1008,8 +1030,10 @@ Nelson Escobar + Nemanja Marjanovic Netanel Belgazal Netanel Gonen ++Niall Meade Niall Power -Nick Connolly +Nicholas Pratte @@ -12034,7 +12078,35 @@ index ab0742a382..f2883144f3 100644 Nick Nunley Niclas Storm Nicolas Chautru -@@ -1127,6 +1145,7 @@ Przemyslaw Czesnowicz +@@ -1035,9 +1059,11 @@ Noa Ezra + Nobuhiro Miki + Norbert Ciosek + Odi Assli ++Ofer Dagan + Ognjen Joldzic + Ola Liljedahl + Oleg Polyakov ++Oleksandr Nahnybida + Olga Shern + Olivier Gournet + Olivier Matz +@@ -1097,6 +1123,7 @@ Peng Yu + Peng Zhang + Pengzhen Liu + Peter Mccarthy ++Peter Morrow + Peter Nilsson + Peter Spreadborough + Petr Houska +@@ -1117,6 +1144,7 @@ Pradeep Satyanarayana + Prashant Bhole + Prashant Upadhyaya + Prateek Agarwal ++Praveen Kaligineedi + Praveen Shetty + Pravin Pathak + Prince Takkar +@@ -1127,6 +1155,7 @@ Przemyslaw Czesnowicz Przemyslaw Patynowski Przemyslaw Zegan Pu Xu <583493798@qq.com> @@ -12042,7 +12114,7 @@ index ab0742a382..f2883144f3 100644 Qian Xu Qiao Liu Qi Fu -@@ -1142,6 +1161,7 @@ Quentin Armitage +@@ -1142,6 +1171,7 @@ Quentin Armitage Qun Wan Radha Mohan Chintakuntla Radoslaw Biernacki @@ -12050,7 +12122,7 @@ index ab0742a382..f2883144f3 100644 Radu Bulie Radu Nicolau Rafael Ávila de Espíndola -@@ -1205,7 +1225,7 @@ Roman Kapl +@@ -1205,7 +1235,7 @@ Roman Kapl Roman Korynkevych Roman Storozhenko Roman Zhukov @@ -12059,7 +12131,15 @@ index ab0742a382..f2883144f3 100644 Ron Beider Ronghua Zhang RongQiang Xie -@@ -1275,9 +1295,11 @@ Shahed Shaikh +@@ -1235,6 +1265,7 @@ Sampath Peechu + Samuel Gauthier + Sandilya Bhagi + Sangjin Han ++Sangtani Parag Satishbhai + Sankar Chokkalingam + Santoshkumar Karanappa Rastapur + Santosh Shukla +@@ -1275,9 +1306,11 @@ Shahed Shaikh Shai Brandes Shailendra Bhatnagar Shally Verma @@ -12071,7 +12151,7 @@ index ab0742a382..f2883144f3 100644 Sharmila Podury Sharon Haroni Shay Agroskin -@@ -1298,6 +1320,7 @@ Shiyang He +@@ -1298,6 +1331,7 @@ Shiyang He Shlomi Gridish Shougang Wang Shraddha Joshi @@ -12079,7 +12159,7 @@ index ab0742a382..f2883144f3 100644 Shreyansh Jain Shrikrishna Khare Shuai Zhu -@@ -1306,6 +1329,7 @@ Shuki Katzenelson +@@ -1306,6 +1340,7 @@ Shuki Katzenelson Shun Hao Shu Shen Shujing Dong @@ -12087,9 +12167,11 @@ index ab0742a382..f2883144f3 100644 Shweta Choudaha Shyam Kumar Shrivastav Shy Shyman -@@ -1424,7 +1448,9 @@ Timothy McDaniel +@@ -1423,8 +1458,11 @@ Timmons C. Player + Timothy McDaniel Timothy Miskell Timothy Redaelli ++Tim Martin Tim Shearer +Ting-Kai Ku Ting Xu @@ -12097,7 +12179,7 @@ index ab0742a382..f2883144f3 100644 Tiwei Bie Todd Fujinaka Tomasz Cel -@@ -1437,6 +1463,7 @@ Tomasz Kulasek +@@ -1437,6 +1475,7 @@ Tomasz Kulasek Tomasz Zawadzki Tom Barbette Tom Crugnale @@ -12105,15 +12187,16 @@ index ab0742a382..f2883144f3 100644 Tom Millington Tom Rix Tomer Shmilovich -@@ -1457,6 +1484,7 @@ Vadim Suraev +@@ -1457,6 +1496,8 @@ Vadim Suraev Vakul Garg Vamsi Attunuru Vanshika Shukla ++Varun Lakkur Ambaji Rao +Varun Sethi Vasily Philipov Veerasenareddy Burru Venkata Suresh Kumar P -@@ -1485,6 +1513,8 @@ Vincent Guo +@@ -1485,6 +1526,8 @@ Vincent Guo Vincent Jardin Vincent Li Vincent S. 
Cojot @@ -12122,7 +12205,7 @@ index ab0742a382..f2883144f3 100644 Vipin Varghese Vipul Ashri Visa Hankala -@@ -1507,6 +1537,7 @@ Walter Heymans +@@ -1507,6 +1550,7 @@ Walter Heymans Wang Sheng-Hui Wangyu (Eric) Waterman Cao @@ -12130,7 +12213,15 @@ index ab0742a382..f2883144f3 100644 Weichun Chen Wei Dai Weifeng Li -@@ -1604,6 +1635,7 @@ Yi Lu +@@ -1565,6 +1609,7 @@ Xieming Katty + Xinfeng Zhao + Xingguang He + Xingyou Chen ++Xinying Yu + Xin Long + Xi Zhang + Xuan Ding +@@ -1604,6 +1649,7 @@ Yi Lu Yilun Xu Yinan Wang Ying A Wang @@ -12139,17 +12230,25 @@ index ab0742a382..f2883144f3 100644 Yinjun Zhang Yipeng Wang diff --git a/dpdk/VERSION b/dpdk/VERSION -index 94c0153b26..d1bc17f504 100644 +index 94c0153b26..c7c153650a 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -23.11.0 -+23.11.2 ++23.11.3 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index fc28e2d702..76c7475114 100644 +index fc28e2d702..5eaab6350f 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c -@@ -628,6 +628,11 @@ static void dpdk_init(void) +@@ -93,7 +93,6 @@ struct interface { + struct rte_bpf_prm *bpf_prm; + char name[RTE_ETH_NAME_MAX_LEN]; + +- struct rte_rxtx_callback *rx_cb[RTE_MAX_QUEUES_PER_PORT]; + const char *ifname; + const char *ifdescr; + }; +@@ -628,6 +627,11 @@ static void dpdk_init(void) eal_argv[i++] = strdup(file_prefix); } @@ -12161,7 +12260,40 @@ index fc28e2d702..76c7475114 100644 if (rte_eal_init(eal_argc, eal_argv) < 0) rte_exit(EXIT_FAILURE, "EAL init failed: is primary process running?\n"); } -@@ -934,6 +939,11 @@ int main(int argc, char **argv) +@@ -869,7 +873,7 @@ static ssize_t + pcap_write_packets(pcap_dumper_t *dumper, + struct rte_mbuf *pkts[], uint16_t n) + { +- uint8_t temp_data[RTE_MBUF_DEFAULT_BUF_SIZE]; ++ uint8_t temp_data[RTE_ETHER_MAX_JUMBO_FRAME_LEN]; + struct pcap_pkthdr header; + uint16_t i; + size_t total = 0; +@@ -878,14 +882,19 @@ pcap_write_packets(pcap_dumper_t *dumper, + + for (i = 0; i < n; i++) { + struct rte_mbuf *m = pkts[i]; ++ size_t len, caplen; + +- header.len = rte_pktmbuf_pkt_len(m); +- header.caplen = RTE_MIN(header.len, sizeof(temp_data)); ++ len = caplen = rte_pktmbuf_pkt_len(m); ++ if (unlikely(!rte_pktmbuf_is_contiguous(m) && len > sizeof(temp_data))) ++ caplen = sizeof(temp_data); ++ ++ header.len = len; ++ header.caplen = caplen; + + pcap_dump((u_char *)dumper, &header, +- rte_pktmbuf_read(m, 0, header.caplen, temp_data)); ++ rte_pktmbuf_read(m, 0, caplen, temp_data)); + +- total += sizeof(header) + header.len; ++ total += sizeof(header) + caplen; + } + + return total; +@@ -934,6 +943,11 @@ int main(int argc, char **argv) { struct rte_ring *r; struct rte_mempool *mp; @@ -12173,7 +12305,7 @@ index fc28e2d702..76c7475114 100644 dumpcap_out_t out; char *p; -@@ -959,8 +969,13 @@ int main(int argc, char **argv) +@@ -959,8 +973,13 @@ int main(int argc, char **argv) compile_filters(); @@ -12271,6 +12403,32 @@ index 7a1c7bdf60..3592f8a865 100644 argp[0] = argv[0]; argp[1] = n_flag; +diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c +index b672aaefbe..4a558705cc 100644 +--- a/dpdk/app/proc-info/main.c ++++ b/dpdk/app/proc-info/main.c +@@ -2166,11 +2166,11 @@ main(int argc, char **argv) + + if (mem_info) { + meminfo_display(); +- return 0; ++ goto cleanup; + } + + if (eventdev_xstats() > 0) +- return 0; ++ goto cleanup; + + nb_ports = rte_eth_dev_count_avail(); + if (nb_ports == 0) +@@ -2251,6 +2251,7 @@ main(int argc, char **argv) + RTE_ETH_FOREACH_DEV(i) + rte_eth_dev_close(i); + ++cleanup: + ret = 
rte_eal_cleanup(); + if (ret) + printf("Error from rte_eal_cleanup(), %d\n", ret); diff --git a/dpdk/app/test-bbdev/test_bbdev_perf.c b/dpdk/app/test-bbdev/test_bbdev_perf.c index dcce00aa0a..5c1755ae0d 100644 --- a/dpdk/app/test-bbdev/test_bbdev_perf.c @@ -12826,7 +12984,7 @@ index 46f6b7d6d2..24d34f983e 100644 (void *)&cmd_load_bpf_start, (void *)&cmd_load_bpf_dir, diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index 9369d3b4c5..d9304e4a32 100644 +index 9369d3b4c5..bf6794ee1d 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c @@ -3528,6 +3528,8 @@ parse_hdrs_list(const char *str, const char *item_name, unsigned int max_items, @@ -12838,11 +12996,496 @@ index 9369d3b4c5..d9304e4a32 100644 cur = strtok_r(str2, ",", &tmp); while (cur != NULL) { parsed_items[nb_item] = get_ptype(cur); +@@ -13045,240 +13047,240 @@ static cmdline_parse_inst_t cmd_config_tx_affinity_map = { + + /* list of instructions */ + static cmdline_parse_ctx_t builtin_ctx[] = { +- (cmdline_parse_inst_t *)&cmd_help_brief, +- (cmdline_parse_inst_t *)&cmd_help_long, +- (cmdline_parse_inst_t *)&cmd_quit, +- (cmdline_parse_inst_t *)&cmd_load_from_file, +- (cmdline_parse_inst_t *)&cmd_showport, +- (cmdline_parse_inst_t *)&cmd_showqueue, +- (cmdline_parse_inst_t *)&cmd_showeeprom, +- (cmdline_parse_inst_t *)&cmd_showportall, +- (cmdline_parse_inst_t *)&cmd_representor_info, +- (cmdline_parse_inst_t *)&cmd_showdevice, +- (cmdline_parse_inst_t *)&cmd_showcfg, +- (cmdline_parse_inst_t *)&cmd_showfwdall, +- (cmdline_parse_inst_t *)&cmd_start, +- (cmdline_parse_inst_t *)&cmd_start_tx_first, +- (cmdline_parse_inst_t *)&cmd_start_tx_first_n, +- (cmdline_parse_inst_t *)&cmd_set_link_up, +- (cmdline_parse_inst_t *)&cmd_set_link_down, +- (cmdline_parse_inst_t *)&cmd_reset, +- (cmdline_parse_inst_t *)&cmd_set_numbers, +- (cmdline_parse_inst_t *)&cmd_set_log, +- (cmdline_parse_inst_t *)&cmd_set_rxoffs, +- (cmdline_parse_inst_t *)&cmd_set_rxpkts, +- (cmdline_parse_inst_t *)&cmd_set_rxhdrs, +- (cmdline_parse_inst_t *)&cmd_set_txpkts, +- (cmdline_parse_inst_t *)&cmd_set_txsplit, +- (cmdline_parse_inst_t *)&cmd_set_txtimes, +- (cmdline_parse_inst_t *)&cmd_set_fwd_list, +- (cmdline_parse_inst_t *)&cmd_set_fwd_mask, +- (cmdline_parse_inst_t *)&cmd_set_fwd_mode, +- (cmdline_parse_inst_t *)&cmd_set_fwd_retry_mode, +- (cmdline_parse_inst_t *)&cmd_set_burst_tx_retry, +- (cmdline_parse_inst_t *)&cmd_set_promisc_mode_one, +- (cmdline_parse_inst_t *)&cmd_set_promisc_mode_all, +- (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one, +- (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all, +- (cmdline_parse_inst_t *)&cmd_set_flush_rx, +- (cmdline_parse_inst_t *)&cmd_set_link_check, +- (cmdline_parse_inst_t *)&cmd_vlan_offload, +- (cmdline_parse_inst_t *)&cmd_vlan_tpid, +- (cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all, +- (cmdline_parse_inst_t *)&cmd_rx_vlan_filter, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set_qinq, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_reset, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set_pvid, +- (cmdline_parse_inst_t *)&cmd_csum_set, +- (cmdline_parse_inst_t *)&cmd_csum_show, +- (cmdline_parse_inst_t *)&cmd_csum_tunnel, +- (cmdline_parse_inst_t *)&cmd_csum_mac_swap, +- (cmdline_parse_inst_t *)&cmd_tso_set, +- (cmdline_parse_inst_t *)&cmd_tso_show, +- (cmdline_parse_inst_t *)&cmd_tunnel_tso_set, +- (cmdline_parse_inst_t *)&cmd_tunnel_tso_show, ++ &cmd_help_brief, ++ &cmd_help_long, ++ &cmd_quit, ++ &cmd_load_from_file, ++ &cmd_showport, ++ 
&cmd_showqueue, ++ &cmd_showeeprom, ++ &cmd_showportall, ++ &cmd_representor_info, ++ &cmd_showdevice, ++ &cmd_showcfg, ++ &cmd_showfwdall, ++ &cmd_start, ++ &cmd_start_tx_first, ++ &cmd_start_tx_first_n, ++ &cmd_set_link_up, ++ &cmd_set_link_down, ++ &cmd_reset, ++ &cmd_set_numbers, ++ &cmd_set_log, ++ &cmd_set_rxoffs, ++ &cmd_set_rxpkts, ++ &cmd_set_rxhdrs, ++ &cmd_set_txpkts, ++ &cmd_set_txsplit, ++ &cmd_set_txtimes, ++ &cmd_set_fwd_list, ++ &cmd_set_fwd_mask, ++ &cmd_set_fwd_mode, ++ &cmd_set_fwd_retry_mode, ++ &cmd_set_burst_tx_retry, ++ &cmd_set_promisc_mode_one, ++ &cmd_set_promisc_mode_all, ++ &cmd_set_allmulti_mode_one, ++ &cmd_set_allmulti_mode_all, ++ &cmd_set_flush_rx, ++ &cmd_set_link_check, ++ &cmd_vlan_offload, ++ &cmd_vlan_tpid, ++ &cmd_rx_vlan_filter_all, ++ &cmd_rx_vlan_filter, ++ &cmd_tx_vlan_set, ++ &cmd_tx_vlan_set_qinq, ++ &cmd_tx_vlan_reset, ++ &cmd_tx_vlan_set_pvid, ++ &cmd_csum_set, ++ &cmd_csum_show, ++ &cmd_csum_tunnel, ++ &cmd_csum_mac_swap, ++ &cmd_tso_set, ++ &cmd_tso_show, ++ &cmd_tunnel_tso_set, ++ &cmd_tunnel_tso_show, + #ifdef RTE_LIB_GRO +- (cmdline_parse_inst_t *)&cmd_gro_enable, +- (cmdline_parse_inst_t *)&cmd_gro_flush, +- (cmdline_parse_inst_t *)&cmd_gro_show, ++ &cmd_gro_enable, ++ &cmd_gro_flush, ++ &cmd_gro_show, + #endif + #ifdef RTE_LIB_GSO +- (cmdline_parse_inst_t *)&cmd_gso_enable, +- (cmdline_parse_inst_t *)&cmd_gso_size, +- (cmdline_parse_inst_t *)&cmd_gso_show, ++ &cmd_gso_enable, ++ &cmd_gso_size, ++ &cmd_gso_show, + #endif +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_hw, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_lw, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_pt, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_xon, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_macfwd, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_autoneg, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_show, +- (cmdline_parse_inst_t *)&cmd_priority_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_queue_priority_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_config_dcb, +- (cmdline_parse_inst_t *)&cmd_read_rxd_txd, +- (cmdline_parse_inst_t *)&cmd_stop, +- (cmdline_parse_inst_t *)&cmd_mac_addr, +- (cmdline_parse_inst_t *)&cmd_set_fwd_eth_peer, +- (cmdline_parse_inst_t *)&cmd_set_qmap, +- (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero, +- (cmdline_parse_inst_t *)&cmd_set_record_core_cycles, +- (cmdline_parse_inst_t *)&cmd_set_record_burst_stats, +- (cmdline_parse_inst_t *)&cmd_operate_port, +- (cmdline_parse_inst_t *)&cmd_operate_specific_port, +- (cmdline_parse_inst_t *)&cmd_operate_attach_port, +- (cmdline_parse_inst_t *)&cmd_operate_detach_port, +- (cmdline_parse_inst_t *)&cmd_operate_detach_device, +- (cmdline_parse_inst_t *)&cmd_set_port_setup_on, +- (cmdline_parse_inst_t *)&cmd_config_speed_all, +- (cmdline_parse_inst_t *)&cmd_config_speed_specific, +- (cmdline_parse_inst_t *)&cmd_config_loopback_all, +- (cmdline_parse_inst_t *)&cmd_config_loopback_specific, +- (cmdline_parse_inst_t *)&cmd_config_rx_tx, +- (cmdline_parse_inst_t *)&cmd_config_mtu, +- (cmdline_parse_inst_t *)&cmd_config_max_pkt_len, +- (cmdline_parse_inst_t *)&cmd_config_max_lro_pkt_size, +- (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, +- (cmdline_parse_inst_t *)&cmd_config_rss, +- (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size, +- (cmdline_parse_inst_t 
*)&cmd_config_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_config_deferred_start_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_config_rss_reta, +- (cmdline_parse_inst_t *)&cmd_showport_reta, +- (cmdline_parse_inst_t *)&cmd_showport_macs, +- (cmdline_parse_inst_t *)&cmd_show_port_flow_transfer_proxy, +- (cmdline_parse_inst_t *)&cmd_config_burst, +- (cmdline_parse_inst_t *)&cmd_config_thresh, +- (cmdline_parse_inst_t *)&cmd_config_threshold, +- (cmdline_parse_inst_t *)&cmd_set_uc_hash_filter, +- (cmdline_parse_inst_t *)&cmd_set_uc_all_hash_filter, +- (cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter, +- (cmdline_parse_inst_t *)&cmd_queue_rate_limit, +- (cmdline_parse_inst_t *)&cmd_tunnel_udp_config, +- (cmdline_parse_inst_t *)&cmd_showport_rss_hash, +- (cmdline_parse_inst_t *)&cmd_showport_rss_hash_key, +- (cmdline_parse_inst_t *)&cmd_showport_rss_hash_algo, +- (cmdline_parse_inst_t *)&cmd_config_rss_hash_key, +- (cmdline_parse_inst_t *)&cmd_cleanup_txq_mbufs, +- (cmdline_parse_inst_t *)&cmd_dump, +- (cmdline_parse_inst_t *)&cmd_dump_one, +- (cmdline_parse_inst_t *)&cmd_flow, +- (cmdline_parse_inst_t *)&cmd_show_port_meter_cap, +- (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm, +- (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm, +- (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm_rfc4115, +- (cmdline_parse_inst_t *)&cmd_del_port_meter_profile, +- (cmdline_parse_inst_t *)&cmd_create_port_meter, +- (cmdline_parse_inst_t *)&cmd_enable_port_meter, +- (cmdline_parse_inst_t *)&cmd_disable_port_meter, +- (cmdline_parse_inst_t *)&cmd_del_port_meter, +- (cmdline_parse_inst_t *)&cmd_del_port_meter_policy, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_profile, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_dscp_table, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_vlan_table, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_in_proto, +- (cmdline_parse_inst_t *)&cmd_get_port_meter_in_proto, +- (cmdline_parse_inst_t *)&cmd_get_port_meter_in_proto_prio, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask, +- (cmdline_parse_inst_t *)&cmd_show_port_meter_stats, +- (cmdline_parse_inst_t *)&cmd_mcast_addr, +- (cmdline_parse_inst_t *)&cmd_mcast_addr_flush, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_anti_spoof, +- (cmdline_parse_inst_t *)&cmd_set_vf_mac_anti_spoof, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert, +- (cmdline_parse_inst_t *)&cmd_set_tx_loopback, +- (cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en, +- (cmdline_parse_inst_t *)&cmd_set_vf_traffic, +- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode, +- (cmdline_parse_inst_t *)&cmd_vf_rate_limit, +- (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter, +- (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr, +- (cmdline_parse_inst_t *)&cmd_set_vxlan, +- (cmdline_parse_inst_t *)&cmd_set_vxlan_tos_ttl, +- (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_nvgre, +- (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_l2_encap, +- (cmdline_parse_inst_t *)&cmd_set_l2_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_l2_decap, +- (cmdline_parse_inst_t *)&cmd_set_l2_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap, +- 
(cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_conntrack_common, +- (cmdline_parse_inst_t *)&cmd_set_conntrack_dir, +- (cmdline_parse_inst_t *)&cmd_show_vf_stats, +- (cmdline_parse_inst_t *)&cmd_clear_vf_stats, +- (cmdline_parse_inst_t *)&cmd_show_port_supported_ptypes, +- (cmdline_parse_inst_t *)&cmd_set_port_ptypes, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_level_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_type, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_stats, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shared_shaper, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shared_shaper, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_wred_profile, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_wred_profile, +- (cmdline_parse_inst_t *)&cmd_set_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node_pmode, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent, +- (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_resume_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_ecn, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_dscp, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_vlan_dei, +- (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port, +- (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa, +- (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration, +- (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload, +- (cmdline_parse_inst_t *)&cmd_config_all_port_rx_offload, +- (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload, +- (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa, +- (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration, +- (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload, +- (cmdline_parse_inst_t *)&cmd_config_all_port_tx_offload, +- (cmdline_parse_inst_t *)&cmd_config_per_queue_tx_offload, ++ &cmd_link_flow_control_set, ++ &cmd_link_flow_control_set_rx, ++ &cmd_link_flow_control_set_tx, ++ &cmd_link_flow_control_set_hw, ++ &cmd_link_flow_control_set_lw, ++ &cmd_link_flow_control_set_pt, ++ &cmd_link_flow_control_set_xon, ++ &cmd_link_flow_control_set_macfwd, ++ &cmd_link_flow_control_set_autoneg, ++ &cmd_link_flow_control_show, ++ &cmd_priority_flow_control_set, ++ &cmd_queue_priority_flow_control_set, ++ &cmd_config_dcb, ++ &cmd_read_rxd_txd, ++ &cmd_stop, ++ &cmd_mac_addr, ++ &cmd_set_fwd_eth_peer, ++ &cmd_set_qmap, ++ &cmd_set_xstats_hide_zero, ++ &cmd_set_record_core_cycles, ++ &cmd_set_record_burst_stats, ++ &cmd_operate_port, ++ &cmd_operate_specific_port, ++ &cmd_operate_attach_port, ++ &cmd_operate_detach_port, ++ &cmd_operate_detach_device, ++ &cmd_set_port_setup_on, ++ &cmd_config_speed_all, ++ &cmd_config_speed_specific, ++ &cmd_config_loopback_all, ++ &cmd_config_loopback_specific, ++ &cmd_config_rx_tx, ++ &cmd_config_mtu, ++ &cmd_config_max_pkt_len, ++ &cmd_config_max_lro_pkt_size, ++ &cmd_config_rx_mode_flag, ++ &cmd_config_rss, ++ 
&cmd_config_rxtx_ring_size, ++ &cmd_config_rxtx_queue, ++ &cmd_config_deferred_start_rxtx_queue, ++ &cmd_setup_rxtx_queue, ++ &cmd_config_rss_reta, ++ &cmd_showport_reta, ++ &cmd_showport_macs, ++ &cmd_show_port_flow_transfer_proxy, ++ &cmd_config_burst, ++ &cmd_config_thresh, ++ &cmd_config_threshold, ++ &cmd_set_uc_hash_filter, ++ &cmd_set_uc_all_hash_filter, ++ &cmd_vf_mac_addr_filter, ++ &cmd_queue_rate_limit, ++ &cmd_tunnel_udp_config, ++ &cmd_showport_rss_hash, ++ &cmd_showport_rss_hash_key, ++ &cmd_showport_rss_hash_algo, ++ &cmd_config_rss_hash_key, ++ &cmd_cleanup_txq_mbufs, ++ &cmd_dump, ++ &cmd_dump_one, ++ &cmd_flow, ++ &cmd_show_port_meter_cap, ++ &cmd_add_port_meter_profile_srtcm, ++ &cmd_add_port_meter_profile_trtcm, ++ &cmd_add_port_meter_profile_trtcm_rfc4115, ++ &cmd_del_port_meter_profile, ++ &cmd_create_port_meter, ++ &cmd_enable_port_meter, ++ &cmd_disable_port_meter, ++ &cmd_del_port_meter, ++ &cmd_del_port_meter_policy, ++ &cmd_set_port_meter_profile, ++ &cmd_set_port_meter_dscp_table, ++ &cmd_set_port_meter_vlan_table, ++ &cmd_set_port_meter_in_proto, ++ &cmd_get_port_meter_in_proto, ++ &cmd_get_port_meter_in_proto_prio, ++ &cmd_set_port_meter_stats_mask, ++ &cmd_show_port_meter_stats, ++ &cmd_mcast_addr, ++ &cmd_mcast_addr_flush, ++ &cmd_set_vf_vlan_anti_spoof, ++ &cmd_set_vf_mac_anti_spoof, ++ &cmd_set_vf_vlan_stripq, ++ &cmd_set_vf_vlan_insert, ++ &cmd_set_tx_loopback, ++ &cmd_set_all_queues_drop_en, ++ &cmd_set_vf_traffic, ++ &cmd_set_vf_rxmode, ++ &cmd_vf_rate_limit, ++ &cmd_vf_rxvlan_filter, ++ &cmd_set_vf_mac_addr, ++ &cmd_set_vxlan, ++ &cmd_set_vxlan_tos_ttl, ++ &cmd_set_vxlan_with_vlan, ++ &cmd_set_nvgre, ++ &cmd_set_nvgre_with_vlan, ++ &cmd_set_l2_encap, ++ &cmd_set_l2_encap_with_vlan, ++ &cmd_set_l2_decap, ++ &cmd_set_l2_decap_with_vlan, ++ &cmd_set_mplsogre_encap, ++ &cmd_set_mplsogre_encap_with_vlan, ++ &cmd_set_mplsogre_decap, ++ &cmd_set_mplsogre_decap_with_vlan, ++ &cmd_set_mplsoudp_encap, ++ &cmd_set_mplsoudp_encap_with_vlan, ++ &cmd_set_mplsoudp_decap, ++ &cmd_set_mplsoudp_decap_with_vlan, ++ &cmd_set_conntrack_common, ++ &cmd_set_conntrack_dir, ++ &cmd_show_vf_stats, ++ &cmd_clear_vf_stats, ++ &cmd_show_port_supported_ptypes, ++ &cmd_set_port_ptypes, ++ &cmd_show_port_tm_cap, ++ &cmd_show_port_tm_level_cap, ++ &cmd_show_port_tm_node_cap, ++ &cmd_show_port_tm_node_type, ++ &cmd_show_port_tm_node_stats, ++ &cmd_add_port_tm_node_shaper_profile, ++ &cmd_del_port_tm_node_shaper_profile, ++ &cmd_add_port_tm_node_shared_shaper, ++ &cmd_del_port_tm_node_shared_shaper, ++ &cmd_add_port_tm_node_wred_profile, ++ &cmd_del_port_tm_node_wred_profile, ++ &cmd_set_port_tm_node_shaper_profile, ++ &cmd_add_port_tm_nonleaf_node, ++ &cmd_add_port_tm_nonleaf_node_pmode, ++ &cmd_add_port_tm_leaf_node, ++ &cmd_del_port_tm_node, ++ &cmd_set_port_tm_node_parent, ++ &cmd_suspend_port_tm_node, ++ &cmd_resume_port_tm_node, ++ &cmd_port_tm_hierarchy_commit, ++ &cmd_port_tm_mark_ip_ecn, ++ &cmd_port_tm_mark_ip_dscp, ++ &cmd_port_tm_mark_vlan_dei, ++ &cmd_cfg_tunnel_udp_port, ++ &cmd_rx_offload_get_capa, ++ &cmd_rx_offload_get_configuration, ++ &cmd_config_per_port_rx_offload, ++ &cmd_config_all_port_rx_offload, ++ &cmd_config_per_queue_rx_offload, ++ &cmd_tx_offload_get_capa, ++ &cmd_tx_offload_get_configuration, ++ &cmd_config_per_port_tx_offload, ++ &cmd_config_all_port_tx_offload, ++ &cmd_config_per_queue_tx_offload, + #ifdef RTE_LIB_BPF +- (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse, +- (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse, ++ 
&cmd_operate_bpf_ld_parse, ++ &cmd_operate_bpf_unld_parse, + #endif +- (cmdline_parse_inst_t *)&cmd_config_tx_metadata_specific, +- (cmdline_parse_inst_t *)&cmd_show_tx_metadata, +- (cmdline_parse_inst_t *)&cmd_show_rx_tx_desc_status, +- (cmdline_parse_inst_t *)&cmd_show_rx_queue_desc_used_count, +- (cmdline_parse_inst_t *)&cmd_set_raw, +- (cmdline_parse_inst_t *)&cmd_show_set_raw, +- (cmdline_parse_inst_t *)&cmd_show_set_raw_all, +- (cmdline_parse_inst_t *)&cmd_config_tx_dynf_specific, +- (cmdline_parse_inst_t *)&cmd_show_fec_mode, +- (cmdline_parse_inst_t *)&cmd_set_fec_mode, +- (cmdline_parse_inst_t *)&cmd_set_rxq_avail_thresh, +- (cmdline_parse_inst_t *)&cmd_show_capability, +- (cmdline_parse_inst_t *)&cmd_set_flex_is_pattern, +- (cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern, +- (cmdline_parse_inst_t *)&cmd_show_port_cman_capa, +- (cmdline_parse_inst_t *)&cmd_show_port_cman_config, +- (cmdline_parse_inst_t *)&cmd_set_port_cman_config, +- (cmdline_parse_inst_t *)&cmd_config_tx_affinity_map, ++ &cmd_config_tx_metadata_specific, ++ &cmd_show_tx_metadata, ++ &cmd_show_rx_tx_desc_status, ++ &cmd_show_rx_queue_desc_used_count, ++ &cmd_set_raw, ++ &cmd_show_set_raw, ++ &cmd_show_set_raw_all, ++ &cmd_config_tx_dynf_specific, ++ &cmd_show_fec_mode, ++ &cmd_set_fec_mode, ++ &cmd_set_rxq_avail_thresh, ++ &cmd_show_capability, ++ &cmd_set_flex_is_pattern, ++ &cmd_set_flex_spec_pattern, ++ &cmd_show_port_cman_capa, ++ &cmd_show_port_cman_config, ++ &cmd_set_port_cman_config, ++ &cmd_config_tx_affinity_map, + NULL, + }; + diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c -index ce71818705..7e6e06a04f 100644 +index ce71818705..661c72c7ef 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c -@@ -3520,7 +3520,7 @@ static const struct token token_list[] = { +@@ -105,7 +105,6 @@ enum index { + HASH, + + /* Flex arguments */ +- FLEX_ITEM_INIT, + FLEX_ITEM_CREATE, + FLEX_ITEM_DESTROY, + +@@ -1249,7 +1248,6 @@ struct parse_action_priv { + }) + + static const enum index next_flex_item[] = { +- FLEX_ITEM_INIT, + FLEX_ITEM_CREATE, + FLEX_ITEM_DESTROY, + ZERO, +@@ -3520,7 +3518,7 @@ static const struct token token_list[] = { [QUEUE_DESTROY] = { .name = "destroy", .help = "destroy a flow rule", @@ -12851,7 +13494,23 @@ index ce71818705..7e6e06a04f 100644 NEXT_ENTRY(COMMON_QUEUE_ID)), .args = ARGS(ARGS_ENTRY(struct buffer, queue)), .call = parse_qo_destroy, -@@ -5543,9 +5543,12 @@ static const struct token token_list[] = { +@@ -3932,15 +3930,6 @@ static const struct token token_list[] = { + .next = NEXT(next_flex_item), + .call = parse_flex, + }, +- [FLEX_ITEM_INIT] = { +- .name = "init", +- .help = "flex item init", +- .args = ARGS(ARGS_ENTRY(struct buffer, args.flex.token), +- ARGS_ENTRY(struct buffer, port)), +- .next = NEXT(NEXT_ENTRY(COMMON_FLEX_TOKEN), +- NEXT_ENTRY(COMMON_PORT_ID)), +- .call = parse_flex +- }, + [FLEX_ITEM_CREATE] = { + .name = "create", + .help = "flex item create", +@@ -5543,9 +5532,12 @@ static const struct token token_list[] = { [ITEM_CONNTRACK] = { .name = "conntrack", .help = "conntrack state", @@ -12864,7 +13523,7 @@ index ce71818705..7e6e06a04f 100644 }, [ITEM_PORT_REPRESENTOR] = { .name = "port_representor", -@@ -6905,7 +6908,7 @@ static const struct token token_list[] = { +@@ -6905,7 +6897,7 @@ static const struct token token_list[] = { .comp = comp_none, }, [ACTION_MODIFY_FIELD_SRC_TAG_INDEX] = { @@ -12873,7 +13532,7 @@ index ce71818705..7e6e06a04f 100644 .help = "source field tag array", .next = 
NEXT(action_modify_field_src, NEXT_ENTRY(COMMON_UNSIGNED)), -@@ -7395,11 +7398,13 @@ static const struct token token_list[] = { +@@ -7395,11 +7387,13 @@ static const struct token token_list[] = { .type = "UNSIGNED", .help = "unsigned integer value", .call = parse_indlst_id2ptr, @@ -12887,7 +13546,15 @@ index ce71818705..7e6e06a04f 100644 }, [ACTION_SHARED_INDIRECT] = { .name = "shared_indirect", -@@ -11334,34 +11339,36 @@ parse_indlst_id2ptr(struct context *ctx, const struct token *token, +@@ -10715,7 +10709,6 @@ parse_flex(struct context *ctx, const struct token *token, + switch (ctx->curr) { + default: + break; +- case FLEX_ITEM_INIT: + case FLEX_ITEM_CREATE: + case FLEX_ITEM_DESTROY: + out->command = ctx->curr; +@@ -11334,34 +11327,36 @@ parse_indlst_id2ptr(struct context *ctx, const struct token *token, uint32_t id; int ret; @@ -12945,7 +13612,16 @@ index ce71818705..7e6e06a04f 100644 } return ret; } -@@ -12609,6 +12616,7 @@ cmd_flow_parsed(const struct buffer *in) +@@ -12116,7 +12111,7 @@ comp_names_to_index(struct context *ctx, const struct token *token, + RTE_SET_USED(token); + if (!buf) + return names_size; +- if (names[ent] && ent < names_size) ++ if (ent < names_size && names[ent] != NULL) + return rte_strscpy(buf, names[ent], size); + return -1; + +@@ -12609,6 +12604,7 @@ cmd_flow_parsed(const struct buffer *in) port_queue_action_handle_create( in->port, in->queue, in->postpone, in->args.vc.attr.group, @@ -12954,7 +13630,7 @@ index ce71818705..7e6e06a04f 100644 .ingress = in->args.vc.attr.ingress, .egress = in->args.vc.attr.egress, diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index cad7537bc6..40e4e83fb8 100644 +index cad7537bc6..9c3d668e56 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -1891,8 +1891,7 @@ port_action_handle_flush(portid_t port_id) @@ -12981,6 +13657,15 @@ index cad7537bc6..40e4e83fb8 100644 } return ret; } +@@ -2222,7 +2219,7 @@ port_meter_policy_add(portid_t port_id, uint32_t policy_id, + for (act_n = 0, start = act; + act->type != RTE_FLOW_ACTION_TYPE_END; act++) + act_n++; +- if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) ++ if (act_n > 0) + policy.actions[i] = start; + else + policy.actions[i] = NULL; @@ -2789,8 +2786,7 @@ port_queue_flow_create(portid_t port_id, queueid_t queue_id, flow = rte_flow_async_create_by_index(port_id, queue_id, &op_attr, pt->table, rule_idx, actions, actions_idx, job, &error); @@ -13017,7 +13702,31 @@ index cad7537bc6..40e4e83fb8 100644 queue_action_list_handle_create(port_id, queue_id, pia, job, &attr, conf, action, &error); else -@@ -4611,9 +4606,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, +@@ -3918,8 +3913,10 @@ port_flow_aged(portid_t port_id, uint8_t destroy) + } + type = (enum age_action_context_type *)contexts[idx]; + switch (*type) { +- case ACTION_AGE_CONTEXT_TYPE_FLOW: ++ case ACTION_AGE_CONTEXT_TYPE_FLOW: { ++ uint64_t flow_id; + ctx.pf = container_of(type, struct port_flow, age_type); ++ flow_id = ctx.pf->id; + printf("%-20s\t%" PRIu64 "\t%" PRIu32 "\t%" PRIu32 + "\t%c%c%c\t\n", + "Flow", +@@ -3930,9 +3927,10 @@ port_flow_aged(portid_t port_id, uint8_t destroy) + ctx.pf->rule.attr->egress ? 'e' : '-', + ctx.pf->rule.attr->transfer ? 
't' : '-'); + if (destroy && !port_flow_destroy(port_id, 1, +- &ctx.pf->id, false)) ++ &flow_id, false)) + total++; + break; ++ } + case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: + ctx.pia = container_of(type, + struct port_indirect_action, age_type); +@@ -4611,9 +4609,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, continue; printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", share_group, share_rxq); @@ -13029,7 +13738,7 @@ index cad7537bc6..40e4e83fb8 100644 lc_id, fs->rx_port, fs->rx_queue); printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", nb_rxq); -@@ -4794,7 +4789,6 @@ rss_fwd_config_setup(void) +@@ -4794,7 +4792,6 @@ rss_fwd_config_setup(void) queueid_t nb_q; streamid_t sm_id; int start; @@ -13037,7 +13746,7 @@ index cad7537bc6..40e4e83fb8 100644 nb_q = nb_rxq; if (nb_q > nb_txq) -@@ -4802,7 +4796,7 @@ rss_fwd_config_setup(void) +@@ -4802,7 +4799,7 @@ rss_fwd_config_setup(void) cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; cur_fwd_config.nb_fwd_ports = nb_fwd_ports; cur_fwd_config.nb_fwd_streams = @@ -13046,7 +13755,7 @@ index cad7537bc6..40e4e83fb8 100644 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) cur_fwd_config.nb_fwd_lcores = -@@ -4824,7 +4818,6 @@ rss_fwd_config_setup(void) +@@ -4824,7 +4821,6 @@ rss_fwd_config_setup(void) * the 2~3 queue for secondary process. */ start = proc_id * nb_q / num_procs; @@ -13054,7 +13763,7 @@ index cad7537bc6..40e4e83fb8 100644 rxp = 0; rxq = start; for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { -@@ -4843,8 +4836,6 @@ rss_fwd_config_setup(void) +@@ -4843,8 +4839,6 @@ rss_fwd_config_setup(void) continue; rxp = 0; rxq++; @@ -13063,7 +13772,7 @@ index cad7537bc6..40e4e83fb8 100644 } } -@@ -4989,7 +4980,7 @@ icmp_echo_config_setup(void) +@@ -4989,7 +4983,7 @@ icmp_echo_config_setup(void) lcoreid_t lc_id; uint16_t sm_id; @@ -13438,8 +14147,61 @@ index 2f596affee..a5e3d8699c 100644 return 0; } +diff --git a/dpdk/app/test/test_common.c b/dpdk/app/test/test_common.c +index 21eb2285e1..6dbd7fc9a9 100644 +--- a/dpdk/app/test/test_common.c ++++ b/dpdk/app/test/test_common.c +@@ -9,11 +9,12 @@ + #include + #include + #include ++#include + #include + + #include "test.h" + +-#define MAX_NUM 1 << 20 ++#define MAX_NUM (1 << 20) + + #define FAIL(x)\ + {printf(x "() test failed!\n");\ +@@ -218,19 +219,21 @@ test_align(void) + } + } + +- for (p = 1; p <= MAX_NUM / 2; p++) { +- for (i = 1; i <= MAX_NUM / 2; i++) { +- val = RTE_ALIGN_MUL_CEIL(i, p); +- if (val % p != 0 || val < i) +- FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p); +- val = RTE_ALIGN_MUL_FLOOR(i, p); +- if (val % p != 0 || val > i) +- FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p); +- val = RTE_ALIGN_MUL_NEAR(i, p); +- if (val % p != 0 || ((val != RTE_ALIGN_MUL_CEIL(i, p)) +- & (val != RTE_ALIGN_MUL_FLOOR(i, p)))) +- FAIL_ALIGN("RTE_ALIGN_MUL_NEAR", i, p); +- } ++ /* testing the whole space of 2^20^2 takes too long. 
*/ ++ for (j = 1; j <= MAX_NUM ; j++) { ++ i = rte_rand_max(MAX_NUM - 1) + 1; ++ p = rte_rand_max(MAX_NUM - 1) + 1; ++ ++ val = RTE_ALIGN_MUL_CEIL(i, p); ++ if (val % p != 0 || val < i) ++ FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p); ++ val = RTE_ALIGN_MUL_FLOOR(i, p); ++ if (val % p != 0 || val > i) ++ FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p); ++ val = RTE_ALIGN_MUL_NEAR(i, p); ++ if (val % p != 0 || ((val != RTE_ALIGN_MUL_CEIL(i, p)) ++ & (val != RTE_ALIGN_MUL_FLOOR(i, p)))) ++ FAIL_ALIGN("RTE_ALIGN_MUL_NEAR", i, p); + } + + return 0; diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index 58561ededf..6cd38aefae 100644 +index 58561ededf..7d9fe29c02 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c @@ -196,6 +196,8 @@ post_process_raw_dp_op(void *user_data, uint32_t index __rte_unused, @@ -13551,12 +14313,16 @@ index 58561ededf..6cd38aefae 100644 struct rte_cryptodev_info dev_info; struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = MAX_NUM_OPS_INFLIGHT -@@ -13582,6 +13623,19 @@ test_enq_callback_setup(void) +@@ -13582,6 +13623,23 @@ test_enq_callback_setup(void) struct rte_cryptodev_cb *cb; uint16_t qp_id = 0; + int j = 0; + ++ /* Skip test if synchronous API is used */ ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + /* Verify the crypto capabilities for which enqueue/dequeue is done. */ + cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; + cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; @@ -13571,7 +14337,7 @@ index 58561ededf..6cd38aefae 100644 /* Stop the device in case it's started so it can be configured */ rte_cryptodev_stop(ts_params->valid_devs[0]); -@@ -13605,9 +13659,16 @@ test_enq_callback_setup(void) +@@ -13605,9 +13663,16 @@ test_enq_callback_setup(void) qp_conf.nb_descriptors, qp_id, ts_params->valid_devs[0]); @@ -13588,7 +14354,7 @@ index 58561ededf..6cd38aefae 100644 TEST_ASSERT_NULL(cb, "Add callback on qp %u on " "cryptodev %u did not fail", qp_id, RTE_CRYPTO_MAX_DEVS); -@@ -13637,12 +13698,11 @@ test_enq_callback_setup(void) +@@ -13637,12 +13702,11 @@ test_enq_callback_setup(void) rte_cryptodev_start(ts_params->valid_devs[0]); @@ -13605,7 +14371,7 @@ index 58561ededf..6cd38aefae 100644 /* Test with invalid crypto device */ TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback( -@@ -13667,6 +13727,8 @@ test_enq_callback_setup(void) +@@ -13667,6 +13731,8 @@ test_enq_callback_setup(void) "qp %u on cryptodev %u", qp_id, ts_params->valid_devs[0]); @@ -13614,7 +14380,7 @@ index 58561ededf..6cd38aefae 100644 return TEST_SUCCESS; } -@@ -13674,6 +13736,7 @@ static int +@@ -13674,6 +13740,7 @@ static int test_deq_callback_setup(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -13622,12 +14388,16 @@ index 58561ededf..6cd38aefae 100644 struct rte_cryptodev_info dev_info; struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = MAX_NUM_OPS_INFLIGHT -@@ -13681,6 +13744,19 @@ test_deq_callback_setup(void) +@@ -13681,6 +13748,23 @@ test_deq_callback_setup(void) struct rte_cryptodev_cb *cb; uint16_t qp_id = 0; + int j = 0; + ++ /* Skip test if synchronous API is used */ ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + /* Verify the crypto capabilities for which enqueue/dequeue is done. 
*/ + cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; + cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; @@ -13642,7 +14412,7 @@ index 58561ededf..6cd38aefae 100644 /* Stop the device in case it's started so it can be configured */ rte_cryptodev_stop(ts_params->valid_devs[0]); -@@ -13704,9 +13780,16 @@ test_deq_callback_setup(void) +@@ -13704,9 +13788,16 @@ test_deq_callback_setup(void) qp_conf.nb_descriptors, qp_id, ts_params->valid_devs[0]); @@ -13659,7 +14429,7 @@ index 58561ededf..6cd38aefae 100644 TEST_ASSERT_NULL(cb, "Add callback on qp %u on " "cryptodev %u did not fail", qp_id, RTE_CRYPTO_MAX_DEVS); -@@ -13736,12 +13819,11 @@ test_deq_callback_setup(void) +@@ -13736,12 +13827,11 @@ test_deq_callback_setup(void) rte_cryptodev_start(ts_params->valid_devs[0]); @@ -13676,7 +14446,7 @@ index 58561ededf..6cd38aefae 100644 /* Test with invalid crypto device */ TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback( -@@ -13766,6 +13848,8 @@ test_deq_callback_setup(void) +@@ -13766,6 +13856,8 @@ test_deq_callback_setup(void) "qp %u on cryptodev %u", qp_id, ts_params->valid_devs[0]); @@ -13685,7 +14455,7 @@ index 58561ededf..6cd38aefae 100644 return TEST_SUCCESS; } -@@ -15486,7 +15570,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -15486,7 +15578,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, } /* @@ -14013,6 +14783,73 @@ index b4982014a2..89981f13f0 100644 .description = "RSA Encryption Decryption (n=128, pt=20, e=3) EXP, QT", .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA, +diff --git a/dpdk/app/test/test_eal_flags.c b/dpdk/app/test/test_eal_flags.c +index 6cb4b06757..767cf1481c 100644 +--- a/dpdk/app/test/test_eal_flags.c ++++ b/dpdk/app/test/test_eal_flags.c +@@ -677,8 +677,8 @@ test_missing_c_flag(void) + + if (rte_lcore_is_enabled(0) && rte_lcore_is_enabled(1) && + rte_lcore_is_enabled(2) && rte_lcore_is_enabled(3) && +- rte_lcore_is_enabled(3) && rte_lcore_is_enabled(5) && +- rte_lcore_is_enabled(4) && rte_lcore_is_enabled(7) && ++ rte_lcore_is_enabled(4) && rte_lcore_is_enabled(5) && ++ rte_lcore_is_enabled(6) && rte_lcore_is_enabled(7) && + launch_proc(argv29) != 0) { + printf("Error - " + "process did not run ok with valid corelist value\n"); +diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c +index 0c56744ba0..e733df738e 100644 +--- a/dpdk/app/test/test_event_crypto_adapter.c ++++ b/dpdk/app/test/test_event_crypto_adapter.c +@@ -1154,21 +1154,17 @@ configure_cryptodev(void) + + static inline void + evdev_set_conf_values(struct rte_event_dev_config *dev_conf, +- struct rte_event_dev_info *info) ++ const struct rte_event_dev_info *info) + { +- memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); +- dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; +- dev_conf->nb_event_ports = NB_TEST_PORTS; +- dev_conf->nb_event_queues = NB_TEST_QUEUES; +- dev_conf->nb_event_queue_flows = info->max_event_queue_flows; +- dev_conf->nb_event_port_dequeue_depth = +- info->max_event_port_dequeue_depth; +- dev_conf->nb_event_port_enqueue_depth = +- info->max_event_port_enqueue_depth; +- dev_conf->nb_event_port_enqueue_depth = +- info->max_event_port_enqueue_depth; +- dev_conf->nb_events_limit = +- info->max_num_events; ++ *dev_conf = (struct rte_event_dev_config) { ++ .dequeue_timeout_ns = info->min_dequeue_timeout_ns, ++ .nb_event_ports = NB_TEST_PORTS, ++ .nb_event_queues = NB_TEST_QUEUES, ++ .nb_event_queue_flows = info->max_event_queue_flows, ++ .nb_event_port_dequeue_depth = 
info->max_event_port_dequeue_depth, ++ .nb_event_port_enqueue_depth = info->max_event_port_enqueue_depth, ++ .nb_events_limit = info->max_num_events, ++ }; + } + + static int +diff --git a/dpdk/app/test/test_event_dma_adapter.c b/dpdk/app/test/test_event_dma_adapter.c +index 35b417b69f..de0d671d3f 100644 +--- a/dpdk/app/test/test_event_dma_adapter.c ++++ b/dpdk/app/test/test_event_dma_adapter.c +@@ -276,7 +276,10 @@ test_op_forward_mode(void) + memset(&ev[i], 0, sizeof(struct rte_event)); + ev[i].event = 0; + ev[i].event_type = RTE_EVENT_TYPE_DMADEV; +- ev[i].queue_id = TEST_DMA_EV_QUEUE_ID; ++ if (params.internal_port_op_fwd) ++ ev[i].queue_id = TEST_APP_EV_QUEUE_ID; ++ else ++ ev[i].queue_id = TEST_DMA_EV_QUEUE_ID; + ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC; + ev[i].flow_id = 0xAABB; + ev[i].event_ptr = op; diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c index dbd22f6800..482b8e69e3 100644 --- a/dpdk/app/test/test_event_eth_tx_adapter.c @@ -14029,7 +14866,7 @@ index dbd22f6800..482b8e69e3 100644 err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); diff --git a/dpdk/app/test/test_eventdev.c b/dpdk/app/test/test_eventdev.c -index 71de947ce4..e4e234dc98 100644 +index 71de947ce4..9a6c8f470c 100644 --- a/dpdk/app/test/test_eventdev.c +++ b/dpdk/app/test/test_eventdev.c @@ -33,9 +33,15 @@ testsuite_setup(void) @@ -14050,6 +14887,14 @@ index 71de947ce4..e4e234dc98 100644 } return TEST_SUCCESS; } +@@ -1183,6 +1189,7 @@ test_eventdev_profile_switch(void) + ev.op = RTE_EVENT_OP_NEW; + ev.flow_id = 0; + ev.u64 = 0xBADF00D0; ++ ev.sched_type = RTE_SCHED_TYPE_PARALLEL; + rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1); + TEST_ASSERT(rc == 1, "Failed to enqueue event"); + ev.queue_id = 1; diff --git a/dpdk/app/test/test_fbarray.c b/dpdk/app/test/test_fbarray.c index 26a51e2a3e..09f6907fb1 100644 --- a/dpdk/app/test/test_fbarray.c @@ -14450,6 +15295,31 @@ index 3dd017ebfb..eb4f9a61e3 100644 TEST_CASE(test_graph_model_mcore_dispatch_node_lcore_affinity_set), TEST_CASE(test_graph_model_mcore_dispatch_core_bind_unbind), TEST_CASE(test_graph_worker_model_set_get), +diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c +index 4d54706c21..b53f512815 100644 +--- a/dpdk/app/test/test_link_bonding.c ++++ b/dpdk/app/test/test_link_bonding.c +@@ -792,7 +792,7 @@ test_set_primary_member(void) + &read_mac_addr), + "Failed to get mac address (port %d)", + test_params->bonding_port_id); +- TEST_ASSERT_SUCCESS(memcmp(&read_mac_addr, &read_mac_addr, ++ TEST_ASSERT_SUCCESS(memcmp(expected_mac_addr, &read_mac_addr, + sizeof(read_mac_addr)), + "bonding port mac address not set to that of primary port\n"); + +diff --git a/dpdk/app/test/test_link_bonding_rssconf.c b/dpdk/app/test/test_link_bonding_rssconf.c +index 3c9c824335..2cb689b1de 100644 +--- a/dpdk/app/test/test_link_bonding_rssconf.c ++++ b/dpdk/app/test/test_link_bonding_rssconf.c +@@ -616,7 +616,6 @@ test_setup(void) + mac_addr.addr_bytes[5] = 0x10 + port->port_id; + rte_eth_dev_default_mac_addr_set(port->port_id, &mac_addr); + +- rte_eth_dev_info_get(port->port_id, &port->dev_info); + retval = rte_eth_dev_info_get(port->port_id, &port->dev_info); + TEST_ASSERT((retval == 0), + "Error during getting device (port %u) info: %s\n", diff --git a/dpdk/app/test/test_mbuf.c b/dpdk/app/test/test_mbuf.c index d7393df7eb..a39288a5f8 100644 --- a/dpdk/app/test/test_mbuf.c @@ -14472,6 +15342,43 @@ index d7393df7eb..a39288a5f8 100644 if (clone->ol_flags != 
RTE_MBUF_F_EXTERNAL) GOTO_FAIL("%s: External buffer is not attached to mbuf\n", __func__); +diff --git a/dpdk/app/test/test_pcapng.c b/dpdk/app/test/test_pcapng.c +index 89535efad0..5cdde0542a 100644 +--- a/dpdk/app/test/test_pcapng.c ++++ b/dpdk/app/test/test_pcapng.c +@@ -102,6 +102,14 @@ mbuf1_prepare(struct dummy_mbuf *dm, uint32_t plen) + pkt.udp.dgram_len = rte_cpu_to_be_16(plen); + + memcpy(rte_pktmbuf_mtod(dm->mb, void *), &pkt, sizeof(pkt)); ++ ++ /* Idea here is to create mbuf chain big enough that after mbuf deep copy they won't be ++ * compressed into single mbuf to properly test store of chained mbufs ++ */ ++ dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[1]), pkt_len); ++ dummy_mbuf_prep(&dm->mb[2], dm->buf[2], sizeof(dm->buf[2]), pkt_len); ++ rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]); ++ rte_pktmbuf_chain(&dm->mb[0], &dm->mb[2]); + } + + static int +@@ -117,7 +125,7 @@ test_setup(void) + + /* Make a pool for cloned packets */ + mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", +- MAX_BURST, 0, 0, ++ MAX_BURST * 32, 0, 0, + rte_pcapng_mbuf_size(pkt_len) + 128, + SOCKET_ID_ANY, "ring_mp_sc"); + if (mp == NULL) { +@@ -155,7 +163,7 @@ fill_pcapng_file(rte_pcapng_t *pcapng, unsigned int num_packets) + for (i = 0; i < burst_size; i++) { + struct rte_mbuf *mc; + +- mc = rte_pcapng_copy(port_id, 0, orig, mp, pkt_len, ++ mc = rte_pcapng_copy(port_id, 0, orig, mp, rte_pktmbuf_pkt_len(orig), + RTE_PCAPNG_DIRECTION_IN, NULL); + if (mc == NULL) { + fprintf(stderr, "Cannot copy packet\n"); diff --git a/dpdk/app/test/test_power.c b/dpdk/app/test/test_power.c index f1e80299d3..403adc22d6 100644 --- a/dpdk/app/test/test_power.c @@ -14485,6 +15392,66 @@ index f1e80299d3..403adc22d6 100644 return -1; } +diff --git a/dpdk/app/test/test_power_cpufreq.c b/dpdk/app/test/test_power_cpufreq.c +index 619b2811c6..edbd34424e 100644 +--- a/dpdk/app/test/test_power_cpufreq.c ++++ b/dpdk/app/test/test_power_cpufreq.c +@@ -9,6 +9,7 @@ + #include + #include + #include ++#include + + #include "test.h" + +@@ -46,9 +47,10 @@ test_power_caps(void) + + static uint32_t total_freq_num; + static uint32_t freqs[TEST_POWER_FREQS_NUM_MAX]; ++static uint32_t cpu_id; + + static int +-check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo) ++check_cur_freq(__rte_unused unsigned int lcore_id, uint32_t idx, bool turbo) + { + #define TEST_POWER_CONVERT_TO_DECIMAL 10 + #define MAX_LOOP 100 +@@ -62,13 +64,13 @@ check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo) + int i; + + if (snprintf(fullpath, sizeof(fullpath), +- TEST_POWER_SYSFILE_CPUINFO_FREQ, lcore_id) < 0) { ++ TEST_POWER_SYSFILE_CPUINFO_FREQ, cpu_id) < 0) { + return 0; + } + f = fopen(fullpath, "r"); + if (f == NULL) { + if (snprintf(fullpath, sizeof(fullpath), +- TEST_POWER_SYSFILE_SCALING_FREQ, lcore_id) < 0) { ++ TEST_POWER_SYSFILE_SCALING_FREQ, cpu_id) < 0) { + return 0; + } + f = fopen(fullpath, "r"); +@@ -497,6 +499,19 @@ test_power_cpufreq(void) + { + int ret = -1; + enum power_management_env env; ++ rte_cpuset_t lcore_cpus; ++ ++ lcore_cpus = rte_lcore_cpuset(TEST_POWER_LCORE_ID); ++ if (CPU_COUNT(&lcore_cpus) != 1) { ++ printf("Power management doesn't support lcore %u mapping to %u CPUs\n", ++ TEST_POWER_LCORE_ID, ++ CPU_COUNT(&lcore_cpus)); ++ return TEST_SKIPPED; ++ } ++ for (cpu_id = 0; cpu_id < CPU_SETSIZE; cpu_id++) { ++ if (CPU_ISSET(cpu_id, &lcore_cpus)) ++ break; ++ } + + /* Test initialisation of a valid lcore */ + ret = rte_power_init(TEST_POWER_LCORE_ID); diff --git a/dpdk/app/test/test_power_intel_uncore.c 
b/dpdk/app/test/test_power_intel_uncore.c index 80b45ce46e..049658627d 100644 --- a/dpdk/app/test/test_power_intel_uncore.c @@ -14505,6 +15472,26 @@ index 80b45ce46e..049658627d 100644 static int check_power_uncore_init(void) { +diff --git a/dpdk/app/test/test_security_inline_proto_vectors.h b/dpdk/app/test/test_security_inline_proto_vectors.h +index 3ac75588a3..0b4093d19a 100644 +--- a/dpdk/app/test/test_security_inline_proto_vectors.h ++++ b/dpdk/app/test/test_security_inline_proto_vectors.h +@@ -498,10 +498,12 @@ test_vector_payload_populate(struct ip_reassembly_test_packet *pkt, + if (extra_data_sum) { + proto = hdr->proto; + p += sizeof(struct rte_ipv6_hdr); +- while (proto != IPPROTO_FRAGMENT && +- (proto = rte_ipv6_get_next_ext(p, proto, &ext_len) >= 0)) ++ while (proto != IPPROTO_FRAGMENT) { ++ proto = rte_ipv6_get_next_ext(p, proto, &ext_len); ++ if (proto < 0) ++ break; + p += ext_len; +- ++ } + /* Found fragment header, update the frag offset */ + if (proto == IPPROTO_FRAGMENT) { + frag_ext = (struct rte_ipv6_fragment_ext *)p; diff --git a/dpdk/buildtools/dpdk-cmdline-gen.py b/dpdk/buildtools/dpdk-cmdline-gen.py index 49b03bee4a..30d32ac183 100755 --- a/dpdk/buildtools/dpdk-cmdline-gen.py @@ -15027,6 +16014,19 @@ index cddebda5b5..11597eaa26 100644 objdump = 'x86_64-w64-mingw32-objdump' [host_machine] +diff --git a/dpdk/devtools/git-log-fixes.sh b/dpdk/devtools/git-log-fixes.sh +index 8a4a8470c2..4690dd4545 100755 +--- a/dpdk/devtools/git-log-fixes.sh ++++ b/dpdk/devtools/git-log-fixes.sh +@@ -68,7 +68,7 @@ origin_version () # ... + { + for origin in $* ; do + # check hash is valid +- git rev-parse -q --verify $1 >&- || continue ++ git rev-parse -q --verify $origin >&- || continue + # get version of this bug origin + local origver=$(commit_version $origin) + local roothashes="$(origin_filter $origin)" diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md index a6a768bd7c..29eaad6523 100644 --- a/dpdk/doc/api/doxy-api-index.md @@ -15869,7 +16869,7 @@ index 9ec52e380f..501ef1f826 100644 + | 2 | NPC | --log-level='pmd\.common.cnxk\.flow,8' | +---+------------+-------------------------------------------------------+ diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst -index f7d9980849..cf9fabb8b8 100644 +index f7d9980849..7b48e4e991 100644 --- a/dpdk/doc/guides/nics/features.rst +++ b/dpdk/doc/guides/nics/features.rst @@ -34,6 +34,17 @@ Supports getting the speed capabilities that the current device is capable of. @@ -15890,7 +16890,61 @@ index f7d9980849..cf9fabb8b8 100644 .. _nic_features_link_status: Link status -@@ -751,6 +762,19 @@ Supports congestion management. +@@ -705,14 +716,32 @@ Basic stats + Support basic statistics such as: ipackets, opackets, ibytes, obytes, + imissed, ierrors, oerrors, rx_nombuf. + +-And per queue stats: q_ipackets, q_opackets, q_ibytes, q_obytes, q_errors. +- + These apply to all drivers. + + * **[implements] eth_dev_ops**: ``stats_get``, ``stats_reset``. + * **[related] API**: ``rte_eth_stats_get``, ``rte_eth_stats_reset()``. + + ++.. _nic_features_stats_per_queue: ++ ++Stats per queue ++--------------- ++ ++Supports per queue stats: q_ipackets, q_opackets, q_ibytes, q_obytes, q_errors. ++Statistics only supplied for first ``RTE_ETHDEV_QUEUE_STAT_CNTRS`` (16) queues. ++If driver does not support this feature the per queue stats will be zero. ++ ++* **[implements] eth_dev_ops**: ``stats_get``, ``stats_reset``. ++* **[related] API**: ``rte_eth_stats_get``, ``rte_eth_stats_reset()``. 
++ ++May also support configuring per-queue stat counter mapping. ++Used by some drivers to workaround HW limitations. ++ ++* **[implements] eth_dev_ops**: ``queue_stats_mapping_set``. ++* **[related] API**: ``rte_eth_dev_set_rx_queue_stats_mapping()``, ++ ``rte_eth_dev_set_tx_queue_stats_mapping()``. ++ ++ + .. _nic_features_extended_stats: + + Extended stats +@@ -727,18 +756,6 @@ Supports Extended Statistics, changes from driver to driver. + ``rte_eth_xstats_get_names_by_id()``, ``rte_eth_xstats_get_id_by_name()``. + + +-.. _nic_features_stats_per_queue: +- +-Stats per queue +---------------- +- +-Supports configuring per-queue stat counter mapping. +- +-* **[implements] eth_dev_ops**: ``queue_stats_mapping_set``. +-* **[related] API**: ``rte_eth_dev_set_rx_queue_stats_mapping()``, +- ``rte_eth_dev_set_tx_queue_stats_mapping()``. +- +- + .. _nic_features_congestion_management: + + Congestion management +@@ -751,6 +768,19 @@ Supports congestion management. ``rte_eth_cman_config_set()``, ``rte_eth_cman_config_get()``. @@ -16252,7 +17306,7 @@ index 3b0613fc1b..3e84d1ff1c 100644 Features -------- diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index 6b52fb93c5..2c59b24d78 100644 +index 6b52fb93c5..d1c3284ca1 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -245,6 +245,26 @@ Limitations @@ -16293,6 +17347,84 @@ index 6b52fb93c5..2c59b24d78 100644 - Multi-thread flow insertion: +@@ -1854,6 +1874,77 @@ directly but neither destroyed nor flushed. + The application should re-create the flows as required after the port restart. + + ++Notes for flow counters ++----------------------- ++ ++mlx5 PMD supports the ``COUNT`` flow action, ++which provides an ability to count packets (and bytes) ++matched against a given flow rule. ++This section describes the high level overview of ++how this support is implemented and limitations. ++ ++HW steering flow engine ++~~~~~~~~~~~~~~~~~~~~~~~ ++ ++Flow counters are allocated from HW in bulks. ++A set of bulks forms a flow counter pool managed by PMD. ++When flow counters are queried from HW, ++each counter is identified by an offset in a given bulk. ++Querying HW flow counter requires sending a request to HW, ++which will request a read of counter values for given offsets. ++HW will asynchronously provide these values through a DMA write. ++ ++In order to optimize HW to SW communication, ++these requests are handled in a separate counter service thread ++spawned by mlx5 PMD. ++This service thread will refresh the counter values stored in memory, ++in cycles, each spanning ``svc_cycle_time`` milliseconds. ++By default, ``svc_cycle_time`` is set to 500. ++When applications query the ``COUNT`` flow action, ++PMD returns the values stored in host memory. ++ ++mlx5 PMD manages 3 global rings of allocated counter offsets: ++ ++- ``free`` ring - Counters which were not used at all. ++- ``wait_reset`` ring - Counters which were used in some flow rules, ++ but were recently freed (flow rule was destroyed ++ or an indirect action was destroyed). ++ Since the count value might have changed ++ between the last counter service thread cycle and the moment it was freed, ++ the value in host memory might be stale. ++ During the next service thread cycle, ++ such counters will be moved to ``reuse`` ring. ++- ``reuse`` ring - Counters which were used at least once ++ and can be reused in new flow rules. 
++ ++When counters are assigned to a flow rule (or allocated to indirect action), ++the PMD first tries to fetch a counter from ``reuse`` ring. ++If it's empty, the PMD fetches a counter from ``free`` ring. ++ ++The counter service thread works as follows: ++ ++#. Record counters stored in ``wait_reset`` ring. ++#. Read values of all counters which were used at least once ++ or are currently in use. ++#. Move recorded counters from ``wait_reset`` to ``reuse`` ring. ++#. Sleep for ``(query time) - svc_cycle_time`` milliseconds ++#. Repeat. ++ ++Because freeing a counter (by destroying a flow rule or destroying indirect action) ++does not immediately make it available for the application, ++the PMD might return: ++ ++- ``ENOENT`` if no counter is available in ``free``, ``reuse`` ++ or ``wait_reset`` rings. ++ No counter will be available until the application releases some of them. ++- ``EAGAIN`` if no counter is available in ``free`` and ``reuse`` rings, ++ but there are counters in ``wait_reset`` ring. ++ This means that after the next service thread cycle new counters will be available. ++ ++The application has to be aware that flow rule create or indirect action create ++might need be retried. ++ ++ + Notes for hairpin + ----------------- + diff --git a/dpdk/doc/guides/nics/nfp.rst b/dpdk/doc/guides/nics/nfp.rst index fee1860f4a..b577229bda 100644 --- a/dpdk/doc/guides/nics/nfp.rst @@ -16476,10 +17608,10 @@ index 81b93515cb..10630ba255 100644 used by telemetry callbacks for adding unsigned integer values to be returned to the user, are renamed to ``rte_tel_data_add_array_uint`` and ``rte_tel_data_add_dict_uint`` respectively. diff --git a/dpdk/doc/guides/rel_notes/release_23_11.rst b/dpdk/doc/guides/rel_notes/release_23_11.rst -index 6d83682d73..538e06aede 100644 +index 6d83682d73..7d250da7a7 100644 --- a/dpdk/doc/guides/rel_notes/release_23_11.rst +++ b/dpdk/doc/guides/rel_notes/release_23_11.rst -@@ -669,3 +669,744 @@ Tested Platforms +@@ -669,3 +669,1074 @@ Tested Platforms * OFED: * MLNX_OFED 23.07-0.5.1.2 @@ -17224,6 +18356,336 @@ index 6d83682d73..538e06aede 100644 +* Failed to add vdev when launch dpdk-pdump with vdev secondary process + + Fix available in upstream. 
++ ++23.11.3 Release Notes ++--------------------- ++ ++ ++23.11.3 Fixes ++~~~~~~~~~~~~~ ++ ++* 23.11.3-rc1 ++* Revert "test/bonding: fix loop on members" ++* app/dumpcap: fix handling of jumbo frames ++* app/dumpcap: remove unused struct array ++* app/procinfo: fix leak on exit ++* app/testpmd: avoid potential outside of array reference ++* app/testpmd: fix aged flow destroy ++* app/testpmd: remove flex item init command leftover ++* app/testpmd: remove redundant policy action condition ++* app/testpmd: remove unnecessary cast ++* baseband/acc: fix access to deallocated mem ++* baseband/acc: fix ring memory allocation ++* baseband/acc: fix soft output bypass RM ++* baseband/la12xx: fix use after free in modem config ++* bpf: fix free function mismatch if convert fails ++* build: remove version check on compiler links function ++* bus/dpaa: fix PFDRs leaks due to FQRNIs ++* bus/dpaa: fix VSP for 1G fm1-mac9 and 10 ++* bus/dpaa: fix lock condition during error handling ++* bus/dpaa: fix the fman details status ++* bus/fslmc: fix Coverity warnings in QBMAN ++* bus/vdev: revert fix devargs in secondary process ++* common/cnxk: fix CPT HW word size for outbound SA ++* common/cnxk: fix IRQ reconfiguration ++* common/cnxk: fix MAC address change with active VF ++* common/cnxk: fix base log level ++* common/cnxk: fix build on Ubuntu 24.04 ++* common/cnxk: fix double free of flow aging resources ++* common/cnxk: fix inline CTX write ++* common/dpaax/caamflib: enable fallthrough warnings ++* common/dpaax/caamflib: fix PDCP SNOW-ZUC watchdog ++* common/idpf: fix AVX-512 pointer copy on 32-bit ++* common/idpf: fix use after free in mailbox init ++* common/mlx5: fix error CQE handling for 128 bytes CQE ++* common/mlx5: fix misalignment ++* common/qat: fix use after free in device probe ++* crypto/bcmfs: fix free function mismatch ++* crypto/dpaa2_sec: fix memory leak ++* crypto/openssl: fix 3DES-CTR with big endian CPUs ++* crypto/openssl: fix potential string overflow ++* crypto/qat: fix ECDSA session handling ++* crypto/qat: fix modexp/inv length ++* crypto/scheduler: fix session size computation ++* dev: fix callback lookup when unregistering device ++* devtools: fix check of multiple commits fixed at once ++* dma/idxd: fix free function mismatch in device probe ++* dmadev: fix potential null pointer access ++* doc: correct definition of stats per queue feature ++* drivers: remove redundant newline from logs ++* eal/unix: optimize thread creation ++* eal/x86: fix 32-bit write combining store ++* ethdev: fix overflow in descriptor count ++* ethdev: verify queue ID in Tx done cleanup ++* event/cnxk: fix OOP handling in event mode ++* event/cnxk: fix Rx timestamp handling ++* event/cnxk: fix free function mismatch in port config ++* event/octeontx: fix possible integer overflow ++* eventdev: fix possible array underflow/overflow ++* examples/eventdev: fix queue crash with generic pipeline ++* examples/ipsec-secgw: fix dequeue count from cryptodev ++* examples/l2fwd-event: fix spinlock handling ++* examples/l3fwd-power: fix options parsing overflow ++* examples/l3fwd: fix read beyond boundaries ++* examples/ntb: check info query return ++* examples/vhost: fix free function mismatch ++* fib6: add runtime checks in AVX512 lookup ++* fib: fix AVX512 lookup ++* hash: fix thash LFSR initialization ++* log: remove per line log helper ++* member: fix choice of bucket for displacement ++* ml/cnxk: fix handling of TVM model I/O ++* net/bnx2x: fix always true expression ++* net/bnx2x: fix duplicate branch ++* 
net/bnx2x: fix possible infinite loop at startup ++* net/bnx2x: remove dead conditional ++* net/bnxt/tf_core: fix TCAM manager data corruption ++* net/bnxt/tf_core: fix Thor TF EM key size check ++* net/bnxt/tf_core: fix WC TCAM multi-slice delete ++* net/bnxt/tf_core: fix slice count in case of HA entry move ++* net/bnxt: fix TCP and UDP checksum flags ++* net/bnxt: fix bad action offset in Tx BD ++* net/bnxt: fix reading SFF-8436 SFP EEPROMs ++* net/cnxk: fix OOP handling for inbound packets ++* net/cnxk: fix Rx offloads to handle timestamp ++* net/cnxk: fix Rx timestamp handling for VF ++* net/cnxk: fix build on Ubuntu 24.04 ++* net/cnxk: fix use after free in mempool create ++* net/cpfl: add checks for flow action types ++* net/cpfl: fix forwarding to physical port ++* net/cpfl: fix invalid free in JSON parser ++* net/cpfl: fix parsing protocol ID mask field ++* net/dpaa2: fix memory corruption in TM ++* net/dpaa2: remove unnecessary check for null before free ++* net/dpaa: fix reallocate mbuf handling ++* net/dpaa: fix typecasting channel ID ++* net/e1000/base: fix fallthrough in switch ++* net/e1000: fix link status crash in secondary process ++* net/e1000: fix use after free in filter flush ++* net/ena: revert redefining memcpy ++* net/gve/base: fix build with Fedora Rawhide ++* net/gve: add IO memory barriers before reading descriptors ++* net/gve: always attempt Rx refill on DQ ++* net/gve: fix Tx for chained mbuf ++* net/gve: fix mbuf allocation memory leak for DQ Rx ++* net/gve: fix queue setup and stop ++* net/gve: fix refill logic causing memory corruption ++* net/hns3: fix dump counter of registers ++* net/hns3: fix error code for repeatedly create counter ++* net/hns3: fix fully use hardware flow director table ++* net/hns3: register VLAN flow match mode parameter ++* net/hns3: remove ROH devices ++* net/hns3: remove some basic address dump ++* net/hns3: restrict tunnel flow rule to one header ++* net/hns3: verify reset type from firmware ++* net/i40e/base: add missing X710TL device check ++* net/i40e/base: fix DDP loading with reserved track ID ++* net/i40e/base: fix blinking X722 with X557 PHY ++* net/i40e/base: fix loop bounds ++* net/i40e/base: fix misleading debug logs and comments ++* net/i40e/base: fix repeated register dumps ++* net/i40e/base: fix setting flags in init function ++* net/i40e/base: fix unchecked return value ++* net/i40e: check register read for outer VLAN ++* net/i40e: fix AVX-512 pointer copy on 32-bit ++* net/iavf: add segment-length check to Tx prep ++* net/iavf: delay VF reset command ++* net/iavf: fix AVX-512 pointer copy on 32-bit ++* net/iavf: fix crash when link is unstable ++* net/iavf: preserve MAC address with i40e PF Linux driver ++* net/ice/base: add bounds check ++* net/ice/base: fix VLAN replay after reset ++* net/ice/base: fix iteration of TLVs in Preserved Fields Area ++* net/ice/base: fix link speed for 200G ++* net/ice: detect stopping a flow director queue twice ++* net/ice: fix AVX-512 pointer copy on 32-bit ++* net/igc: fix Rx buffers when timestamping enabled ++* net/ionic: fix build with Fedora Rawhide ++* net/ixgbe/base: fix unchecked return value ++* net/ixgbe: fix link status delay on FreeBSD ++* net/mana: support rdma-core via pkg-config ++* net/memif: fix buffer overflow in zero copy Rx ++* net/mlx5/hws: fix allocation of STCs ++* net/mlx5/hws: fix flex item as tunnel header ++* net/mlx5/hws: fix range definer error recovery ++* net/mlx5: add flex item query for tunnel mode ++* net/mlx5: fix GRE flow item translation for 
root table ++* net/mlx5: fix Rx queue control management ++* net/mlx5: fix Rx queue reference count in flushing flows ++* net/mlx5: fix SQ flow item size ++* net/mlx5: fix SWS meter state initialization ++* net/mlx5: fix Tx tracing to use single clock source ++* net/mlx5: fix counter query loop getting stuck ++* net/mlx5: fix default RSS flows creation order ++* net/mlx5: fix flex item header length field translation ++* net/mlx5: fix flex item tunnel mode ++* net/mlx5: fix indirect list flow action callback invocation ++* net/mlx5: fix memory leak in metering ++* net/mlx5: fix miniCQEs number calculation ++* net/mlx5: fix next protocol validation after flex item ++* net/mlx5: fix non full word sample fields in flex item ++* net/mlx5: fix non-template flow action validation ++* net/mlx5: fix number of supported flex parsers ++* net/mlx5: fix real time counter reading from PCI BAR ++* net/mlx5: fix reported Rx/Tx descriptor limits ++* net/mlx5: fix shared Rx queue control release ++* net/mlx5: fix shared queue port number in vector Rx ++* net/mlx5: fix trace script for multiple burst completion ++* net/mlx5: workaround list management of Rx queue control ++* net/mvneta: fix possible out-of-bounds write ++* net/netvsc: fix using Tx queue higher than Rx queues ++* net/netvsc: force Tx VLAN offload on 801.2Q packet ++* net/nfb: fix use after free ++* net/nfp: do not set IPv6 flag in transport mode ++* net/nfp: fix double free in flow destroy ++* net/nfp: fix link change return value ++* net/nfp: fix pause frame setting check ++* net/nfp: fix representor port link status update ++* net/nfp: fix type declaration of some variables ++* net/nfp: notify flower firmware about PF speed ++* net/ngbe: fix driver load bit to inform firmware ++* net/ngbe: fix interrupt lost in legacy or MSI mode ++* net/ngbe: reconfigure more MAC Rx registers ++* net/ngbe: restrict configuration of VLAN strip offload ++* net/pcap: fix blocking Rx ++* net/pcap: set live interface as non-blocking ++* net/sfc: fix use after free in debug logs ++* net/tap: avoid memcpy with null argument ++* net/tap: restrict maximum number of MP FDs ++* net/txgbe: fix SWFW mbox ++* net/txgbe: fix VF-PF mbox interrupt ++* net/txgbe: fix a mass of interrupts ++* net/txgbe: fix driver load bit to inform firmware ++* net/txgbe: remove outer UDP checksum capability ++* net/virtio-user: reset used index counter ++* net/virtio: fix Rx checksum calculation ++* net/vmxnet3: fix crash after configuration failure ++* net/vmxnet3: fix potential out of bounds stats access ++* net/vmxnet3: support larger MTU with version 6 ++* pcapng: avoid potential unaligned data ++* pcapng: fix handling of chained mbufs ++* power: enable CPPC ++* power: fix log message when checking lcore ID ++* power: fix mapped lcore ID ++* raw/ifpga/base: fix use after free ++* raw/ifpga: fix free function mismatch in interrupt config ++* rcu: fix implicit conversion in bit shift ++* test/bonding: fix MAC address comparison ++* test/bonding: fix loop on members ++* test/bonding: remove redundant info query ++* test/crypto: fix synchronous API calls ++* test/eal: fix lcore check ++* test/eal: fix loop coverage for alignment macros ++* test/event: avoid duplicate initialization ++* test/event: fix schedule type ++* test/event: fix target event queue ++* test/security: fix IPv6 extension loop ++* vdpa/nfp: fix hardware initialization ++* vdpa/nfp: fix reconfiguration ++* vdpa: update used flags in used ring relay ++* version: 23.11.3-rc1 ++* vhost: fix deadlock in Rx async path ++* 
vhost: fix offset while mapping log base address ++* vhost: restrict set max queue pair API to VDUSE ++ ++23.11.3 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* RedHat Testing: ++ ++ * Test scenarios: ++ ++ * VM with device assignment(PF) throughput testing(1G hugepage size) ++ * VM with device assignment(PF) throughput testing(2M hugepage size) ++ * VM with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q - cross numa node throughput testing ++ * VM with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server(qemu reconnect) ++ * vhost-user reconnect with dpdk-client, qemu-server(ovs reconnect) ++ * PVP reconnect with dpdk-client, qemu-server ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * VM with ovs+dpdk+vhost-user 1Q live migration testing ++ * VM with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * VM with ovs+dpdk+vhost-user 2Q live migration testing ++ * VM with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ * Test Versions and device: ++ ++ * RHEL 9.4 ++ * qemu-kvm-8.2.0 ++ * kernel 5.14 ++ * libvirt 10.0 ++ * openvswitch 3.1 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++* Nvidia(R) Testing: ++ ++ * Test scenarios: ++ ++ * Send and receive multiple types of traffic. ++ * testpmd xstats counter test. ++ * testpmd timestamp test. ++ * Changing/checking link status through testpmd. ++ * rte_flow tests (https://doc.dpdk.org/guides/nics/mlx5.html#supported-hardware-offloads) ++ * RSS tests. ++ * VLAN filtering, stripping, and insertion tests. ++ * Checksum and TSO tests. ++ * ptype tests. ++ * link_status_interrupt example application tests. ++ * l3fwd-power example application tests. ++ * Multi-process example applications tests. ++ * Hardware LRO tests. ++ * Buffer Split tests. ++ * Tx scheduling tests. ++ ++ * Test platform: ++ ++ * NIC: ConnectX-6 Dx / OS: Ubuntu 22.04 / Driver: MLNX_OFED_LINUX-24.10-1.1.4.0 / Firmware: 22.43.2026 ++ * NIC: ConnectX-7 / OS: Ubuntu 22.04 / Driver: MLNX_OFED_LINUX-24.10-1.1.4.0 / Firmware: 28.43.2026 ++ * DPU: BlueField-2 / DOCA SW version: 2.9.1 / Firmware: 24.43.2026 ++ ++ * OS/driver combinations: ++ ++ * Debian 12 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 22.04 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 24.04 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 24.04 with rdma-core v50.0. ++ * Fedora 40 with rdma-core v48.0. ++ * Fedora 42 (Rawhide) with rdma-core v51.0. ++ * OpenSUSE Leap 15.6 with rdma-core v49.1. ++ ++* Intel(R) Testing: ++ ++ * Basic NIC testing ++ ++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu24.10, Ubuntu22.04, Fedora40, RHEL8.10, RHEL9.4, FreeBSD14.1, SUSE15, openEuler22.03-SP2, OpenAnolis8.9 etc. ++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. ++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc. ++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc. 
++ ++ * Basic cryptodev and virtio testing ++ ++ * Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMAWARE ESXI 8.0, etc. ++ * Cryptodev Function test: Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. ++ * Cryptodev Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc. ++ ++23.11.3 Known Issues ++~~~~~~~~~~~~~~~~~~~~ ++ ++* Start dpdk-pdump in VM with virtio-0.95 protocol failed ++ ++ Fix available in upstream. ++ ++* Failed to add vdev when launch dpdk-pdump with vdev secondary process ++ ++ Fix available in upstream. diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst index ce49eab96f..7ff304d05c 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_crypto.rst @@ -17412,9 +18874,18 @@ index fc36bfb30c..3fcc2c9894 100644 --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a \ --enable_vector --vector_size 512 diff --git a/dpdk/drivers/baseband/acc/acc_common.h b/dpdk/drivers/baseband/acc/acc_common.h -index bda2ad2f7a..6752c256d2 100644 +index bda2ad2f7a..13f7ec40e6 100644 --- a/dpdk/drivers/baseband/acc/acc_common.h +++ b/dpdk/drivers/baseband/acc/acc_common.h +@@ -787,7 +787,7 @@ alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d, + sw_rings_base, ACC_SIZE_64MBYTE); + next_64mb_align_addr_iova = sw_rings_base_iova + + next_64mb_align_offset; +- sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size; ++ sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size - 1; + + /* Check if the end of the sw ring memory block is before the + * start of next 64MB aligned mem address @@ -1110,6 +1110,9 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n, req_elem_addr, (void *)q->mmio_reg_enqueue); @@ -17434,57 +18905,736 @@ index bda2ad2f7a..6752c256d2 100644 n -= enq_batch_size; } while (n); -diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c -index bb754a5395..1a56e73abd 100644 ---- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c -+++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c -@@ -1084,6 +1084,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) +diff --git a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +index 292537e24d..3e135c480d 100644 +--- a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c ++++ b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +@@ -230,7 +230,7 @@ fetch_acc100_config(struct rte_bbdev *dev) + } + + rte_bbdev_log_debug( +- "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u\n", ++ "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u AQ %u %u %u %u Len %u %u %u %u", + (d->pf_device) ? "PF" : "VF", + (acc_conf->input_pos_llr_1_bit) ? "POS" : "NEG", + (acc_conf->output_pos_llr_1_bit) ? 
"POS" : "NEG", +@@ -838,51 +838,15 @@ free_q: + return ret; + } - PMD_INIT_FUNC_TRACE(); +-static inline void +-acc100_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type, +- uint16_t index) +-{ +- if (op == NULL) +- return; +- if (op_type == RTE_BBDEV_OP_LDPC_DEC) +- rte_bbdev_log(DEBUG, +- " Op 5GUL %d %d %d %d %d %d %d %d %d %d %d %d", +- index, +- op->ldpc_dec.basegraph, op->ldpc_dec.z_c, +- op->ldpc_dec.n_cb, op->ldpc_dec.q_m, +- op->ldpc_dec.n_filler, op->ldpc_dec.cb_params.e, +- op->ldpc_dec.op_flags, op->ldpc_dec.rv_index, +- op->ldpc_dec.iter_max, op->ldpc_dec.iter_count, +- op->ldpc_dec.harq_combined_input.length +- ); +- else if (op_type == RTE_BBDEV_OP_LDPC_ENC) { +- struct rte_bbdev_enc_op *op_dl = (struct rte_bbdev_enc_op *) op; +- rte_bbdev_log(DEBUG, +- " Op 5GDL %d %d %d %d %d %d %d %d %d", +- index, +- op_dl->ldpc_enc.basegraph, op_dl->ldpc_enc.z_c, +- op_dl->ldpc_enc.n_cb, op_dl->ldpc_enc.q_m, +- op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e, +- op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index +- ); +- } +-} +- + static int + acc100_queue_stop(struct rte_bbdev *dev, uint16_t queue_id) + { + struct acc_queue *q; +- struct rte_bbdev_dec_op *op; +- uint16_t i; -+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) -+ return 0; -+ - if (vdev == NULL) + q = dev->data->queues[queue_id].queue_private; + rte_bbdev_log(INFO, "Queue Stop %d H/T/D %d %d %x OpType %d", + queue_id, q->sw_ring_head, q->sw_ring_tail, + q->sw_ring_depth, q->op_type); +- for (i = 0; i < q->sw_ring_depth; ++i) { +- op = (q->ring_addr + i)->req.op_addr; +- acc100_print_op(op, q->op_type, i); +- } + /* ignore all operations in flight and clear counters */ + q->sw_ring_tail = q->sw_ring_head; + q->aq_enqueued = 0; +@@ -1229,7 +1193,7 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, + harq_in_length = RTE_ALIGN_FLOOR(harq_in_length, ACC100_HARQ_ALIGN_COMP); + + if ((harq_layout[harq_index].offset > 0) && harq_prun) { +- rte_bbdev_log_debug("HARQ IN offset unexpected for now\n"); ++ rte_bbdev_log_debug("HARQ IN offset unexpected for now"); + fcw->hcin_size0 = harq_layout[harq_index].size0; + fcw->hcin_offset = harq_layout[harq_index].offset; + fcw->hcin_size1 = harq_in_length - harq_layout[harq_index].offset; +@@ -2890,7 +2854,7 @@ harq_loopback(struct acc_queue *q, struct rte_bbdev_dec_op *op, + uint32_t harq_index; + + if (harq_in_length == 0) { +- rte_bbdev_log(ERR, "Loopback of invalid null size\n"); ++ rte_bbdev_log(ERR, "Loopback of invalid null size"); return -EINVAL; + } -diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c -index 3504ec97db..3e4622f606 100644 ---- a/dpdk/drivers/bus/dpaa/base/qbman/process.c -+++ b/dpdk/drivers/bus/dpaa/base/qbman/process.c -@@ -1,7 +1,7 @@ - /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) - * - * Copyright 2011-2016 Freescale Semiconductor Inc. 
-- * Copyright 2017,2020 NXP -+ * Copyright 2017,2020,2022,2024 NXP - * - */ - #include -@@ -27,15 +27,16 @@ static int check_fd(void) - { - int ret; +@@ -2928,7 +2892,7 @@ harq_loopback(struct acc_queue *q, struct rte_bbdev_dec_op *op, + fcw->hcin_en = 1; + fcw->hcout_en = 1; + +- rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d\n", ++ rte_bbdev_log(DEBUG, "Loopback IN %d Index %d offset %d length %d %d", + ddr_mem_in, harq_index, + harq_layout[harq_index].offset, harq_in_length, + harq_dma_length_in); +@@ -2944,7 +2908,7 @@ harq_loopback(struct acc_queue *q, struct rte_bbdev_dec_op *op, + fcw->hcin_size0 = harq_in_length; + } + harq_layout[harq_index].val = 0; +- rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d\n", ++ rte_bbdev_log(DEBUG, "Loopback FCW Config %d %d %d", + fcw->hcin_size0, fcw->hcin_offset, fcw->hcin_size1); + fcw->hcout_size0 = harq_in_length; + fcw->hcin_decomp_mode = h_comp; +@@ -3691,7 +3655,7 @@ acc100_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data, + + if (i > 0) + same_op = cmp_ldpc_dec_op(&ops[i-1]); +- rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d\n", ++ rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d", + i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index, + ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count, + ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c, +@@ -3808,7 +3772,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + return -1; -- if (fd >= 0) -- return 0; - ret = pthread_mutex_lock(&fd_init_lock); - assert(!ret); -+ - /* check again with the lock held */ - if (fd < 0) - fd = open(PROCESS_PATH, O_RDWR); -+ - ret = pthread_mutex_unlock(&fd_init_lock); - assert(!ret); -+ - return (fd >= 0) ? 0 : -ENODEV; - } + rsp.val = atom_desc.rsp.val; +- rte_bbdev_log_debug("Resp. desc %p: %x num %d\n", desc, rsp.val, desc->req.numCBs); ++ rte_bbdev_log_debug("Resp. desc %p: %x num %d", desc, rsp.val, desc->req.numCBs); + + /* Dequeue */ + op = desc->req.op_addr; +@@ -3885,7 +3849,7 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, + __ATOMIC_RELAXED); + rsp.val = atom_desc.rsp.val; +- rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d\n", ++ rte_bbdev_log_debug("Resp. desc %p: %x descs %d cbs %d", + desc, rsp.val, descs_in_tb, desc->req.numCBs); + + op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); +@@ -3981,7 +3945,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, + return -1; -diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c -index e57159f5d8..aaf2a5f43e 100644 ---- a/dpdk/drivers/bus/dpaa/dpaa_bus.c -+++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c -@@ -187,6 +187,7 @@ dpaa_create_device_list(void) + rsp.val = atom_desc.rsp.val; +- rte_bbdev_log_debug("Resp. desc %p: %x\n", desc, rsp.val); ++ rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val); + + /* Dequeue */ + op = desc->req.op_addr; +@@ -4060,7 +4024,7 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, + atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, + __ATOMIC_RELAXED); + rsp.val = atom_desc.rsp.val; +- rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d\n", ++ rte_bbdev_log_debug("Resp. desc %p: %x r %d c %d", + desc, rsp.val, cb_idx, cbs_in_tb); + + op->status |= ((rsp.input_err) ? 
(1 << RTE_BBDEV_DATA_ERROR) : 0); +@@ -4797,7 +4761,7 @@ acc100_configure(const char *dev_name, struct rte_acc_conf *conf) + } + + if (aram_address > ACC100_WORDS_IN_ARAM_SIZE) { +- rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n", ++ rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d", + aram_address, ACC100_WORDS_IN_ARAM_SIZE); + return -EINVAL; + } +diff --git a/dpdk/drivers/baseband/acc/rte_vrb_pmd.c b/dpdk/drivers/baseband/acc/rte_vrb_pmd.c +index 686e086a5c..4979bb8cec 100644 +--- a/dpdk/drivers/baseband/acc/rte_vrb_pmd.c ++++ b/dpdk/drivers/baseband/acc/rte_vrb_pmd.c +@@ -348,7 +348,7 @@ fetch_acc_config(struct rte_bbdev *dev) + } + + rte_bbdev_log_debug( +- "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u %u AQ %u %u %u %u %u %u Len %u %u %u %u %u %u\n", ++ "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u %u AQ %u %u %u %u %u %u Len %u %u %u %u %u %u", + (d->pf_device) ? "PF" : "VF", + (acc_conf->input_pos_llr_1_bit) ? "POS" : "NEG", + (acc_conf->output_pos_llr_1_bit) ? "POS" : "NEG", +@@ -464,7 +464,7 @@ vrb_dev_interrupt_handler(void *cb_arg) + } + } else { + rte_bbdev_log_debug( +- "VRB VF Interrupt received, Info Ring data: 0x%x\n", ++ "VRB VF Interrupt received, Info Ring data: 0x%x", + ring_data->val); + switch (int_nb) { + case ACC_VF_INT_DMA_DL_DESC_IRQ: +@@ -698,7 +698,7 @@ vrb_intr_enable(struct rte_bbdev *dev) + + if (d->device_variant == VRB1_VARIANT) { + /* On VRB1: cannot enable MSI/IR to avoid potential back-pressure corner case. */ +- rte_bbdev_log(ERR, "VRB1 (%s) doesn't support any MSI/MSI-X interrupt\n", ++ rte_bbdev_log(ERR, "VRB1 (%s) doesn't support any MSI/MSI-X interrupt", + dev->data->name); + return -ENOTSUP; + } +@@ -800,7 +800,7 @@ vrb_intr_enable(struct rte_bbdev *dev) + return 0; + } + +- rte_bbdev_log(ERR, "Device (%s) supports only VFIO MSI/MSI-X interrupts\n", ++ rte_bbdev_log(ERR, "Device (%s) supports only VFIO MSI/MSI-X interrupts", + dev->data->name); + return -ENOTSUP; + } +@@ -1023,7 +1023,7 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id, + d->queue_offset(d->pf_device, q->vf_id, q->qgrp_id, q->aq_id)); + + rte_bbdev_log_debug( +- "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p base %p\n", ++ "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p base %p", + dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id, + q->aq_id, q->aq_depth, q->mmio_reg_enqueue, + d->mmio_base); +@@ -1047,58 +1047,16 @@ free_q: + return ret; + } + +-static inline void +-vrb_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type, +- uint16_t index) +-{ +- if (op == NULL) +- return; +- if (op_type == RTE_BBDEV_OP_LDPC_DEC) +- rte_bbdev_log(INFO, +- " Op 5GUL %d %d %d %d %d %d %d %d %d %d %d %d", +- index, +- op->ldpc_dec.basegraph, op->ldpc_dec.z_c, +- op->ldpc_dec.n_cb, op->ldpc_dec.q_m, +- op->ldpc_dec.n_filler, op->ldpc_dec.cb_params.e, +- op->ldpc_dec.op_flags, op->ldpc_dec.rv_index, +- op->ldpc_dec.iter_max, op->ldpc_dec.iter_count, +- op->ldpc_dec.harq_combined_input.length +- ); +- else if (op_type == RTE_BBDEV_OP_LDPC_ENC) { +- struct rte_bbdev_enc_op *op_dl = (struct rte_bbdev_enc_op *) op; +- rte_bbdev_log(INFO, +- " Op 5GDL %d %d %d %d %d %d %d %d %d", +- index, +- op_dl->ldpc_enc.basegraph, op_dl->ldpc_enc.z_c, +- op_dl->ldpc_enc.n_cb, op_dl->ldpc_enc.q_m, +- op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e, +- op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index +- ); +- } else if (op_type == RTE_BBDEV_OP_MLDTS) { +- struct rte_bbdev_mldts_op 
*op_mldts = (struct rte_bbdev_mldts_op *) op; +- rte_bbdev_log(INFO, " Op MLD %d RBs %d NL %d Rp %d %d %x\n", +- index, +- op_mldts->mldts.num_rbs, op_mldts->mldts.num_layers, +- op_mldts->mldts.r_rep, +- op_mldts->mldts.c_rep, op_mldts->mldts.op_flags); +- } +-} +- + /* Stop queue and clear counters. */ + static int + vrb_queue_stop(struct rte_bbdev *dev, uint16_t queue_id) + { + struct acc_queue *q; +- struct rte_bbdev_dec_op *op; +- uint16_t i; ++ + q = dev->data->queues[queue_id].queue_private; + rte_bbdev_log(INFO, "Queue Stop %d H/T/D %d %d %x OpType %d", + queue_id, q->sw_ring_head, q->sw_ring_tail, + q->sw_ring_depth, q->op_type); +- for (i = 0; i < q->sw_ring_depth; ++i) { +- op = (q->ring_addr + i)->req.op_addr; +- vrb_print_op(op, q->op_type, i); +- } + /* ignore all operations in flight and clear counters */ + q->sw_ring_tail = q->sw_ring_head; + q->aq_enqueued = 0; +@@ -1312,7 +1270,6 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) + RTE_BBDEV_LDPC_HARQ_4BIT_COMPRESSION | + RTE_BBDEV_LDPC_LLR_COMPRESSION | + RTE_BBDEV_LDPC_SOFT_OUT_ENABLE | +- RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS | + RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS | + RTE_BBDEV_LDPC_DEC_INTERRUPTS, + .llr_size = 8, +@@ -1626,18 +1583,18 @@ vrb_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, + fcw->so_en = check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_SOFT_OUT_ENABLE); + fcw->so_bypass_intlv = check_bit(op->ldpc_dec.op_flags, + RTE_BBDEV_LDPC_SOFT_OUT_DEINTERLEAVER_BYPASS); +- fcw->so_bypass_rm = check_bit(op->ldpc_dec.op_flags, +- RTE_BBDEV_LDPC_SOFT_OUT_RM_BYPASS); ++ fcw->so_bypass_rm = 0; + fcw->minsum_offset = 1; + fcw->dec_llrclip = 2; + } + + /* +- * These are all implicitly set ++ * These are all implicitly set: + * fcw->synd_post = 0; + * fcw->dec_convllr = 0; + * fcw->hcout_convllr = 0; + * fcw->hcout_size1 = 0; ++ * fcw->so_it = 0; + * fcw->hcout_offset = 0; + * fcw->negstop_th = 0; + * fcw->negstop_it = 0; +@@ -2492,7 +2449,7 @@ vrb_enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, + hq_output = op->ldpc_dec.harq_combined_output.data; + hq_len = op->ldpc_dec.harq_combined_output.length; + if (unlikely(!mbuf_append(hq_output_head, hq_output, hq_len))) { +- rte_bbdev_log(ERR, "HARQ output mbuf issue %d %d\n", ++ rte_bbdev_log(ERR, "HARQ output mbuf issue %d %d", + hq_output->buf_len, + hq_len); + return -1; +@@ -2985,7 +2942,7 @@ vrb_enqueue_ldpc_dec_cb(struct rte_bbdev_queue_data *q_data, + break; + } + avail -= 1; +- rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d\n", ++ rte_bbdev_log(INFO, "Op %d %d %d %d %d %d %d %d %d %d %d %d", + i, ops[i]->ldpc_dec.op_flags, ops[i]->ldpc_dec.rv_index, + ops[i]->ldpc_dec.iter_max, ops[i]->ldpc_dec.iter_count, + ops[i]->ldpc_dec.basegraph, ops[i]->ldpc_dec.z_c, +@@ -3319,7 +3276,7 @@ vrb_dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, + return -1; + + rsp.val = atom_desc.rsp.val; +- rte_bbdev_log_debug("Resp. desc %p: %x %x %x\n", desc, rsp.val, desc->rsp.add_info_0, ++ rte_bbdev_log_debug("Resp. desc %p: %x %x %x", desc, rsp.val, desc->rsp.add_info_0, + desc->rsp.add_info_1); + + /* Dequeue. 
*/ +@@ -3440,7 +3397,7 @@ vrb_dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, + } + + if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK)) { +- rte_bbdev_log_debug("TB-CRC Check %x\n", tb_crc_check); ++ rte_bbdev_log_debug("TB-CRC Check %x", tb_crc_check); + if (tb_crc_check > 0) + op->status |= 1 << RTE_BBDEV_CRC_ERROR; + } +@@ -3985,7 +3942,7 @@ vrb2_check_mld_r_constraint(struct rte_bbdev_mldts_op *op) { + layer_idx = RTE_MIN(op->mldts.num_layers - VRB2_MLD_MIN_LAYER, + VRB2_MLD_MAX_LAYER - VRB2_MLD_MIN_LAYER); + rrep_idx = RTE_MIN(op->mldts.r_rep, VRB2_MLD_MAX_RREP); +- rte_bbdev_log_debug("RB %d index %d %d max %d\n", op->mldts.num_rbs, layer_idx, rrep_idx, ++ rte_bbdev_log_debug("RB %d index %d %d max %d", op->mldts.num_rbs, layer_idx, rrep_idx, + max_rb[layer_idx][rrep_idx]); + + return (op->mldts.num_rbs <= max_rb[layer_idx][rrep_idx]); +@@ -4650,7 +4607,7 @@ vrb1_configure(const char *dev_name, struct rte_acc_conf *conf) + } + + if (aram_address > VRB1_WORDS_IN_ARAM_SIZE) { +- rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n", ++ rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d", + aram_address, VRB1_WORDS_IN_ARAM_SIZE); + return -EINVAL; + } +@@ -5020,7 +4977,7 @@ vrb2_configure(const char *dev_name, struct rte_acc_conf *conf) + } + } + if (aram_address > VRB2_WORDS_IN_ARAM_SIZE) { +- rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d\n", ++ rte_bbdev_log(ERR, "ARAM Configuration not fitting %d %d", + aram_address, VRB2_WORDS_IN_ARAM_SIZE); + return -EINVAL; + } +diff --git a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +index 6b0644ffc5..d60cd3a5c5 100644 +--- a/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c ++++ b/dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c +@@ -1498,14 +1498,14 @@ fpga_mutex_acquisition(struct fpga_queue *q) + do { + if (cnt > 0) + usleep(FPGA_TIMEOUT_CHECK_INTERVAL); +- rte_bbdev_log_debug("Acquiring Mutex for %x\n", ++ rte_bbdev_log_debug("Acquiring Mutex for %x", + q->ddr_mutex_uuid); + fpga_reg_write_32(q->d->mmio_base, + FPGA_5GNR_FEC_MUTEX, + mutex_ctrl); + mutex_read = fpga_reg_read_32(q->d->mmio_base, + FPGA_5GNR_FEC_MUTEX); +- rte_bbdev_log_debug("Mutex %x cnt %d owner %x\n", ++ rte_bbdev_log_debug("Mutex %x cnt %d owner %x", + mutex_read, cnt, q->ddr_mutex_uuid); + cnt++; + } while ((mutex_read >> 16) != q->ddr_mutex_uuid); +@@ -1546,7 +1546,7 @@ fpga_harq_write_loopback(struct fpga_queue *q, + FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS); + if (reg_32 < harq_in_length) { + left_length = reg_32; +- rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n"); ++ rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size"); + } + + input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input, +@@ -1609,18 +1609,18 @@ fpga_harq_read_loopback(struct fpga_queue *q, + FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS); + if (reg < harq_in_length) { + harq_in_length = reg; +- rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n"); ++ rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size"); + } + + if (!mbuf_append(harq_output, harq_output, harq_in_length)) { +- rte_bbdev_log(ERR, "HARQ output buffer warning %d %d\n", ++ rte_bbdev_log(ERR, "HARQ output buffer warning %d %d", + harq_output->buf_len - + rte_pktmbuf_headroom(harq_output), + harq_in_length); + harq_in_length = harq_output->buf_len - + rte_pktmbuf_headroom(harq_output); + if (!mbuf_append(harq_output, harq_output, harq_in_length)) { +- rte_bbdev_log(ERR, "HARQ output buffer 
issue %d %d\n", ++ rte_bbdev_log(ERR, "HARQ output buffer issue %d %d", + harq_output->buf_len, harq_in_length); + return -1; + } +@@ -1642,7 +1642,7 @@ fpga_harq_read_loopback(struct fpga_queue *q, + FPGA_5GNR_FEC_DDR4_RD_RDY_REGS); + if (reg == FPGA_DDR_OVERFLOW) { + rte_bbdev_log(ERR, +- "Read address is overflow!\n"); ++ "Read address is overflow!"); + return -1; + } + } +diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +index bb754a5395..2432cdf884 100644 +--- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c ++++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +@@ -201,7 +201,7 @@ la12xx_e200_queue_setup(struct rte_bbdev *dev, + q_priv->la12xx_core_id = LA12XX_LDPC_DEC_CORE; + break; + default: +- rte_bbdev_log(ERR, "Unsupported op type\n"); ++ rte_bbdev_log(ERR, "Unsupported op type"); + return -1; + } + +@@ -269,7 +269,7 @@ la12xx_e200_queue_setup(struct rte_bbdev *dev, + ch->feca_blk_id = rte_cpu_to_be_32(priv->num_ldpc_dec_queues++); + break; + default: +- rte_bbdev_log(ERR, "Not supported op type\n"); ++ rte_bbdev_log(ERR, "Not supported op type"); + return -1; + } + ch->op_type = rte_cpu_to_be_32(q_priv->op_type); +@@ -789,6 +789,7 @@ setup_la12xx_dev(struct rte_bbdev *dev) + ipc_priv->hugepg_start.size = hp->len; + + rte_free(hp); ++ hp = NULL; + } + + dev_ipc = open_ipc_dev(priv->modem_id); +@@ -1084,6 +1085,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) + + PMD_INIT_FUNC_TRACE(); + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ + if (vdev == NULL) + return -EINVAL; + +diff --git a/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c b/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c +index 8ddc7ff05f..a66dcd8962 100644 +--- a/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c ++++ b/dpdk/drivers/baseband/turbo_sw/bbdev_turbo_software.c +@@ -269,7 +269,7 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info) + dev_info->num_queues[op_cap->type] = num_queue_per_type; + } + +- rte_bbdev_log_debug("got device info from %u\n", dev->data->dev_id); ++ rte_bbdev_log_debug("got device info from %u", dev->data->dev_id); + } + + /* Release queue */ +@@ -1951,7 +1951,7 @@ turbo_sw_bbdev_probe(struct rte_vdev_device *vdev) + parse_turbo_sw_params(&init_params, input_args); + + rte_bbdev_log_debug( +- "Initialising %s on NUMA node %d with max queues: %d\n", ++ "Initialising %s on NUMA node %d with max queues: %d", + name, init_params.socket_id, init_params.queues_num); + + return turbo_sw_bbdev_create(vdev, &init_params); +diff --git a/dpdk/drivers/bus/cdx/cdx_vfio.c b/dpdk/drivers/bus/cdx/cdx_vfio.c +index 79abc3f120..664f267471 100644 +--- a/dpdk/drivers/bus/cdx/cdx_vfio.c ++++ b/dpdk/drivers/bus/cdx/cdx_vfio.c +@@ -638,7 +638,7 @@ rte_cdx_vfio_bm_enable(struct rte_cdx_device *dev) + feature->flags |= VFIO_DEVICE_FEATURE_SET; + ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature); + if (ret) { +- CDX_BUS_ERR("Bus Master configuring not supported for device: %s, error: %d (%s)\n", ++ CDX_BUS_ERR("Bus Master configuring not supported for device: %s, error: %d (%s)", + dev->name, errno, strerror(errno)); + free(feature); + return ret; +@@ -648,7 +648,7 @@ rte_cdx_vfio_bm_enable(struct rte_cdx_device *dev) + vfio_bm_feature->op = VFIO_DEVICE_FEATURE_SET_MASTER; + ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature); + if (ret < 0) +- CDX_BUS_ERR("BM Enable Error for device: %s, Error: %d (%s)\n", ++ CDX_BUS_ERR("BM Enable Error for device: %s, Error: %d (%s)", + dev->name, 
errno, strerror(errno)); + + free(feature); +@@ -682,7 +682,7 @@ rte_cdx_vfio_bm_disable(struct rte_cdx_device *dev) + feature->flags |= VFIO_DEVICE_FEATURE_SET; + ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature); + if (ret) { +- CDX_BUS_ERR("Bus Master configuring not supported for device: %s, Error: %d (%s)\n", ++ CDX_BUS_ERR("Bus Master configuring not supported for device: %s, Error: %d (%s)", + dev->name, errno, strerror(errno)); + free(feature); + return ret; +@@ -692,7 +692,7 @@ rte_cdx_vfio_bm_disable(struct rte_cdx_device *dev) + vfio_bm_feature->op = VFIO_DEVICE_FEATURE_CLEAR_MASTER; + ret = ioctl(vfio_dev_fd, RTE_VFIO_DEVICE_FEATURE, feature); + if (ret < 0) +- CDX_BUS_ERR("BM Disable Error for device: %s, Error: %d (%s)\n", ++ CDX_BUS_ERR("BM Disable Error for device: %s, Error: %d (%s)", + dev->name, errno, strerror(errno)); + + free(feature); +diff --git a/dpdk/drivers/bus/dpaa/base/fman/fman.c b/dpdk/drivers/bus/dpaa/base/fman/fman.c +index 1814372a40..8263d42bed 100644 +--- a/dpdk/drivers/bus/dpaa/base/fman/fman.c ++++ b/dpdk/drivers/bus/dpaa/base/fman/fman.c +@@ -153,7 +153,7 @@ static void fman_if_vsp_init(struct __fman_if *__if) + size_t lenp; + const uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1}; + +- if (__if->__if.mac_type == fman_mac_1g) { ++ if (__if->__if.mac_idx <= 8) { + for_each_compatible_node(dev, NULL, + "fsl,fman-port-1g-rx-extended-args") { + prop = of_get_property(dev, "cell-index", &lenp); +@@ -176,7 +176,32 @@ static void fman_if_vsp_init(struct __fman_if *__if) + } + } + } +- } else if (__if->__if.mac_type == fman_mac_10g) { ++ ++ for_each_compatible_node(dev, NULL, ++ "fsl,fman-port-op-extended-args") { ++ prop = of_get_property(dev, "cell-index", &lenp); ++ ++ if (prop) { ++ cell_index = of_read_number(&prop[0], ++ lenp / sizeof(phandle)); ++ ++ if (cell_index == __if->__if.mac_idx) { ++ prop = of_get_property(dev, ++ "vsp-window", ++ &lenp); ++ ++ if (prop) { ++ __if->__if.num_profiles = ++ of_read_number(&prop[0], ++ 1); ++ __if->__if.base_profile_id = ++ of_read_number(&prop[1], ++ 1); ++ } ++ } ++ } ++ } ++ } else { + for_each_compatible_node(dev, NULL, + "fsl,fman-port-10g-rx-extended-args") { + prop = of_get_property(dev, "cell-index", &lenp); +diff --git a/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c b/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c +index 24a99f7235..97e792806f 100644 +--- a/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c ++++ b/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c +@@ -243,10 +243,11 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n) + int i; + uint64_t base_offset = offsetof(struct memac_regs, reoct_l); + +- for (i = 0; i < n; i++) +- value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) | +- (u64)in_be32((char *)regs + base_offset + +- 8 * i + 4)) << 32); ++ for (i = 0; i < n; i++) { ++ uint64_t a = in_be32((char *)regs + base_offset + 8 * i); ++ uint64_t b = in_be32((char *)regs + base_offset + 8 * i + 4); ++ value[i] = a | b << 32; ++ } + } + + void +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c +index 3504ec97db..3e4622f606 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/process.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/process.c +@@ -1,7 +1,7 @@ + /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2011-2016 Freescale Semiconductor Inc. 
+- * Copyright 2017,2020 NXP ++ * Copyright 2017,2020,2022,2024 NXP + * + */ + #include +@@ -27,15 +27,16 @@ static int check_fd(void) + { + int ret; + +- if (fd >= 0) +- return 0; + ret = pthread_mutex_lock(&fd_init_lock); + assert(!ret); ++ + /* check again with the lock held */ + if (fd < 0) + fd = open(PROCESS_PATH, O_RDWR); ++ + ret = pthread_mutex_unlock(&fd_init_lock); + assert(!ret); ++ + return (fd >= 0) ? 0 : -ENODEV; + } + +diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +index 83db0a534e..3a1a843ba0 100644 +--- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c ++++ b/dpdk/drivers/bus/dpaa/base/qbman/qman.c +@@ -294,10 +294,32 @@ static inline void qman_stop_dequeues_ex(struct qman_portal *p) + qm_dqrr_set_maxfill(&p->p, 0); + } + ++static inline void qm_mr_pvb_update(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); ++ ++#ifdef RTE_LIBRTE_DPAA_HWDEBUG ++ DPAA_ASSERT(mr->pmode == qm_mr_pvb); ++#endif ++ /* when accessing 'verb', use __raw_readb() to ensure that compiler ++ * inlining doesn't try to optimise out "excess reads". ++ */ ++ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { ++ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); ++ if (!mr->pi) ++ mr->vbit ^= QM_MR_VERB_VBIT; ++ mr->fill++; ++ res = MR_INC(res); ++ } ++ dcbit_ro(res); ++} ++ + static int drain_mr_fqrni(struct qm_portal *p) + { + const struct qm_mr_entry *msg; + loop: ++ qm_mr_pvb_update(p); + msg = qm_mr_current(p); + if (!msg) { + /* +@@ -319,6 +341,7 @@ loop: + do { + now = mfatb(); + } while ((then + 10000) > now); ++ qm_mr_pvb_update(p); + msg = qm_mr_current(p); + if (!msg) + return 0; +@@ -481,27 +504,6 @@ static inline int qm_mr_init(struct qm_portal *portal, + return 0; + } + +-static inline void qm_mr_pvb_update(struct qm_portal *portal) +-{ +- register struct qm_mr *mr = &portal->mr; +- const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); +- +-#ifdef RTE_LIBRTE_DPAA_HWDEBUG +- DPAA_ASSERT(mr->pmode == qm_mr_pvb); +-#endif +- /* when accessing 'verb', use __raw_readb() to ensure that compiler +- * inlining doesn't try to optimise out "excess reads". 
+- */ +- if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { +- mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); +- if (!mr->pi) +- mr->vbit ^= QM_MR_VERB_VBIT; +- mr->fill++; +- res = MR_INC(res); +- } +- dcbit_ro(res); +-} +- + struct qman_portal * + qman_init_portal(struct qman_portal *portal, + const struct qm_portal_config *c, +@@ -1825,6 +1827,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) + } + out: + FQUNLOCK(fq); ++ /* Draining FQRNIs, if any */ ++ drain_mr_fqrni(&p->p); + return rval; + } + +@@ -2165,8 +2169,10 @@ int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags) + + if (!p->vdqcr_owned) { + FQLOCK(fq); +- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { ++ FQUNLOCK(fq); + goto escape; ++ } + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + p->vdqcr_owned = fq; +@@ -2199,8 +2205,10 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, + + if (!p->vdqcr_owned) { + FQLOCK(fq); +- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { ++ FQUNLOCK(fq); + goto escape; ++ } + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + p->vdqcr_owned = fq; +diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c +index e57159f5d8..aaf2a5f43e 100644 +--- a/dpdk/drivers/bus/dpaa/dpaa_bus.c ++++ b/dpdk/drivers/bus/dpaa/dpaa_bus.c +@@ -187,6 +187,7 @@ dpaa_create_device_list(void) if (dev->intr_handle == NULL) { DPAA_BUS_LOG(ERR, "Failed to allocate intr handle"); ret = -ENOMEM; @@ -17528,21 +19678,327 @@ index e57159f5d8..aaf2a5f43e 100644 dev_name = dup + strlen("name="); if (start != NULL) { +diff --git a/dpdk/drivers/bus/dpaa/include/fman.h b/dpdk/drivers/bus/dpaa/include/fman.h +index 3a6dd555a7..19f6132bba 100644 +--- a/dpdk/drivers/bus/dpaa/include/fman.h ++++ b/dpdk/drivers/bus/dpaa/include/fman.h +@@ -403,7 +403,8 @@ extern int fman_ccsr_map_fd; + #define FMAN_ERR(rc, fmt, args...) 
\ + do { \ + _errno = (rc); \ +- DPAA_BUS_LOG(ERR, fmt "(%d)", ##args, errno); \ ++ rte_log(RTE_LOG_ERR, dpaa_logtype_bus, "dpaa: " fmt "(%d)\n", \ ++ ##args, errno); \ + } while (0) + + #define FMAN_IP_REV_1 0xC30C4 diff --git a/dpdk/drivers/bus/fslmc/fslmc_bus.c b/dpdk/drivers/bus/fslmc/fslmc_bus.c -index 57bfb5111a..89f0f329c0 100644 +index 57bfb5111a..adb452fd3e 100644 --- a/dpdk/drivers/bus/fslmc/fslmc_bus.c +++ b/dpdk/drivers/bus/fslmc/fslmc_bus.c -@@ -634,6 +634,10 @@ fslmc_bus_dev_iterate(const void *start, const char *str, +@@ -499,7 +499,7 @@ rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, + const struct rte_dpaa2_device *dstart; + struct rte_dpaa2_device *dev; + +- DPAA2_BUS_DEBUG("Finding a device named %s\n", (const char *)data); ++ DPAA2_BUS_DEBUG("Finding a device named %s", (const char *)data); + + /* find_device is always called with an opaque object which should be + * passed along to the 'cmp' function iterating over all device obj +@@ -514,7 +514,7 @@ rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp, + } + while (dev != NULL) { + if (cmp(&dev->device, data) == 0) { +- DPAA2_BUS_DEBUG("Found device (%s)\n", ++ DPAA2_BUS_DEBUG("Found device (%s)", + dev->device.name); + return &dev->device; + } +@@ -628,12 +628,16 @@ fslmc_bus_dev_iterate(const void *start, const char *str, + + /* Expectation is that device would be name=device_name */ + if (strncmp(str, "name=", 5) != 0) { +- DPAA2_BUS_DEBUG("Invalid device string (%s)\n", str); ++ DPAA2_BUS_DEBUG("Invalid device string (%s)", str); + return NULL; + } /* Now that name=device_name format is available, split */ dup = strdup(str); + if (dup == NULL) { -+ DPAA2_BUS_DEBUG("Dup string (%s) failed!\n", str); ++ DPAA2_BUS_DEBUG("Dup string (%s) failed!", str); + return NULL; + } dev_name = dup + strlen("name="); if (start != NULL) { +diff --git a/dpdk/drivers/bus/fslmc/fslmc_vfio.c b/dpdk/drivers/bus/fslmc/fslmc_vfio.c +index 5966776a85..b90efeb651 100644 +--- a/dpdk/drivers/bus/fslmc/fslmc_vfio.c ++++ b/dpdk/drivers/bus/fslmc/fslmc_vfio.c +@@ -232,7 +232,7 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len, + + /* iova_addr may be set to RTE_BAD_IOVA */ + if (iova_addr == RTE_BAD_IOVA) { +- DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n"); ++ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping"); + cur_len += map_len; + continue; + } +@@ -389,7 +389,7 @@ rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size) + dma_map.vaddr = vaddr; + dma_map.iova = iova; + +- DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n", ++ DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64, + (uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova, + (uint64_t)dma_map.size); + ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, +@@ -480,13 +480,13 @@ fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr, + ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status); + if (ret) { + DPAA2_BUS_ERR(" %s cannot get group status, " +- "error %i (%s)\n", dev_addr, ++ "error %i (%s)", dev_addr, + errno, strerror(errno)); + close(vfio_group_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; + } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) { +- DPAA2_BUS_ERR(" %s VFIO group is not viable!\n", dev_addr); ++ DPAA2_BUS_ERR(" %s VFIO group is not viable!", dev_addr); + close(vfio_group_fd); + rte_vfio_clear_group(vfio_group_fd); + return -1; +@@ -503,7 +503,7 @@ fslmc_vfio_setup_device(const char 
*sysfs_base, const char *dev_addr, + &vfio_container_fd); + if (ret) { + DPAA2_BUS_ERR(" %s cannot add VFIO group to container, " +- "error %i (%s)\n", dev_addr, ++ "error %i (%s)", dev_addr, + errno, strerror(errno)); + close(vfio_group_fd); + close(vfio_container_fd); +diff --git a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c +index 07256ed7ec..7e858a113f 100644 +--- a/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c ++++ b/dpdk/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c +@@ -86,7 +86,7 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!rxq->q_storage) { +- DPAA2_BUS_ERR("q_storage allocation failed\n"); ++ DPAA2_BUS_ERR("q_storage allocation failed"); + ret = -ENOMEM; + goto err; + } +@@ -94,7 +94,7 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused, + memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t)); + ret = dpaa2_alloc_dq_storage(rxq->q_storage); + if (ret) { +- DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n"); ++ DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed"); + goto err; + } + } +diff --git a/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c b/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c +index eea06988ff..0e471ec3fd 100644 +--- a/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c ++++ b/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2015 Freescale Semiconductor, Inc. +- * Copyright 2018-2020 NXP ++ * Copyright 2018-2020,2022 NXP + */ + + #include "compat.h" +@@ -37,6 +37,7 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, + struct qbman_bp_query_rslt *r) + { + struct qbman_bp_query_desc *p; ++ struct qbman_bp_query_rslt *bp_query_rslt; + + /* Start the management command */ + p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s); +@@ -47,14 +48,16 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, + p->bpid = bpid; + + /* Complete the management command */ +- *r = *(struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_BP_QUERY); +- if (!r) { ++ bp_query_rslt = (struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_BP_QUERY); ++ if (!bp_query_rslt) { + pr_err("qbman: Query BPID %d failed, no response\n", + bpid); + return -EIO; + } + ++ *r = *bp_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY); + +@@ -202,20 +205,23 @@ int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, + struct qbman_fq_query_rslt *r) + { + struct qbman_fq_query_desc *p; ++ struct qbman_fq_query_rslt *fq_query_rslt; + + p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->fqid = fqid; +- *r = *(struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_FQ_QUERY); +- if (!r) { ++ fq_query_rslt = (struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_FQ_QUERY); ++ if (!fq_query_rslt) { + pr_err("qbman: Query FQID %d failed, no response\n", + fqid); + return -EIO; + } + ++ *r = *fq_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY); + +@@ -398,20 +404,23 @@ int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, + struct qbman_cgr_query_rslt *r) + { + struct qbman_cgr_query_desc *p; ++ struct qbman_cgr_query_rslt *cgr_query_rslt; + + p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->cgid = cgid; +- *r = *(struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, 
p, +- QBMAN_CGR_QUERY); +- if (!r) { ++ cgr_query_rslt = (struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_CGR_QUERY); ++ if (!cgr_query_rslt) { + pr_err("qbman: Query CGID %d failed, no response\n", + cgid); + return -EIO; + } + ++ *r = *cgr_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_CGR_QUERY); + +@@ -473,20 +482,23 @@ int qbman_cgr_wred_query(struct qbman_swp *s, uint32_t cgid, + struct qbman_wred_query_rslt *r) + { + struct qbman_cgr_query_desc *p; ++ struct qbman_wred_query_rslt *wred_query_rslt; + + p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->cgid = cgid; +- *r = *(struct qbman_wred_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_WRED_QUERY); +- if (!r) { ++ wred_query_rslt = (struct qbman_wred_query_rslt *)qbman_swp_mc_complete( ++ s, p, QBMAN_WRED_QUERY); ++ if (!wred_query_rslt) { + pr_err("qbman: Query CGID WRED %d failed, no response\n", + cgid); + return -EIO; + } + ++ *r = *wred_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WRED_QUERY); + +@@ -527,7 +539,7 @@ void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, + if (mn == 0) + *maxth = ma; + else +- *maxth = ((ma+256) * (1<<(mn-1))); ++ *maxth = ((uint64_t)(ma+256) * (1<<(mn-1))); + + if (step_s == 0) + *minth = *maxth - step_i; +@@ -630,6 +642,7 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, + struct qbman_wqchan_query_rslt *r) + { + struct qbman_wqchan_query_desc *p; ++ struct qbman_wqchan_query_rslt *wqchan_query_rslt; + + /* Start the management command */ + p = (struct qbman_wqchan_query_desc *)qbman_swp_mc_start(s); +@@ -640,14 +653,16 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, + p->chid = chanid; + + /* Complete the management command */ +- *r = *(struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_WQ_QUERY); +- if (!r) { ++ wqchan_query_rslt = (struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete( ++ s, p, QBMAN_WQ_QUERY); ++ if (!wqchan_query_rslt) { + pr_err("qbman: Query WQ Channel %d failed, no response\n", + chanid); + return -EIO; + } + ++ *r = *wqchan_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WQ_QUERY); + +diff --git a/dpdk/drivers/bus/ifpga/ifpga_bus.c b/dpdk/drivers/bus/ifpga/ifpga_bus.c +index ffb0c61214..11b31eee4f 100644 +--- a/dpdk/drivers/bus/ifpga/ifpga_bus.c ++++ b/dpdk/drivers/bus/ifpga/ifpga_bus.c +@@ -180,7 +180,7 @@ ifpga_scan_one(struct rte_rawdev *rawdev, + rawdev->dev_ops->firmware_load && + rawdev->dev_ops->firmware_load(rawdev, + &afu_pr_conf)){ +- IFPGA_BUS_ERR("firmware load error %d\n", ret); ++ IFPGA_BUS_ERR("firmware load error %d", ret); + goto end; + } + afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low; +@@ -316,7 +316,7 @@ ifpga_probe_all_drivers(struct rte_afu_device *afu_dev) + + /* Check if a driver is already loaded */ + if (rte_dev_is_probed(&afu_dev->device)) { +- IFPGA_BUS_DEBUG("Device %s is already probed\n", ++ IFPGA_BUS_DEBUG("Device %s is already probed", + rte_ifpga_device_name(afu_dev)); + return -EEXIST; + } +@@ -353,7 +353,7 @@ ifpga_probe(void) + if (ret == -EEXIST) + continue; + if (ret < 0) +- IFPGA_BUS_ERR("failed to initialize %s device\n", ++ IFPGA_BUS_ERR("failed to initialize %s device", + rte_ifpga_device_name(afu_dev)); + } + +@@ -408,7 +408,7 @@ ifpga_remove_driver(struct rte_afu_device *afu_dev) + + name = 
rte_ifpga_device_name(afu_dev); + if (afu_dev->driver == NULL) { +- IFPGA_BUS_DEBUG("no driver attach to device %s\n", name); ++ IFPGA_BUS_DEBUG("no driver attach to device %s", name); + return 1; + } + diff --git a/dpdk/drivers/bus/pci/linux/pci_uio.c b/dpdk/drivers/bus/pci/linux/pci_uio.c index 97d740dfe5..4afda97858 100644 --- a/dpdk/drivers/bus/pci/linux/pci_uio.c @@ -17749,7 +20205,7 @@ index 76c661f054..a06378b239 100644 - rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_UNKNOWN); } diff --git a/dpdk/drivers/bus/vdev/vdev.c b/dpdk/drivers/bus/vdev/vdev.c -index 7974b27295..dcedd0d4a0 100644 +index 7974b27295..ec7abe7cda 100644 --- a/dpdk/drivers/bus/vdev/vdev.c +++ b/dpdk/drivers/bus/vdev/vdev.c @@ -247,6 +247,10 @@ alloc_devargs(const char *name, const char *args) @@ -17763,43 +20219,15 @@ index 7974b27295..dcedd0d4a0 100644 devargs->args = devargs->data; ret = strlcpy(devargs->name, name, sizeof(devargs->name)); -@@ -259,6 +263,22 @@ alloc_devargs(const char *name, const char *args) - return devargs; - } +@@ -272,6 +276,7 @@ insert_vdev(const char *name, const char *args, + return -EINVAL; -+static struct rte_devargs * -+vdev_devargs_lookup(const char *name) -+{ -+ struct rte_devargs *devargs; -+ char dev_name[32]; -+ -+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs) { -+ devargs->bus->parse(devargs->name, &dev_name); -+ if (strcmp(dev_name, name) == 0) { -+ VDEV_LOG(INFO, "devargs matched %s", dev_name); -+ return devargs; -+ } -+ } -+ return NULL; -+} -+ - static int - insert_vdev(const char *name, const char *args, - struct rte_vdev_device **p_dev, -@@ -271,7 +291,11 @@ insert_vdev(const char *name, const char *args, - if (name == NULL) - return -EINVAL; - -- devargs = alloc_devargs(name, args); -+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) -+ devargs = alloc_devargs(name, args); -+ else -+ devargs = vdev_devargs_lookup(name); + devargs = alloc_devargs(name, args); + if (!devargs) return -ENOMEM; -@@ -283,7 +307,6 @@ insert_vdev(const char *name, const char *args, +@@ -283,7 +288,6 @@ insert_vdev(const char *name, const char *args, dev->device.bus = &rte_vdev_bus; dev->device.numa_node = SOCKET_ID_ANY; @@ -17807,7 +20235,7 @@ index 7974b27295..dcedd0d4a0 100644 if (find_vdev(name)) { /* -@@ -298,6 +321,7 @@ insert_vdev(const char *name, const char *args, +@@ -298,6 +302,7 @@ insert_vdev(const char *name, const char *args, if (init) rte_devargs_insert(&devargs); dev->device.devargs = devargs; @@ -17815,6 +20243,32 @@ index 7974b27295..dcedd0d4a0 100644 TAILQ_INSERT_TAIL(&vdev_device_list, dev, next); if (p_dev) +diff --git a/dpdk/drivers/bus/vdev/vdev_params.c b/dpdk/drivers/bus/vdev/vdev_params.c +index 51583fe949..68ae09e2e9 100644 +--- a/dpdk/drivers/bus/vdev/vdev_params.c ++++ b/dpdk/drivers/bus/vdev/vdev_params.c +@@ -53,7 +53,7 @@ rte_vdev_dev_iterate(const void *start, + if (str != NULL) { + kvargs = rte_kvargs_parse(str, vdev_params_keys); + if (kvargs == NULL) { +- VDEV_LOG(ERR, "cannot parse argument list\n"); ++ VDEV_LOG(ERR, "cannot parse argument list"); + rte_errno = EINVAL; + return NULL; + } +diff --git a/dpdk/drivers/bus/vmbus/vmbus_common.c b/dpdk/drivers/bus/vmbus/vmbus_common.c +index b9139c6e6c..8a965d10d9 100644 +--- a/dpdk/drivers/bus/vmbus/vmbus_common.c ++++ b/dpdk/drivers/bus/vmbus/vmbus_common.c +@@ -108,7 +108,7 @@ vmbus_probe_one_driver(struct rte_vmbus_driver *dr, + /* no initialization when marked as blocked, return without error */ + if (dev->device.devargs != NULL && + dev->device.devargs->policy == RTE_DEV_BLOCKED) { +- VMBUS_LOG(INFO, " Device 
is blocked, not initializing\n"); ++ VMBUS_LOG(INFO, " Device is blocked, not initializing"); + return 1; + } + diff --git a/dpdk/drivers/common/cnxk/cnxk_security.c b/dpdk/drivers/common/cnxk/cnxk_security.c index a8c3ba90cd..40685d0912 100644 --- a/dpdk/drivers/common/cnxk/cnxk_security.c @@ -18162,7 +20616,7 @@ index 981e85a204..4e23d8c135 100644 } diff --git a/dpdk/drivers/common/cnxk/roc_dev.c b/dpdk/drivers/common/cnxk/roc_dev.c -index e7e89bf3d6..14aff233d5 100644 +index e7e89bf3d6..793d78fdbc 100644 --- a/dpdk/drivers/common/cnxk/roc_dev.c +++ b/dpdk/drivers/common/cnxk/roc_dev.c @@ -198,9 +198,8 @@ af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg) @@ -18186,6 +20640,81 @@ index e7e89bf3d6..14aff233d5 100644 /* Send UP message to all VF's */ for (vf = 0; vf < vf_mbox->ndevs; vf++) { /* VF active */ +@@ -946,8 +947,8 @@ mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev) + RVU_VF_INT_VEC_MBOX); + } + +-static void +-mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev) ++void ++dev_mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev) + { + if (dev_is_vf(dev)) + mbox_unregister_vf_irq(pci_dev, dev); +@@ -1025,8 +1026,8 @@ roc_pf_vf_flr_irq(void *param) + } + } + +-static int +-vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev) ++void ++dev_vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev) + { + struct plt_intr_handle *intr_handle = pci_dev->intr_handle; + int i; +@@ -1042,8 +1043,6 @@ vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev) + + dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev, + RVU_PF_INT_VEC_VFFLR1); +- +- return 0; + } + + int +@@ -1492,7 +1491,7 @@ dev_init(struct dev *dev, struct plt_pci_device *pci_dev) + rc = plt_thread_create_control(&dev->sync.pfvf_msg_thread, name, + pf_vf_mbox_thread_main, dev); + if (rc != 0) { +- plt_err("Failed to create thread for VF mbox handling\n"); ++ plt_err("Failed to create thread for VF mbox handling"); + goto thread_fail; + } + } +@@ -1528,7 +1527,7 @@ thread_fail: + iounmap: + dev_vf_mbase_put(pci_dev, vf_mbase); + mbox_unregister: +- mbox_unregister_irq(pci_dev, dev); ++ dev_mbox_unregister_irq(pci_dev, dev); + if (dev->ops) + plt_free(dev->ops); + mbox_fini: +@@ -1564,10 +1563,10 @@ dev_fini(struct dev *dev, struct plt_pci_device *pci_dev) + if (dev->lmt_mz) + plt_memzone_free(dev->lmt_mz); + +- mbox_unregister_irq(pci_dev, dev); ++ dev_mbox_unregister_irq(pci_dev, dev); + + if (!dev_is_vf(dev)) +- vf_flr_unregister_irqs(pci_dev, dev); ++ dev_vf_flr_unregister_irqs(pci_dev, dev); + /* Release PF - VF */ + mbox = &dev->mbox_vfpf; + if (mbox->hwbase && mbox->dev) +diff --git a/dpdk/drivers/common/cnxk/roc_dev_priv.h b/dpdk/drivers/common/cnxk/roc_dev_priv.h +index 5b2c5096f8..f1fa498dc1 100644 +--- a/dpdk/drivers/common/cnxk/roc_dev_priv.h ++++ b/dpdk/drivers/common/cnxk/roc_dev_priv.h +@@ -128,6 +128,8 @@ int dev_irqs_disable(struct plt_intr_handle *intr_handle); + int dev_irq_reconfigure(struct plt_intr_handle *intr_handle, uint16_t max_intr); + + int dev_mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev); ++void dev_mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev); + int dev_vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev); ++void dev_vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev); + + #endif /* _ROC_DEV_PRIV_H */ diff --git a/dpdk/drivers/common/cnxk/roc_ie_on.h b/dpdk/drivers/common/cnxk/roc_ie_on.h index 
9933ffa148..11c995e9d1 100644 --- a/dpdk/drivers/common/cnxk/roc_ie_on.h @@ -18257,6 +20786,30 @@ index 9933ffa148..11c995e9d1 100644 #define ROC_ONF_IPSEC_INB_MAX_L2_SZ 32UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ 30UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2) +diff --git a/dpdk/drivers/common/cnxk/roc_ie_ot.c b/dpdk/drivers/common/cnxk/roc_ie_ot.c +index d0b7ad38f1..356bb8c5a5 100644 +--- a/dpdk/drivers/common/cnxk/roc_ie_ot.c ++++ b/dpdk/drivers/common/cnxk/roc_ie_ot.c +@@ -38,5 +38,6 @@ roc_ot_ipsec_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa) + offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx); + sa->w0.s.ctx_push_size = (offset / ROC_CTX_UNIT_8B) + 1; + sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN; ++ sa->w0.s.ctx_hdr_size = ROC_IE_OT_SA_CTX_HDR_SIZE; + sa->w0.s.aop_valid = 1; + } +diff --git a/dpdk/drivers/common/cnxk/roc_irq.c b/dpdk/drivers/common/cnxk/roc_irq.c +index a709c4047d..0b21b9e2d9 100644 +--- a/dpdk/drivers/common/cnxk/roc_irq.c ++++ b/dpdk/drivers/common/cnxk/roc_irq.c +@@ -15,7 +15,7 @@ + + #define MSIX_IRQ_SET_BUF_LEN \ + (sizeof(struct vfio_irq_set) + sizeof(int) * \ +- (plt_intr_max_intr_get(intr_handle))) ++ ((uint32_t)plt_intr_max_intr_get(intr_handle))) + + static int + irq_get_info(struct plt_intr_handle *intr_handle) diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h index 05434aec5a..7eaff0a0eb 100644 --- a/dpdk/drivers/common/cnxk/roc_mbox.h @@ -18294,6 +20847,19 @@ index 05434aec5a..7eaff0a0eb 100644 }; struct nix_lf_rx_ipec_cfg1_req { uint32_t __io spb_cpt_aura; +diff --git a/dpdk/drivers/common/cnxk/roc_model.c b/dpdk/drivers/common/cnxk/roc_model.c +index 6dc2afe7f0..446ab3d2bd 100644 +--- a/dpdk/drivers/common/cnxk/roc_model.c ++++ b/dpdk/drivers/common/cnxk/roc_model.c +@@ -153,7 +153,7 @@ cn10k_part_pass_get(uint32_t *part, uint32_t *pass) + + dir = opendir(SYSFS_PCI_DEVICES); + if (dir == NULL) { +- plt_err("%s(): opendir failed: %s\n", __func__, ++ plt_err("%s(): opendir failed: %s", __func__, + strerror(errno)); + return -errno; + } diff --git a/dpdk/drivers/common/cnxk/roc_nix.c b/dpdk/drivers/common/cnxk/roc_nix.c index f64933a1d9..afbc3eb901 100644 --- a/dpdk/drivers/common/cnxk/roc_nix.c @@ -18321,7 +20887,7 @@ index acdd1c4cbc..250d710c07 100644 #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2 diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.c b/dpdk/drivers/common/cnxk/roc_nix_inl.c -index 750fd08355..bc9cc2f429 100644 +index 750fd08355..ba51ddd8c8 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_inl.c +++ b/dpdk/drivers/common/cnxk/roc_nix_inl.c @@ -620,8 +620,7 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags) @@ -18344,6 +20910,28 @@ index 750fd08355..bc9cc2f429 100644 if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) { nix->need_meta_aura = true; if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) +@@ -1669,6 +1669,7 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr, + struct nix_inl_dev *inl_dev = NULL; + struct roc_cpt_lf *outb_lf = NULL; + union cpt_lf_ctx_flush flush; ++ union cpt_lf_ctx_err err; + bool get_inl_lf = true; + uintptr_t rbase; + struct nix *nix; +@@ -1710,6 +1711,13 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr, + flush.s.cptr = ((uintptr_t)sa_cptr) >> 7; + plt_write64(flush.u, rbase + CPT_LF_CTX_FLUSH); + ++ plt_atomic_thread_fence(__ATOMIC_ACQ_REL); ++ ++ /* Read a CSR to ensure that the FLUSH operation is complete */ ++ err.u = plt_read64(rbase + 
CPT_LF_CTX_ERR); ++ ++ if (err.s.flush_st_flt) ++ plt_warn("CTX flush could not complete"); + return 0; + } + plt_nix_dbg("Could not get CPT LF for CTX write"); diff --git a/dpdk/drivers/common/cnxk/roc_nix_inl.h b/dpdk/drivers/common/cnxk/roc_nix_inl.h index ab1e9c0f98..f5ce26f03f 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_inl.h @@ -18421,6 +21009,128 @@ index ab1e9c0f98..f5ce26f03f 100644 /* Inline device SSO Work callback */ typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args, uint32_t soft_exp_event); +diff --git a/dpdk/drivers/common/cnxk/roc_nix_mac.c b/dpdk/drivers/common/cnxk/roc_nix_mac.c +index 2d1c29dd66..ce3fb034c5 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_mac.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_mac.c +@@ -91,11 +91,6 @@ roc_nix_mac_addr_set(struct roc_nix *roc_nix, const uint8_t addr[]) + goto exit; + } + +- if (dev_active_vfs(&nix->dev)) { +- rc = NIX_ERR_OP_NOTSUP; +- goto exit; +- } +- + req = mbox_alloc_msg_cgx_mac_addr_set(mbox); + if (req == NULL) + goto exit; +@@ -152,11 +147,6 @@ roc_nix_mac_addr_add(struct roc_nix *roc_nix, uint8_t addr[]) + goto exit; + } + +- if (dev_active_vfs(&nix->dev)) { +- rc = NIX_ERR_OP_NOTSUP; +- goto exit; +- } +- + req = mbox_alloc_msg_cgx_mac_addr_add(mbox); + mbox_memcpy(req->mac_addr, addr, PLT_ETHER_ADDR_LEN); + +diff --git a/dpdk/drivers/common/cnxk/roc_nix_ops.c b/dpdk/drivers/common/cnxk/roc_nix_ops.c +index 9e66ad1a49..efb0a41d07 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_ops.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_ops.c +@@ -220,7 +220,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + } + +- plt_nix_dbg("tcpv4 lso fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx); + + /* + * IPv6/TCP LSO +@@ -240,7 +240,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + } + +- plt_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tcpv6 lso fmt=%u", rsp->lso_format_idx); + + /* + * IPv4/UDP/TUN HDR/IPv4/TCP LSO +@@ -256,7 +256,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V4] = rsp->lso_format_idx; +- plt_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("udp tun v4v4 fmt=%u", rsp->lso_format_idx); + + /* + * IPv4/UDP/TUN HDR/IPv6/TCP LSO +@@ -272,7 +272,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V4V6] = rsp->lso_format_idx; +- plt_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("udp tun v4v6 fmt=%u", rsp->lso_format_idx); + + /* + * IPv6/UDP/TUN HDR/IPv4/TCP LSO +@@ -288,7 +288,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V4] = rsp->lso_format_idx; +- plt_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("udp tun v6v4 fmt=%u", rsp->lso_format_idx); + + /* + * IPv6/UDP/TUN HDR/IPv6/TCP LSO +@@ -304,7 +304,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_udp_tun_idx[ROC_NIX_LSO_TUN_V6V6] = rsp->lso_format_idx; +- plt_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("udp tun v6v6 fmt=%u", rsp->lso_format_idx); + + /* + * IPv4/TUN HDR/IPv4/TCP LSO +@@ -320,7 +320,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V4] = rsp->lso_format_idx; +- plt_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tun v4v4 fmt=%u", rsp->lso_format_idx); + + /* + * IPv4/TUN 
HDR/IPv6/TCP LSO +@@ -336,7 +336,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_tun_idx[ROC_NIX_LSO_TUN_V4V6] = rsp->lso_format_idx; +- plt_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tun v4v6 fmt=%u", rsp->lso_format_idx); + + /* + * IPv6/TUN HDR/IPv4/TCP LSO +@@ -352,7 +352,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V4] = rsp->lso_format_idx; +- plt_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tun v6v4 fmt=%u", rsp->lso_format_idx); + + /* + * IPv6/TUN HDR/IPv6/TCP LSO +@@ -369,7 +369,7 @@ roc_nix_lso_fmt_setup(struct roc_nix *roc_nix) + goto exit; + + nix->lso_tun_idx[ROC_NIX_LSO_TUN_V6V6] = rsp->lso_format_idx; +- plt_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx); ++ plt_nix_dbg("tun v6v6 fmt=%u", rsp->lso_format_idx); + rc = 0; + exit: + mbox_put(mbox); diff --git a/dpdk/drivers/common/cnxk/roc_nix_rss.c b/dpdk/drivers/common/cnxk/roc_nix_rss.c index 3599eb9bae..2b88e1360d 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_rss.c @@ -18444,7 +21154,7 @@ index 3599eb9bae..2b88e1360d 100644 } diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm.c b/dpdk/drivers/common/cnxk/roc_nix_tm.c -index ece88b5e99..9e5e614b3b 100644 +index ece88b5e99..92401e04d0 100644 --- a/dpdk/drivers/common/cnxk/roc_nix_tm.c +++ b/dpdk/drivers/common/cnxk/roc_nix_tm.c @@ -328,6 +328,9 @@ nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc, @@ -18457,20 +21167,69 @@ index ece88b5e99..9e5e614b3b 100644 sq_s = nix->sqs[sq]; if (!sq_s) return -ENOENT; +@@ -903,7 +906,7 @@ nix_tm_sq_flush_pre(struct roc_nix_sq *sq) + if (rc) { + roc_nix_tm_dump(sq->roc_nix, NULL); + roc_nix_queues_ctx_dump(sq->roc_nix, NULL); +- plt_err("Failed to drain sq %u, rc=%d\n", sq->qid, rc); ++ plt_err("Failed to drain sq %u, rc=%d", sq->qid, rc); + return rc; + } + /* Freed all pending SQEs for this SQ, so disable this node */ +diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_mark.c b/dpdk/drivers/common/cnxk/roc_nix_tm_mark.c +index e9a7604e79..092d0851b9 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_tm_mark.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_tm_mark.c +@@ -266,7 +266,7 @@ nix_tm_mark_init(struct nix *nix) + } + + nix->tm_markfmt[i][j] = rsp->mark_format_idx; +- plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n", i, ++ plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u", i, + j, nix->tm_markfmt[i][j]); + } + } +diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c +index e1cef7a670..c1b91ad92f 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_tm_ops.c +@@ -503,7 +503,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix) + /* Wait for sq entries to be flushed */ + rc = roc_nix_tm_sq_flush_spin(sq); + if (rc) { +- plt_err("Failed to drain sq, rc=%d\n", rc); ++ plt_err("Failed to drain sq, rc=%d", rc); + goto cleanup; + } + } +diff --git a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c b/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c +index 8e3da95a45..4a09cc2aae 100644 +--- a/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c ++++ b/dpdk/drivers/common/cnxk/roc_nix_tm_utils.c +@@ -583,7 +583,7 @@ nix_tm_topology_reg_prep(struct nix *nix, struct nix_tm_node *node, + /* Configure TL4 to send to SDP channel instead of CGX/LBK */ + if (nix->sdp_link) { + relchan = nix->tx_chan_base & 0xff; +- plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u\n", relchan, schq, ++ plt_tm_dbg("relchan=%u schq=%u tx_chan_cnt=%u", 
relchan, schq, + nix->tx_chan_cnt); + reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + regval[k] = BIT_ULL(12); diff --git a/dpdk/drivers/common/cnxk/roc_npc.c b/dpdk/drivers/common/cnxk/roc_npc.c -index a0d88c0743..fcede1d0b7 100644 +index a0d88c0743..9ea96a524c 100644 --- a/dpdk/drivers/common/cnxk/roc_npc.c +++ b/dpdk/drivers/common/cnxk/roc_npc.c -@@ -351,6 +351,8 @@ roc_npc_fini(struct roc_npc *roc_npc) +@@ -351,6 +351,9 @@ roc_npc_fini(struct roc_npc *roc_npc) struct npc *npc = roc_npc_to_npc_priv(roc_npc); int rc; -+ npc_aging_ctrl_thread_destroy(roc_npc); ++ if (!roc_npc->flow_age.aged_flows_get_thread_exit) ++ npc_aging_ctrl_thread_destroy(roc_npc); + rc = npc_flow_free_all_resources(npc); if (rc) { plt_err("Error when deleting NPC MCAM entries, counters"); -@@ -1626,8 +1628,7 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) +@@ -1626,8 +1629,7 @@ roc_npc_flow_destroy(struct roc_npc *roc_npc, struct roc_npc_flow *flow) if (flow->has_age_action) npc_age_flow_list_entry_delete(roc_npc, flow); @@ -18594,7 +21353,7 @@ index ecd1b3e13b..3c288070fb 100644 if (rc != 0) return rc; diff --git a/dpdk/drivers/common/cnxk/roc_platform.c b/dpdk/drivers/common/cnxk/roc_platform.c -index 15cbb6d68f..80d81742a2 100644 +index 15cbb6d68f..c57dcbe731 100644 --- a/dpdk/drivers/common/cnxk/roc_platform.c +++ b/dpdk/drivers/common/cnxk/roc_platform.c @@ -85,15 +85,15 @@ roc_plt_init(void) @@ -18613,7 +21372,7 @@ index 15cbb6d68f..80d81742a2 100644 -RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_tm, NOTICE); -RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_dpi, NOTICE); -RTE_LOG_REGISTER_DEFAULT(cnxk_logtype_ree, NOTICE); -+RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_base, base, NOTICE); ++RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_base, base, INFO); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_mbox, mbox, NOTICE); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_cpt, crypto, NOTICE); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_ml, ml, NOTICE); @@ -18625,6 +21384,47 @@ index 15cbb6d68f..80d81742a2 100644 +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_tm, tm, NOTICE); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_dpi, dpi, NOTICE); +RTE_LOG_REGISTER_SUFFIX(cnxk_logtype_ree, ree, NOTICE); +diff --git a/dpdk/drivers/common/cnxk/roc_sso.c b/dpdk/drivers/common/cnxk/roc_sso.c +index 748d287bad..14cdf14554 100644 +--- a/dpdk/drivers/common/cnxk/roc_sso.c ++++ b/dpdk/drivers/common/cnxk/roc_sso.c +@@ -171,7 +171,7 @@ sso_rsrc_get(struct roc_sso *roc_sso) + mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = mbox_process_msg(mbox, (void **)&rsrc_cnt); + if (rc) { +- plt_err("Failed to get free resource count\n"); ++ plt_err("Failed to get free resource count"); + rc = -EIO; + goto exit; + } +@@ -765,7 +765,14 @@ sso_update_msix_vec_count(struct roc_sso *roc_sso, uint16_t sso_vec_cnt) + return dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt); + } + ++ /* Before re-configuring unregister irqs */ + npa_vec_cnt = (dev->npa.pci_dev == pci_dev) ? 
NPA_LF_INT_VEC_POISON + 1 : 0; ++ if (npa_vec_cnt) ++ npa_unregister_irqs(&dev->npa); ++ ++ dev_mbox_unregister_irq(pci_dev, dev); ++ if (!dev_is_vf(dev)) ++ dev_vf_flr_unregister_irqs(pci_dev, dev); + + /* Re-configure to include SSO vectors */ + rc = dev_irq_reconfigure(pci_dev->intr_handle, mbox_vec_cnt + npa_vec_cnt + sso_vec_cnt); +diff --git a/dpdk/drivers/common/cnxk/roc_tim.c b/dpdk/drivers/common/cnxk/roc_tim.c +index f8607b2852..d39af3c85e 100644 +--- a/dpdk/drivers/common/cnxk/roc_tim.c ++++ b/dpdk/drivers/common/cnxk/roc_tim.c +@@ -317,7 +317,7 @@ tim_free_lf_count_get(struct dev *dev, uint16_t *nb_lfs) + mbox_alloc_msg_free_rsrc_cnt(mbox); + rc = mbox_process_msg(mbox, (void **)&rsrc_cnt); + if (rc) { +- plt_err("Failed to get free resource count\n"); ++ plt_err("Failed to get free resource count"); + mbox_put(mbox); + return -EIO; + } diff --git a/dpdk/drivers/common/cnxk/version.map b/dpdk/drivers/common/cnxk/version.map index aa884a8fe2..e718c13acb 100644 --- a/dpdk/drivers/common/cnxk/version.map @@ -18640,6 +21440,28 @@ index aa884a8fe2..e718c13acb 100644 cnxk_ot_ipsec_inb_sa_fill; cnxk_ot_ipsec_outb_sa_fill; cnxk_ot_ipsec_inb_sa_valid; +diff --git a/dpdk/drivers/common/cpt/cpt_ucode.h b/dpdk/drivers/common/cpt/cpt_ucode.h +index b393be4cf6..2e6846312b 100644 +--- a/dpdk/drivers/common/cpt/cpt_ucode.h ++++ b/dpdk/drivers/common/cpt/cpt_ucode.h +@@ -2589,7 +2589,7 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform, + sess->cpt_op |= CPT_OP_CIPHER_DECRYPT; + sess->cpt_op |= CPT_OP_AUTH_VERIFY; + } else { +- CPT_LOG_DP_ERR("Unknown aead operation\n"); ++ CPT_LOG_DP_ERR("Unknown aead operation"); + return -1; + } + switch (aead_form->algo) { +@@ -2658,7 +2658,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform, + ctx->dec_auth = 1; + } + } else { +- CPT_LOG_DP_ERR("Unknown cipher operation\n"); ++ CPT_LOG_DP_ERR("Unknown cipher operation"); + return -1; + } + diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h b/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h index 95fc3ea5ba..54fca3bc67 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/ipsec.h @@ -18725,7 +21547,7 @@ index 95fc3ea5ba..54fca3bc67 100644 * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared * descriptor. 
diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h -index 7d16c66d79..0ed9eec816 100644 +index 7d16c66d79..27dd5c4347 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h +++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h @@ -1023,6 +1023,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, @@ -18752,6 +21574,30 @@ index 7d16c66d79..0ed9eec816 100644 LOAD(p, CLRW_RESET_CLS1_CHA | CLRW_CLR_C1KEY | CLRW_CLR_C1CTX | +@@ -1210,6 +1220,11 @@ pdcp_insert_cplane_snow_aes_op(struct program *p, + SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1); + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +@@ -1911,6 +1926,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p, + + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h index b38c15a24f..d41bacf8f9 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h @@ -18785,6 +21631,21 @@ index b38c15a24f..d41bacf8f9 100644 /* Reset class 1 CHA */ LOAD(p, CLRW_RESET_CLS1_CHA | CLRW_CLR_C1KEY | +diff --git a/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h b/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h +index fe1ac37ee8..563735eb88 100644 +--- a/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h ++++ b/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h +@@ -7,10 +7,6 @@ + #ifndef __RTA_OPERATION_CMD_H__ + #define __RTA_OPERATION_CMD_H__ + +-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000) +-#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +-#endif +- + extern enum rta_sec_era rta_sec_era; + + static inline int diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c index 9daac4bc03..860e702333 100644 --- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c @@ -18847,6 +21708,29 @@ index e0f117197c..6c1427cca4 100644 local: *; }; +diff --git a/dpdk/drivers/common/idpf/base/idpf_osdep.h b/dpdk/drivers/common/idpf/base/idpf_osdep.h +index 74a376cb13..581a36cc40 100644 +--- a/dpdk/drivers/common/idpf/base/idpf_osdep.h ++++ b/dpdk/drivers/common/idpf/base/idpf_osdep.h +@@ -341,10 +341,16 @@ idpf_hweight32(u32 num) + #define LIST_ENTRY_TYPE(type) LIST_ENTRY(type) + #endif + ++#ifndef LIST_FOREACH_SAFE ++#define LIST_FOREACH_SAFE(var, head, field, tvar) \ ++ for ((var) = LIST_FIRST((head)); \ ++ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ ++ (var) = (tvar)) ++#endif ++ + #ifndef LIST_FOR_EACH_ENTRY_SAFE + #define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list) \ +- LIST_FOREACH(pos, head, list) +- ++ LIST_FOREACH_SAFE(pos, head, list, temp) + #endif + + #ifndef LIST_FOR_EACH_ENTRY diff --git a/dpdk/drivers/common/idpf/base/virtchnl2.h b/dpdk/drivers/common/idpf/base/virtchnl2.h index 3900b784d0..21b2039aa2 100644 --- a/dpdk/drivers/common/idpf/base/virtchnl2.h @@ -18882,6 +21766,68 @@ index e6e782a219..a5e3f05014 100644 #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15 #define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S) +diff 
--git a/dpdk/drivers/common/idpf/idpf_common_device.c b/dpdk/drivers/common/idpf/idpf_common_device.c +index cc4207a46e..77c58170b3 100644 +--- a/dpdk/drivers/common/idpf/idpf_common_device.c ++++ b/dpdk/drivers/common/idpf/idpf_common_device.c +@@ -136,8 +136,7 @@ idpf_init_mbx(struct idpf_hw *hw) + if (ret != 0) + return ret; + +- LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head, +- struct idpf_ctlq_info, cq_list) { ++ LIST_FOR_EACH_ENTRY(ctlq, &hw->cq_list_head, struct idpf_ctlq_info, cq_list) { + if (ctlq->q_id == IDPF_CTLQ_ID && + ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX) + hw->asq = ctlq; +diff --git a/dpdk/drivers/common/idpf/idpf_common_logs.h b/dpdk/drivers/common/idpf/idpf_common_logs.h +index f6be84ceb5..105450774e 100644 +--- a/dpdk/drivers/common/idpf/idpf_common_logs.h ++++ b/dpdk/drivers/common/idpf/idpf_common_logs.h +@@ -9,7 +9,7 @@ + + extern int idpf_common_logtype; + +-#define DRV_LOG_RAW(level, ...) \ ++#define DRV_LOG(level, ...) \ + rte_log(RTE_LOG_ ## level, \ + idpf_common_logtype, \ + RTE_FMT("%s(): " \ +@@ -17,9 +17,6 @@ extern int idpf_common_logtype; + __func__, \ + RTE_FMT_TAIL(__VA_ARGS__,))) + +-#define DRV_LOG(level, fmt, args...) \ +- DRV_LOG_RAW(level, fmt "\n", ## args) +- + #ifdef RTE_LIBRTE_IDPF_DEBUG_RX + #define RX_LOG(level, ...) \ + RTE_LOG(level, \ +diff --git a/dpdk/drivers/common/idpf/idpf_common_rxtx_avx512.c b/dpdk/drivers/common/idpf/idpf_common_rxtx_avx512.c +index f65e8d512b..5abafc729b 100644 +--- a/dpdk/drivers/common/idpf/idpf_common_rxtx_avx512.c ++++ b/dpdk/drivers/common/idpf/idpf_common_rxtx_avx512.c +@@ -1043,6 +1043,7 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -1052,6 +1053,12 @@ idpf_tx_singleq_free_bufs_avx512(struct idpf_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ const __m512i b = _mm512_loadu_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; diff --git a/dpdk/drivers/common/mlx5/mlx5_common_mr.c b/dpdk/drivers/common/mlx5/mlx5_common_mr.c index 40ff9153bd..85ec10d2ee 100644 --- a/dpdk/drivers/common/mlx5/mlx5_common_mr.c @@ -18895,8 +21841,21 @@ index 40ff9153bd..85ec10d2ee 100644 if (*out == NULL) return -1; rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out); +diff --git a/dpdk/drivers/common/mlx5/mlx5_common_utils.h b/dpdk/drivers/common/mlx5/mlx5_common_utils.h +index ae15119a33..6db0105c53 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_common_utils.h ++++ b/dpdk/drivers/common/mlx5/mlx5_common_utils.h +@@ -131,7 +131,7 @@ struct mlx5_list_inconst { + * For huge amount of entries, please consider hash list. 
+ * + */ +-struct mlx5_list { ++struct __rte_aligned(16) mlx5_list { + struct mlx5_list_const l_const; + struct mlx5_list_inconst l_inconst; + }; diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -index 4d8818924a..630ab96a8f 100644 +index 4d8818924a..9e2d7ce86f 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c @@ -965,19 +965,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, @@ -18931,7 +21890,15 @@ index 4d8818924a..630ab96a8f 100644 attr->vdpa.valid = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); attr->vdpa.queue_counters_valid = -@@ -1074,8 +1067,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1026,6 +1019,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp); + attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz); + attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz); ++ attr->log_max_wq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_wq_sz); + attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz); + attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd); + attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq); +@@ -1074,8 +1068,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled); attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time); attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto); @@ -18941,7 +21908,7 @@ index 4d8818924a..630ab96a8f 100644 MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD); attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop); attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table); -@@ -1104,8 +1096,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1104,8 +1097,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, (ctx, &attr->flex); if (rc) return -1; @@ -18952,7 +21919,7 @@ index 4d8818924a..630ab96a8f 100644 } if (attr->crypto) { attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts) || -@@ -1814,7 +1806,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, +@@ -1814,7 +1807,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; void *rqt_ctx; struct mlx5_devx_obj *rqt = NULL; @@ -18961,7 +21928,7 @@ index 4d8818924a..630ab96a8f 100644 in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); if (!in) { -@@ -1867,7 +1859,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, +@@ -1867,7 +1860,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); void *rqt_ctx; @@ -18970,7 +21937,7 @@ index 4d8818924a..630ab96a8f 100644 int ret; if (!in) { -@@ -1880,7 +1872,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, +@@ -1880,7 +1873,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1); rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); @@ -18979,10 +21946,18 @@ index 4d8818924a..630ab96a8f 100644 for (i = 0; i < rqt_attr->rqt_actual_size; i++) MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h -index 7f23e925a5..b814c8becc 100644 +index 7f23e925a5..028cf2abb9 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h -@@ -315,6 +315,7 @@ 
struct mlx5_hca_attr { +@@ -264,6 +264,7 @@ struct mlx5_hca_attr { + struct mlx5_hca_flow_attr flow; + struct mlx5_hca_flex_attr flex; + struct mlx5_hca_crypto_mmo_attr crypto_mmo; ++ uint8_t log_max_wq_sz; + int log_max_qp_sz; + int log_max_cq_sz; + int log_max_qp; +@@ -315,6 +316,7 @@ struct mlx5_hca_attr { uint32_t flow_counter_bulk_log_granularity:5; uint32_t alloc_flow_counter_pd:1; uint32_t flow_counter_access_aso:1; @@ -18991,10 +21966,72 @@ index 7f23e925a5..b814c8becc 100644 uint32_t cross_vhca:1; uint32_t lag_rx_port_affinity:1; diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h -index 9e22dce6da..3cbb1179c0 100644 +index 9e22dce6da..79533ff35a 100644 --- a/dpdk/drivers/common/mlx5/mlx5_prm.h +++ b/dpdk/drivers/common/mlx5/mlx5_prm.h -@@ -2334,8 +2334,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { +@@ -268,8 +268,12 @@ + /* Maximum number of DS in WQE. Limited by 6-bit field. */ + #define MLX5_DSEG_MAX 63 + +-/* The 32 bit syndrome offset in struct mlx5_err_cqe. */ ++/* The 32 bit syndrome offset in struct mlx5_error_cqe. */ ++#if (RTE_CACHE_LINE_SIZE == 128) ++#define MLX5_ERROR_CQE_SYNDROME_OFFSET 116 ++#else + #define MLX5_ERROR_CQE_SYNDROME_OFFSET 52 ++#endif + + /* The completion mode offset in the WQE control segment line 2. */ + #define MLX5_COMP_MODE_OFFSET 2 +@@ -415,6 +419,29 @@ struct mlx5_wqe_mprq { + + #define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2 + ++struct mlx5_error_cqe { ++#if (RTE_CACHE_LINE_SIZE == 128) ++ uint8_t padding[64]; ++#endif ++ uint8_t rsvd0[2]; ++ uint16_t eth_wqe_id; ++ uint8_t rsvd1[16]; ++ uint16_t ib_stride_index; ++ uint8_t rsvd2[10]; ++ uint32_t srqn; ++ uint8_t rsvd3[8]; ++ uint32_t byte_cnt; ++ uint8_t rsvd4[4]; ++ uint8_t hw_err_synd; ++ uint8_t hw_synd_type; ++ uint8_t vendor_err_synd; ++ uint8_t syndrome; ++ uint32_t s_wqe_opcode_qpn; ++ uint16_t wqe_counter; ++ uint8_t signature; ++ uint8_t op_own; ++}; ++ + /* CQ element structure - should be equal to the cache line size */ + struct mlx5_cqe { + #if (RTE_CACHE_LINE_SIZE == 128) +@@ -896,7 +923,7 @@ struct mlx5_modification_cmd { + unsigned int field:12; + unsigned int action_type:4; + }; +- }; ++ } __rte_packed; + union { + uint32_t data1; + uint8_t data[4]; +@@ -907,7 +934,7 @@ struct mlx5_modification_cmd { + unsigned int dst_field:12; + unsigned int rsvd4:4; + }; +- }; ++ } __rte_packed; + }; + + typedef uint64_t u64; +@@ -2334,8 +2361,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_d0[0x3]; u8 log_conn_track_max_alloc[0x5]; u8 reserved_at_d8[0x3]; @@ -19005,7 +22042,7 @@ index 9e22dce6da..3cbb1179c0 100644 u8 allowed_object_for_other_vhca_access_high[0x20]; u8 allowed_object_for_other_vhca_access[0x20]; u8 reserved_at_140[0x20]; -@@ -3606,7 +3606,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits { +@@ -3606,7 +3633,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits { u8 eswitch_owner_vhca_id[0x10]; u8 vport_number[0x10]; u8 eswitch_owner_vhca_id_valid[0x1]; @@ -19014,8 +22051,65 @@ index 9e22dce6da..3cbb1179c0 100644 }; union mlx5_ifc_stc_param_bits { +diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h +index 79e7a7f386..d60df6fd37 100644 +--- a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h ++++ b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h +@@ -219,18 +219,6 @@ struct mlx5_action { + } dest_tir; + }; + +-struct mlx5_err_cqe { +- uint8_t rsvd0[32]; +- uint32_t srqn; +- uint8_t rsvd1[18]; +- uint8_t vendor_err_synd; +- uint8_t syndrome; +- uint32_t s_wqe_opcode_qpn; +- uint16_t wqe_counter; 
+- uint8_t signature; +- uint8_t op_own; +-}; +- + struct mlx5_wqe_srq_next_seg { + uint8_t rsvd0[2]; + rte_be16_t next_wqe_index; +diff --git a/dpdk/drivers/common/nfp/nfp_common_ctrl.h b/dpdk/drivers/common/nfp/nfp_common_ctrl.h +index d09fd2b892..532bc6584a 100644 +--- a/dpdk/drivers/common/nfp/nfp_common_ctrl.h ++++ b/dpdk/drivers/common/nfp/nfp_common_ctrl.h +@@ -223,6 +223,7 @@ struct nfp_net_fw_ver { + #define NFP_NET_CFG_CTRL_IPSEC_SM_LOOKUP (0x1 << 3) /**< SA short match lookup */ + #define NFP_NET_CFG_CTRL_IPSEC_LM_LOOKUP (0x1 << 4) /**< SA long match lookup */ + #define NFP_NET_CFG_CTRL_MULTI_PF (0x1 << 5) ++#define NFP_NET_CFG_CTRL_VIRTIO (0x1 << 10) /**< Virtio offload */ + #define NFP_NET_CFG_CTRL_IN_ORDER (0x1 << 11) /**< Virtio in-order flag */ + + #define NFP_NET_CFG_CAP_WORD1 0x00a4 +diff --git a/dpdk/drivers/common/octeontx/octeontx_mbox.c b/dpdk/drivers/common/octeontx/octeontx_mbox.c +index 4fd3fda721..f98942c79c 100644 +--- a/dpdk/drivers/common/octeontx/octeontx_mbox.c ++++ b/dpdk/drivers/common/octeontx/octeontx_mbox.c +@@ -264,7 +264,7 @@ octeontx_start_domain(void) + + result = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (result != 0) { +- mbox_log_err("Could not start domain. Err=%d. FuncErr=%d\n", ++ mbox_log_err("Could not start domain. Err=%d. FuncErr=%d", + result, hdr.res_code); + result = -EINVAL; + } +@@ -288,7 +288,7 @@ octeontx_check_mbox_version(struct mbox_intf_ver *app_intf_ver, + sizeof(struct mbox_intf_ver), + &kernel_intf_ver, sizeof(kernel_intf_ver)); + if (result != sizeof(kernel_intf_ver)) { +- mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d\n", ++ mbox_log_err("Could not send interface version. Err=%d. FuncErr=%d", + result, hdr.res_code); + result = -EINVAL; + } diff --git a/dpdk/drivers/common/qat/meson.build b/dpdk/drivers/common/qat/meson.build -index 5c36fbb270..62abcb6fe3 100644 +index 5c36fbb270..3d28bd2af5 100644 --- a/dpdk/drivers/common/qat/meson.build +++ b/dpdk/drivers/common/qat/meson.build @@ -17,13 +17,13 @@ qat_compress_relpath = '../../' + qat_compress_path @@ -19034,7 +22128,7 @@ index 5c36fbb270..62abcb6fe3 100644 'Explicitly disabled via build config') endif -@@ -36,7 +36,7 @@ if arch_subdir == 'arm' +@@ -36,14 +36,14 @@ if arch_subdir == 'arm' else qat_crypto = false dpdk_drvs_disabled += qat_crypto_path @@ -19043,6 +22137,14 @@ index 5c36fbb270..62abcb6fe3 100644 'missing dependency for Arm, libcrypto') endif else + IMB_required_ver = '1.4.0' + IMB_header = '#include' + libipsecmb = cc.find_library('IPSec_MB', required: false) +- if libipsecmb.found() and meson.version().version_compare('>=0.60') and cc.links( ++ if libipsecmb.found() and cc.links( + 'int main(void) {return 0;}', dependencies: libipsecmb) + # version comes with quotes, so we split based on " and take the middle + imb_ver = cc.get_define('IMB_VERSION_STR', @@ -57,7 +57,7 @@ else else qat_crypto = false @@ -19062,7 +22164,7 @@ index 5c36fbb270..62abcb6fe3 100644 endif endif diff --git a/dpdk/drivers/common/qat/qat_device.c b/dpdk/drivers/common/qat/qat_device.c -index f55dc3c6f0..eceb5c89c4 100644 +index f55dc3c6f0..6901fb3aab 100644 --- a/dpdk/drivers/common/qat/qat_device.c +++ b/dpdk/drivers/common/qat/qat_device.c @@ -29,6 +29,7 @@ struct qat_dev_hw_spec_funcs *qat_dev_hw_spec[QAT_N_GENS]; @@ -19073,6 +22175,54 @@ index f55dc3c6f0..eceb5c89c4 100644 /* * The set of PCI devices this driver supports +@@ -334,11 +335,7 @@ qat_pci_device_allocate(struct rte_pci_device *pci_dev, + + return qat_dev; + error: +- if 
(rte_memzone_free(qat_dev_mz)) { +- QAT_LOG(DEBUG, +- "QAT internal error! Trying to free already allocated memzone: %s", +- qat_dev_mz->name); +- } ++ rte_memzone_free(qat_dev_mz); + return NULL; + } + +diff --git a/dpdk/drivers/common/qat/qat_pf2vf.c b/dpdk/drivers/common/qat/qat_pf2vf.c +index 621f12fce2..9b25fdc6a0 100644 +--- a/dpdk/drivers/common/qat/qat_pf2vf.c ++++ b/dpdk/drivers/common/qat/qat_pf2vf.c +@@ -36,7 +36,7 @@ int qat_pf2vf_exch_msg(struct qat_pci_device *qat_dev, + } + + if ((pf2vf_msg.msg_type & type_mask) != pf2vf_msg.msg_type) { +- QAT_LOG(ERR, "PF2VF message type 0x%X out of range\n", ++ QAT_LOG(ERR, "PF2VF message type 0x%X out of range", + pf2vf_msg.msg_type); + return -EINVAL; + } +@@ -65,7 +65,7 @@ int qat_pf2vf_exch_msg(struct qat_pci_device *qat_dev, + (++count < ADF_IOV_MSG_ACK_MAX_RETRY)); + + if (val & ADF_PFVF_INT) { +- QAT_LOG(ERR, "ACK not received from remote\n"); ++ QAT_LOG(ERR, "ACK not received from remote"); + return -EIO; + } + +diff --git a/dpdk/drivers/common/qat/qat_qp.c b/dpdk/drivers/common/qat/qat_qp.c +index f95dd33375..21a110d22e 100644 +--- a/dpdk/drivers/common/qat/qat_qp.c ++++ b/dpdk/drivers/common/qat/qat_qp.c +@@ -267,7 +267,7 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue, + if (qat_qp_check_queue_alignment(queue->base_phys_addr, + queue_size_bytes)) { + QAT_LOG(ERR, "Invalid alignment on queue create " +- " 0x%"PRIx64"\n", ++ " 0x%"PRIx64, + queue->base_phys_addr); + ret = -EFAULT; + goto queue_create_err; diff --git a/dpdk/drivers/common/sfc_efx/base/efx.h b/dpdk/drivers/common/sfc_efx/base/efx.h index 3312c2fa8f..5773cb00b3 100644 --- a/dpdk/drivers/common/sfc_efx/base/efx.h @@ -19110,16075 +22260,28939 @@ index 3312c2fa8f..5773cb00b3 100644 /* The macro expands divider twice */ #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) -diff --git a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c -index 997110e3d3..c96cf2b3a1 100644 ---- a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c -+++ b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c -@@ -861,15 +861,20 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, +diff --git a/dpdk/drivers/compress/isal/isal_compress_pmd.c b/dpdk/drivers/compress/isal/isal_compress_pmd.c +index cb23e929ed..0e783243a8 100644 +--- a/dpdk/drivers/compress/isal/isal_compress_pmd.c ++++ b/dpdk/drivers/compress/isal/isal_compress_pmd.c +@@ -42,10 +42,10 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + /* Set private xform algorithm */ + if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) { + if (xform->compress.algo == RTE_COMP_ALGO_NULL) { +- ISAL_PMD_LOG(ERR, "By-pass not supported\n"); ++ ISAL_PMD_LOG(ERR, "By-pass not supported"); + return -ENOTSUP; + } +- ISAL_PMD_LOG(ERR, "Algorithm not supported\n"); ++ ISAL_PMD_LOG(ERR, "Algorithm not supported"); + return -ENOTSUP; + } + priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE; +@@ -55,7 +55,7 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + priv_xform->compress.window_size = + RTE_COMP_ISAL_WINDOW_SIZE; + else { +- ISAL_PMD_LOG(ERR, "Window size not supported\n"); ++ ISAL_PMD_LOG(ERR, "Window size not supported"); + return -ENOTSUP; + } - return; - } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC && -- cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION && -- cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { -- if (likely(compcode == CPT_COMP_GOOD)) { -- if (uc_compcode == 
ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { -- cop->status = RTE_CRYPTO_OP_STATUS_ERROR; -- return; -- } else if (uc_compcode == ROC_AE_ERR_ECC_PAI) { -- cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -- return; -+ cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { -+ struct cnxk_ae_sess *sess; -+ -+ sess = (struct cnxk_ae_sess *)cop->asym->session; -+ if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH && -+ cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { -+ if (likely(compcode == CPT_COMP_GOOD)) { -+ if (uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { -+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR; -+ return; -+ } else if (uc_compcode == ROC_AE_ERR_ECC_PAI) { -+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -+ return; -+ } +@@ -74,7 +74,7 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + RTE_COMP_HUFFMAN_DYNAMIC; + break; + default: +- ISAL_PMD_LOG(ERR, "Huffman code not supported\n"); ++ ISAL_PMD_LOG(ERR, "Huffman code not supported"); + return -ENOTSUP; + } + +@@ -92,10 +92,10 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + break; + case(RTE_COMP_CHECKSUM_CRC32_ADLER32): + ISAL_PMD_LOG(ERR, "Combined CRC and ADLER checksum not" +- " supported\n"); ++ " supported"); + return -ENOTSUP; + default: +- ISAL_PMD_LOG(ERR, "Checksum type not supported\n"); ++ ISAL_PMD_LOG(ERR, "Checksum type not supported"); + priv_xform->compress.chksum = IGZIP_DEFLATE; + break; + } +@@ -105,21 +105,21 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + */ + if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT || + xform->compress.level > RTE_COMP_LEVEL_MAX) { +- ISAL_PMD_LOG(ERR, "Compression level out of range\n"); ++ ISAL_PMD_LOG(ERR, "Compression level out of range"); + return -EINVAL; + } + /* Check for Compressdev API level 0, No compression + * not supported in ISA-L + */ + else if (xform->compress.level == RTE_COMP_LEVEL_NONE) { +- ISAL_PMD_LOG(ERR, "No Compression not supported\n"); ++ ISAL_PMD_LOG(ERR, "No Compression not supported"); + return -ENOTSUP; + } + /* If using fixed huffman code, level must be 0 */ + else if (priv_xform->compress.deflate.huffman == + RTE_COMP_HUFFMAN_FIXED) { + ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a" +- " fixed huffman code\n"); ++ " fixed huffman code"); + priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO; + priv_xform->level_buffer_size = + ISAL_DEF_LVL0_DEFAULT; +@@ -169,7 +169,7 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + ISAL_PMD_LOG(DEBUG, "Requested ISA-L level" + " 3 or above; Level 3 optimized" + " for AVX512 & AVX2 only." 
+- " level changed to 2.\n"); ++ " level changed to 2."); + priv_xform->compress.level = + RTE_COMP_ISAL_LEVEL_TWO; + priv_xform->level_buffer_size = +@@ -188,10 +188,10 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + /* Set private xform algorithm */ + if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) { + if (xform->decompress.algo == RTE_COMP_ALGO_NULL) { +- ISAL_PMD_LOG(ERR, "By pass not supported\n"); ++ ISAL_PMD_LOG(ERR, "By pass not supported"); + return -ENOTSUP; } +- ISAL_PMD_LOG(ERR, "Algorithm not supported\n"); ++ ISAL_PMD_LOG(ERR, "Algorithm not supported"); + return -ENOTSUP; + } + priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE; +@@ -210,10 +210,10 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + break; + case(RTE_COMP_CHECKSUM_CRC32_ADLER32): + ISAL_PMD_LOG(ERR, "Combined CRC and ADLER checksum not" +- " supported\n"); ++ " supported"); + return -ENOTSUP; + default: +- ISAL_PMD_LOG(ERR, "Checksum type not supported\n"); ++ ISAL_PMD_LOG(ERR, "Checksum type not supported"); + priv_xform->decompress.chksum = ISAL_DEFLATE; + break; + } +@@ -223,7 +223,7 @@ isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform, + priv_xform->decompress.window_size = + RTE_COMP_ISAL_WINDOW_SIZE; + else { +- ISAL_PMD_LOG(ERR, "Window size not supported\n"); ++ ISAL_PMD_LOG(ERR, "Window size not supported"); + return -ENOTSUP; } } -diff --git a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c -index 34d40b07d4..eb5575b7ec 100644 ---- a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c -+++ b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c -@@ -578,7 +578,22 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop, - if (unlikely(res->uc_compcode)) { - if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE) - cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; -- else -+ else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC && -+ cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { -+ struct cnxk_ae_sess *sess; -+ -+ sess = (struct cnxk_ae_sess *)cop->asym->session; -+ if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH && -+ cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { -+ if (res->uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { -+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR; -+ return; -+ } else if (res->uc_compcode == ROC_AE_ERR_ECC_PAI) { -+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -+ return; -+ } -+ } -+ } else - cop->status = RTE_CRYPTO_OP_STATUS_ERROR; +@@ -263,7 +263,7 @@ chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp) + remaining_offset); - plt_dp_info("Request failed with microcode error"); -diff --git a/dpdk/drivers/crypto/cnxk/cnxk_ae.h b/dpdk/drivers/crypto/cnxk/cnxk_ae.h -index ea11e093bf..ef9cb5eb91 100644 ---- a/dpdk/drivers/crypto/cnxk/cnxk_ae.h -+++ b/dpdk/drivers/crypto/cnxk/cnxk_ae.h -@@ -49,13 +49,22 @@ struct cnxk_ae_sess { - }; + if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) { +- ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n"); ++ ISAL_PMD_LOG(ERR, "Invalid source or destination buffer"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -279,7 +279,7 @@ chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp) + remaining_data = op->src.length - qp->stream->total_in; - static __rte_always_inline void --cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len) -+cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len, size_t max) - { -+ 
uint8_t msw_len = *len % 8; -+ uint64_t msw_val = 0; + if (ret != COMP_OK) { +- ISAL_PMD_LOG(ERR, "Compression operation failed\n"); ++ ISAL_PMD_LOG(ERR, "Compression operation failed"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } +@@ -294,7 +294,7 @@ chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp) + RTE_MIN(remaining_data, src->data_len); + } else { + ISAL_PMD_LOG(ERR, +- "Not enough input buffer segments\n"); ++ "Not enough input buffer segments"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -309,7 +309,7 @@ chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp) + qp->stream->avail_out = dst->data_len; + } else { + ISAL_PMD_LOG(ERR, +- "Not enough output buffer segments\n"); ++ "Not enough output buffer segments"); + op->status = + RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return -1; +@@ -378,14 +378,14 @@ chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp) + + if (ret == ISAL_OUT_OVERFLOW) { + ISAL_PMD_LOG(ERR, "Decompression operation ran " +- "out of space, but can be recovered.\n%d bytes " +- "consumed\t%d bytes produced\n", ++ "out of space, but can be recovered.%d bytes " ++ "consumed\t%d bytes produced", + consumed_data, qp->state->total_out); + op->status = + RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE; + return ret; + } else if (ret < 0) { +- ISAL_PMD_LOG(ERR, "Decompression operation failed\n"); ++ ISAL_PMD_LOG(ERR, "Decompression operation failed"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } +@@ -399,7 +399,7 @@ chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp) + qp->state->avail_out = dst->data_len; + } else { + ISAL_PMD_LOG(ERR, +- "Not enough output buffer segments\n"); ++ "Not enough output buffer segments"); + op->status = + RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return -1; +@@ -451,14 +451,14 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + IGZIP_HUFFTABLE_DEFAULT); + + if (op->m_src->pkt_len < (op->src.length + op->src.offset)) { +- ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n"); ++ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough."); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + if (op->dst.offset >= op->m_dst->pkt_len) { + ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough" +- " for offset provided.\n"); ++ " for offset provided."); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -483,7 +483,7 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + + if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) { + ISAL_PMD_LOG(ERR, "Invalid source or destination" +- " buffers\n"); ++ " buffers"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -493,7 +493,7 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + + /* Check that output buffer did not run out of space */ + if (ret == STATELESS_OVERFLOW) { +- ISAL_PMD_LOG(ERR, "Output buffer not big enough\n"); ++ ISAL_PMD_LOG(ERR, "Output buffer not big enough"); + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return ret; + } +@@ -501,13 +501,13 @@ process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + /* Check that input buffer has been fully consumed */ + if (qp->stream->avail_in != (uint32_t)0) { + ISAL_PMD_LOG(ERR, "Input buffer could not be read" +- " entirely\n"); ++ " entirely"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return -1; + } + + if (ret != COMP_OK) { +- ISAL_PMD_LOG(ERR, "Compression 
operation failed\n"); ++ ISAL_PMD_LOG(ERR, "Compression operation failed"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } +@@ -543,14 +543,14 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + qp->state->crc_flag = priv_xform->decompress.chksum; + + if (op->m_src->pkt_len < (op->src.length + op->src.offset)) { +- ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n"); ++ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough."); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } + + if (op->dst.offset >= op->m_dst->pkt_len) { + ISAL_PMD_LOG(ERR, "Output mbuf not big enough for " +- "offset provided.\n"); ++ "offset provided."); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -574,7 +574,7 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + + if (unlikely(!qp->state->next_in || !qp->state->next_out)) { + ISAL_PMD_LOG(ERR, "Invalid source or destination" +- " buffers\n"); ++ " buffers"); + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + return -1; + } +@@ -583,7 +583,7 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + ret = isal_inflate_stateless(qp->state); + + if (ret == ISAL_OUT_OVERFLOW) { +- ISAL_PMD_LOG(ERR, "Output buffer not big enough\n"); ++ ISAL_PMD_LOG(ERR, "Output buffer not big enough"); + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; + return ret; + } +@@ -591,13 +591,13 @@ process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp, + /* Check that input buffer has been fully consumed */ + if (qp->state->avail_in != (uint32_t)0) { + ISAL_PMD_LOG(ERR, "Input buffer could not be read" +- " entirely\n"); ++ " entirely"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return -1; + } + + if (ret != ISAL_DECOMP_OK && ret != ISAL_END_INPUT) { +- ISAL_PMD_LOG(ERR, "Decompression operation failed\n"); ++ ISAL_PMD_LOG(ERR, "Decompression operation failed"); + op->status = RTE_COMP_OP_STATUS_ERROR; + return ret; + } +@@ -622,7 +622,7 @@ process_op(struct isal_comp_qp *qp, struct rte_comp_op *op, + process_isal_inflate(op, qp, priv_xform); + break; + default: +- ISAL_PMD_LOG(ERR, "Operation Not Supported\n"); ++ ISAL_PMD_LOG(ERR, "Operation Not Supported"); + return -ENOTSUP; + } + return 0; +@@ -641,7 +641,7 @@ isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops, + for (i = 0; i < num_enq; i++) { + if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) { + ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS; +- ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n"); ++ ISAL_PMD_LOG(ERR, "Stateful operation not Supported"); + qp->qp_stats.enqueue_err_count++; + continue; + } +@@ -696,7 +696,7 @@ compdev_isal_create(const char *name, struct rte_vdev_device *vdev, + dev->dequeue_burst = isal_comp_pmd_dequeue_burst; + dev->enqueue_burst = isal_comp_pmd_enqueue_burst; + +- ISAL_PMD_LOG(INFO, "\nISA-L library version used: "ISAL_VERSION_STRING); ++ ISAL_PMD_LOG(INFO, "ISA-L library version used: "ISAL_VERSION_STRING); + + return 0; + } +@@ -739,7 +739,7 @@ compdev_isal_probe(struct rte_vdev_device *dev) + retval = rte_compressdev_pmd_parse_input_args(&init_params, args); + if (retval) { + ISAL_PMD_LOG(ERR, +- "Failed to parse initialisation arguments[%s]\n", args); ++ "Failed to parse initialisation arguments[%s]", args); + return -EINVAL; + } + +diff --git a/dpdk/drivers/compress/mlx5/mlx5_compress.c b/dpdk/drivers/compress/mlx5/mlx5_compress.c +index 41d9752833..702108c5f9 100644 +--- a/dpdk/drivers/compress/mlx5/mlx5_compress.c ++++ 
b/dpdk/drivers/compress/mlx5/mlx5_compress.c +@@ -602,7 +602,7 @@ mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe, size_t i; -- /* Strip leading NUL bytes */ -- for (i = 0; i < *len; i++) { -- if ((*data)[i] != 0) -+ if (*len <= 8) -+ return; -+ -+ memcpy(&msw_val, *data, msw_len); -+ if (msw_val != 0) -+ return; -+ -+ for (i = msw_len; i < *len && (*len - i) < max; i += 8) { -+ memcpy(&msw_val, &(*data)[i], 8); -+ if (msw_val != 0) + DRV_LOG(ERR, "Error cqe:"); +- for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4) ++ for (i = 0; i < sizeof(struct mlx5_error_cqe) >> 2; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], + cqe[i + 2], cqe[i + 3]); + DRV_LOG(ERR, "\nError wqe:"); +@@ -620,7 +620,7 @@ mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp, + struct rte_comp_op *op) + { + const uint32_t idx = qp->ci & (qp->entries_n - 1); +- volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *) ++ volatile struct mlx5_error_cqe *cqe = (volatile struct mlx5_error_cqe *) + &qp->cq.cqes[idx]; + volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *) + qp->qp.wqes; +diff --git a/dpdk/drivers/compress/octeontx/otx_zip.h b/dpdk/drivers/compress/octeontx/otx_zip.h +index 7391360925..d52f937548 100644 +--- a/dpdk/drivers/compress/octeontx/otx_zip.h ++++ b/dpdk/drivers/compress/octeontx/otx_zip.h +@@ -206,7 +206,7 @@ zipvf_prepare_sgl(struct rte_mbuf *buf, int64_t offset, struct zipvf_sginfo *sg_ break; + } + +- ZIP_PMD_LOG(DEBUG, "ZIP SGL buf[%d], len = %d, iova = 0x%"PRIx64"\n", ++ ZIP_PMD_LOG(DEBUG, "ZIP SGL buf[%d], len = %d, iova = 0x%"PRIx64, + sgidx, sginfo[sgidx].sg_ctl.s.length, sginfo[sgidx].sg_addr.s.addr); + ++sgidx; } - *data += i; -@@ -72,8 +81,8 @@ cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess, - uint8_t *exp = xform->modex.exponent.data; - uint8_t *mod = xform->modex.modulus.data; +@@ -219,7 +219,7 @@ zipvf_prepare_sgl(struct rte_mbuf *buf, int64_t offset, struct zipvf_sginfo *sg_ + } + qp->num_sgbuf = ++sgidx; -- cnxk_ae_modex_param_normalize(&mod, &mod_len); -- cnxk_ae_modex_param_normalize(&exp, &exp_len); -+ cnxk_ae_modex_param_normalize(&mod, &mod_len, SIZE_MAX); -+ cnxk_ae_modex_param_normalize(&exp, &exp_len, mod_len); +- ZIP_PMD_LOG(DEBUG, "Tot_buf_len:%d max_segs:%"PRIx64"\n", tot_buf_len, ++ ZIP_PMD_LOG(DEBUG, "Tot_buf_len:%d max_segs:%"PRIx64, tot_buf_len, + qp->num_sgbuf); + return ret; + } +@@ -246,7 +246,7 @@ zipvf_prepare_in_buf(union zip_inst_s *inst, struct zipvf_qp *qp, struct rte_com + inst->s.inp_ptr_ctl.s.length = qp->num_sgbuf; + inst->s.inp_ptr_ctl.s.fw = 0; - if (unlikely(exp_len == 0 || mod_len == 0)) - return -EINVAL; -@@ -205,16 +214,22 @@ cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess, - return 0; +- ZIP_PMD_LOG(DEBUG, "Gather(input): len(nb_segs):%d, iova: 0x%"PRIx64"\n", ++ ZIP_PMD_LOG(DEBUG, "Gather(input): len(nb_segs):%d, iova: 0x%"PRIx64, + inst->s.inp_ptr_ctl.s.length, inst->s.inp_ptr_addr.s.addr); + return ret; + } +@@ -256,7 +256,7 @@ zipvf_prepare_in_buf(union zip_inst_s *inst, struct zipvf_qp *qp, struct rte_com + inst->s.inp_ptr_addr.s.addr = rte_pktmbuf_iova_offset(m_src, offset); + inst->s.inp_ptr_ctl.s.length = inlen; - ec->pkey.length = xform->ec.pkey.length; -- if (xform->ec.pkey.length) -- rte_memcpy(ec->pkey.data, xform->ec.pkey.data, xform->ec.pkey.length); -+ if (ec->pkey.length > ROC_AE_EC_DATA_MAX) -+ ec->pkey.length = ROC_AE_EC_DATA_MAX; -+ if (ec->pkey.length) -+ rte_memcpy(ec->pkey.data, xform->ec.pkey.data, ec->pkey.length); +- ZIP_PMD_LOG(DEBUG, 
"Direct input - inlen:%d\n", inlen); ++ ZIP_PMD_LOG(DEBUG, "Direct input - inlen:%d", inlen); + return ret; + } - ec->q.x.length = xform->ec.q.x.length; -- if (xform->ec.q.x.length) -- rte_memcpy(ec->q.x.data, xform->ec.q.x.data, xform->ec.q.x.length); -+ if (ec->q.x.length > ROC_AE_EC_DATA_MAX) -+ ec->q.x.length = ROC_AE_EC_DATA_MAX; -+ if (ec->q.x.length) -+ rte_memcpy(ec->q.x.data, xform->ec.q.x.data, ec->q.x.length); +@@ -282,7 +282,7 @@ zipvf_prepare_out_buf(union zip_inst_s *inst, struct zipvf_qp *qp, struct rte_co + inst->s.out_ptr_addr.s.addr = rte_mem_virt2iova(qp->s_info); + inst->s.out_ptr_ctl.s.length = qp->num_sgbuf; - ec->q.y.length = xform->ec.q.y.length; -+ if (ec->q.y.length > ROC_AE_EC_DATA_MAX) -+ ec->q.y.length = ROC_AE_EC_DATA_MAX; - if (xform->ec.q.y.length) -- rte_memcpy(ec->q.y.data, xform->ec.q.y.data, xform->ec.q.y.length); -+ rte_memcpy(ec->q.y.data, xform->ec.q.y.data, ec->q.y.length); +- ZIP_PMD_LOG(DEBUG, "Scatter(output): nb_segs:%d, iova:0x%"PRIx64"\n", ++ ZIP_PMD_LOG(DEBUG, "Scatter(output): nb_segs:%d, iova:0x%"PRIx64, + inst->s.out_ptr_ctl.s.length, inst->s.out_ptr_addr.s.addr); + return ret; + } +@@ -296,7 +296,7 @@ zipvf_prepare_out_buf(union zip_inst_s *inst, struct zipvf_qp *qp, struct rte_co - return 0; + inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength; + +- ZIP_PMD_LOG(DEBUG, "Direct output - outlen:%d\n", inst->s.totaloutputlength); ++ ZIP_PMD_LOG(DEBUG, "Direct output - outlen:%d", inst->s.totaloutputlength); + return ret; } -@@ -282,7 +297,7 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, - struct rte_crypto_mod_op_param mod_op; - uint64_t total_key_len; - union cpt_inst_w4 w4; -- uint32_t base_len; -+ size_t base_len; - uint32_t dlen; - uint8_t *dptr; -@@ -290,8 +305,11 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, +diff --git a/dpdk/drivers/compress/octeontx/otx_zip_pmd.c b/dpdk/drivers/compress/octeontx/otx_zip_pmd.c +index fd20139da6..c8f456b319 100644 +--- a/dpdk/drivers/compress/octeontx/otx_zip_pmd.c ++++ b/dpdk/drivers/compress/octeontx/otx_zip_pmd.c +@@ -161,7 +161,7 @@ zip_set_stream_parameters(struct rte_compressdev *dev, + */ - base_len = mod_op.base.length; - if (unlikely(base_len > mod_len)) { -- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; -- return -ENOTSUP; -+ cnxk_ae_modex_param_normalize(&mod_op.base.data, &base_len, mod_len); -+ if (base_len > mod_len) { -+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; -+ return -ENOTSUP; -+ } + } else { +- ZIP_PMD_ERR("\nxform type not supported"); ++ ZIP_PMD_ERR("xform type not supported"); + ret = -1; + goto err; + } +@@ -527,7 +527,7 @@ zip_pmd_enqueue_burst(void *queue_pair, } - total_key_len = mod_len + exp_len; -@@ -735,7 +753,11 @@ cnxk_ae_sm2_sign_prep(struct rte_crypto_sm2_op_param *sm2, - uint8_t *dptr; + qp->enqed = enqd; +- ZIP_PMD_LOG(DEBUG, "ops_enqd[nb_ops:%d]:%d\n", nb_ops, enqd); ++ ZIP_PMD_LOG(DEBUG, "ops_enqd[nb_ops:%d]:%d", nb_ops, enqd); - prime_len = ec_grp->prime.length; -+ if (prime_len > ROC_AE_EC_DATA_MAX) -+ prime_len = ROC_AE_EC_DATA_MAX; - order_len = ec_grp->order.length; -+ if (order_len > ROC_AE_EC_DATA_MAX) -+ order_len = ROC_AE_EC_DATA_MAX; + return enqd; + } +@@ -563,7 +563,7 @@ zip_pmd_dequeue_burst(void *queue_pair, + op->status = RTE_COMP_OP_STATUS_SUCCESS; + } else { + /* FATAL error cannot do anything */ +- ZIP_PMD_ERR("operation failed with error code:%d\n", ++ ZIP_PMD_ERR("operation failed with error code:%d", + zresult->s.compcode); + if (zresult->s.compcode == 
ZIP_COMP_E_DSTOP) + op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED; +@@ -571,7 +571,7 @@ zip_pmd_dequeue_burst(void *queue_pair, + op->status = RTE_COMP_OP_STATUS_ERROR; + } - /* Truncate input length to curve prime length */ - if (message_len > prime_len) -@@ -822,7 +844,11 @@ cnxk_ae_sm2_verify_prep(struct rte_crypto_sm2_op_param *sm2, - uint8_t *dptr; +- ZIP_PMD_LOG(DEBUG, "written %d\n", zresult->s.totalbyteswritten); ++ ZIP_PMD_LOG(DEBUG, "written %d", zresult->s.totalbyteswritten); - prime_len = ec_grp->prime.length; -+ if (prime_len > ROC_AE_EC_DATA_MAX) -+ prime_len = ROC_AE_EC_DATA_MAX; - order_len = ec_grp->order.length; -+ if (order_len > ROC_AE_EC_DATA_MAX) -+ order_len = ROC_AE_EC_DATA_MAX; + /* Update op stats */ + switch (op->status) { +@@ -582,7 +582,7 @@ zip_pmd_dequeue_burst(void *queue_pair, + op->produced = zresult->s.totalbyteswritten; + break; + default: +- ZIP_PMD_ERR("stats not updated for status:%d\n", ++ ZIP_PMD_ERR("stats not updated for status:%d", + op->status); + break; + } +@@ -598,7 +598,7 @@ zip_pmd_dequeue_burst(void *queue_pair, + rte_mempool_put(qp->vf->sg_mp, qp->s_info); + } - /* Truncate input length to curve prime length */ - if (message_len > prime_len) -diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -index bb5a2c629e..6ae356ace0 100644 ---- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -+++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -@@ -4124,7 +4124,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, - cfg.dest_cfg.priority = priority; +- ZIP_PMD_LOG(DEBUG, "ops_deqd[nb_ops:%d]: %d\n", nb_ops, nb_dequeued); ++ ZIP_PMD_LOG(DEBUG, "ops_deqd[nb_ops:%d]: %d", nb_ops, nb_dequeued); + return nb_dequeued; + } - cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; -- cfg.user_ctx = (size_t)(qp); -+ cfg.user_ctx = (size_t)(&qp->rx_vq); - if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { - cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; - cfg.order_preservation_en = 1; -diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c -index a301e8edb2..906ea39047 100644 ---- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c -+++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c -@@ -395,10 +395,10 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) +@@ -676,7 +676,7 @@ zip_pci_remove(struct rte_pci_device *pci_dev) + char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN]; - cdb->sh_desc[0] = cipherdata.keylen; - cdb->sh_desc[1] = authdata.keylen; -- err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, -+ err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, - DESC_JOB_IO_LEN, - (unsigned int *)cdb->sh_desc, -- &cdb->sh_desc[2], 2); -+ &cdb->sh_desc[2], 2, authdata.algtype, 1); + if (pci_dev == NULL) { +- ZIP_PMD_ERR(" Invalid PCI Device\n"); ++ ZIP_PMD_ERR(" Invalid PCI Device"); + return -EINVAL; + } + rte_pci_device_name(&pci_dev->addr, compressdev_name, +diff --git a/dpdk/drivers/compress/zlib/zlib_pmd.c b/dpdk/drivers/compress/zlib/zlib_pmd.c +index 98abd41013..92e808e78c 100644 +--- a/dpdk/drivers/compress/zlib/zlib_pmd.c ++++ b/dpdk/drivers/compress/zlib/zlib_pmd.c +@@ -29,13 +29,13 @@ process_zlib_deflate(struct rte_comp_op *op, z_stream *strm) + break; + default: + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; +- ZLIB_PMD_ERR("Invalid flush value\n"); ++ ZLIB_PMD_ERR("Invalid flush value"); + return; + } - if (err < 0) { - DPAA_SEC_ERR("Crypto: Incorrect key lengths"); -diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c 
b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c -index 30f919cd40..2a5599b7d8 100644 ---- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c -+++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c -@@ -406,7 +406,7 @@ ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer) - resp_param->result = ipsec_mb_qp_release(dev, qp_id); + if (unlikely(!strm)) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; +- ZLIB_PMD_ERR("Invalid z_stream\n"); ++ ZLIB_PMD_ERR("Invalid z_stream"); + return; + } + /* Update z_stream with the inputs provided by application */ +@@ -98,7 +98,7 @@ def_end: + op->produced += strm->total_out; break; default: -- CDEV_LOG_ERR("invalid mp request type\n"); -+ CDEV_LOG_ERR("invalid mp request type"); +- ZLIB_PMD_ERR("stats not updated for status:%d\n", ++ ZLIB_PMD_ERR("stats not updated for status:%d", + op->status); } - out: -diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c -index 4de4866cf3..80de25c65b 100644 ---- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c -+++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c -@@ -1500,7 +1500,7 @@ aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, - * - * @return - * - 0 on success, the IMB_JOB will be filled -- * - -1 if invalid session or errors allocationg SGL linear buffer, -+ * - -1 if invalid session or errors allocating SGL linear buffer, - * IMB_JOB will not be filled - */ - static inline int -diff --git a/dpdk/drivers/crypto/openssl/compat.h b/dpdk/drivers/crypto/openssl/compat.h -index 9f9167c4f1..e1814fea8c 100644 ---- a/dpdk/drivers/crypto/openssl/compat.h -+++ b/dpdk/drivers/crypto/openssl/compat.h -@@ -5,6 +5,32 @@ - #ifndef __RTA_COMPAT_H__ - #define __RTA_COMPAT_H__ - -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+static __rte_always_inline void -+free_hmac_ctx(EVP_MAC_CTX *ctx) -+{ -+ EVP_MAC_CTX_free(ctx); -+} -+ -+static __rte_always_inline void -+free_cmac_ctx(EVP_MAC_CTX *ctx) -+{ -+ EVP_MAC_CTX_free(ctx); -+} -+#else -+static __rte_always_inline void -+free_hmac_ctx(HMAC_CTX *ctx) -+{ -+ HMAC_CTX_free(ctx); -+} -+ -+static __rte_always_inline void -+free_cmac_ctx(CMAC_CTX *ctx) -+{ -+ CMAC_CTX_free(ctx); -+} -+#endif -+ - #if (OPENSSL_VERSION_NUMBER < 0x10100000L) - - static __rte_always_inline int -diff --git a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h -index 334912d335..aa3f466e74 100644 ---- a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h -+++ b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h -@@ -80,6 +80,20 @@ struct openssl_qp { - */ - } __rte_cache_aligned; - -+struct evp_ctx_pair { -+ EVP_CIPHER_CTX *cipher; -+ union { -+ EVP_MD_CTX *auth; -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+ EVP_MAC_CTX *hmac; -+ EVP_MAC_CTX *cmac; -+#else -+ HMAC_CTX *hmac; -+ CMAC_CTX *cmac; -+#endif -+ }; -+}; -+ - /** OPENSSL crypto private session structure */ - struct openssl_session { - enum openssl_chain_order chain_order; -@@ -166,6 +180,15 @@ struct openssl_session { - /**< digest length */ - } auth; +@@ -114,7 +114,7 @@ process_zlib_inflate(struct rte_comp_op *op, z_stream *strm) -+ uint16_t ctx_copies_len; -+ /* < number of entries in ctx_copies */ -+ struct evp_ctx_pair qp_ctx[]; -+ /**< Flexible array member of per-queue-pair structures, each containing -+ * pointers to copies of the cipher and auth EVP contexts. 
Cipher -+ * contexts are not safe to use from multiple cores simultaneously, so -+ * maintaining these copies allows avoiding per-buffer copying into a -+ * temporary context. -+ */ - } __rte_cache_aligned; + if (unlikely(!strm)) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; +- ZLIB_PMD_ERR("Invalid z_stream\n"); ++ ZLIB_PMD_ERR("Invalid z_stream"); + return; + } + strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *, +@@ -184,7 +184,7 @@ inf_end: + op->produced += strm->total_out; + break; + default: +- ZLIB_PMD_ERR("stats not produced for status:%d\n", ++ ZLIB_PMD_ERR("stats not produced for status:%d", + op->status); + } - /** OPENSSL crypto private asymmetric session structure */ -@@ -217,7 +240,8 @@ struct openssl_asym_session { - /** Set and validate OPENSSL crypto session parameters */ - extern int - openssl_set_session_parameters(struct openssl_session *sess, -- const struct rte_crypto_sym_xform *xform); -+ const struct rte_crypto_sym_xform *xform, -+ uint16_t nb_queue_pairs); +@@ -203,7 +203,7 @@ process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op) + (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) { + op->status = RTE_COMP_OP_STATUS_INVALID_ARGS; + ZLIB_PMD_ERR("Invalid source or destination buffers or " +- "invalid Operation requested\n"); ++ "invalid Operation requested"); + } else { + private_xform = (struct zlib_priv_xform *)op->private_xform; + stream = &private_xform->stream; +@@ -238,7 +238,7 @@ zlib_set_stream_parameters(const struct rte_comp_xform *xform, + wbits = -(xform->compress.window_size); + break; + default: +- ZLIB_PMD_ERR("Compression algorithm not supported\n"); ++ ZLIB_PMD_ERR("Compression algorithm not supported"); + return -1; + } + /** Compression Level */ +@@ -260,7 +260,7 @@ zlib_set_stream_parameters(const struct rte_comp_xform *xform, + if (level < RTE_COMP_LEVEL_MIN || + level > RTE_COMP_LEVEL_MAX) { + ZLIB_PMD_ERR("Compression level %d " +- "not supported\n", ++ "not supported", + level); + return -1; + } +@@ -278,13 +278,13 @@ zlib_set_stream_parameters(const struct rte_comp_xform *xform, + strategy = Z_DEFAULT_STRATEGY; + break; + default: +- ZLIB_PMD_ERR("Compression strategy not supported\n"); ++ ZLIB_PMD_ERR("Compression strategy not supported"); + return -1; + } + if (deflateInit2(strm, level, + Z_DEFLATED, wbits, + DEF_MEM_LEVEL, strategy) != Z_OK) { +- ZLIB_PMD_ERR("Deflate init failed\n"); ++ ZLIB_PMD_ERR("Deflate init failed"); + return -1; + } + break; +@@ -298,12 +298,12 @@ zlib_set_stream_parameters(const struct rte_comp_xform *xform, + wbits = -(xform->decompress.window_size); + break; + default: +- ZLIB_PMD_ERR("Compression algorithm not supported\n"); ++ ZLIB_PMD_ERR("Compression algorithm not supported"); + return -1; + } - /** Reset OPENSSL crypto session parameters */ - extern void -diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index e8cb09defc..101111e85b 100644 ---- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -+++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -@@ -350,7 +350,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen, - static int - openssl_set_sess_aead_enc_param(struct openssl_session *sess, - enum rte_crypto_aead_algorithm algo, -- uint8_t tag_len, const uint8_t *key) -+ uint8_t tag_len, const uint8_t *key, -+ EVP_CIPHER_CTX **ctx) - { - int iv_type = 0; - unsigned int do_ccm; -@@ -378,7 +379,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + if (inflateInit2(strm, wbits) != Z_OK) { +- 
ZLIB_PMD_ERR("Inflate init failed\n"); ++ ZLIB_PMD_ERR("Inflate init failed"); + return -1; + } + break; +@@ -395,7 +395,7 @@ zlib_probe(struct rte_vdev_device *vdev) + retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args); + if (retval < 0) { + ZLIB_PMD_LOG(ERR, +- "Failed to parse initialisation arguments[%s]\n", ++ "Failed to parse initialisation arguments[%s]", + input_args); + return -EINVAL; } +diff --git a/dpdk/drivers/compress/zlib/zlib_pmd_ops.c b/dpdk/drivers/compress/zlib/zlib_pmd_ops.c +index 445a3baa67..a530d15119 100644 +--- a/dpdk/drivers/compress/zlib/zlib_pmd_ops.c ++++ b/dpdk/drivers/compress/zlib/zlib_pmd_ops.c +@@ -48,8 +48,8 @@ zlib_pmd_config(struct rte_compressdev *dev, + NULL, config->socket_id, + 0); + if (mp == NULL) { +- ZLIB_PMD_ERR("Cannot create private xform pool on " +- "socket %d\n", config->socket_id); ++ ZLIB_PMD_ERR("Cannot create private xform pool on socket %d", ++ config->socket_id); + return -ENOMEM; + } + internals->mp = mp; +diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_device.c b/dpdk/drivers/crypto/bcmfs/bcmfs_device.c +index ada7ba342c..46522970d5 100644 +--- a/dpdk/drivers/crypto/bcmfs/bcmfs_device.c ++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_device.c +@@ -139,7 +139,7 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev, + return fsdev; - sess->cipher.mode = OPENSSL_CIPHER_LIB; -- sess->cipher.ctx = EVP_CIPHER_CTX_new(); -+ *ctx = EVP_CIPHER_CTX_new(); - - if (get_aead_algo(algo, sess->cipher.key.length, - &sess->cipher.evp_algo) != 0) -@@ -388,19 +389,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + cleanup: +- free(fsdev); ++ rte_free(fsdev); - sess->chain_order = OPENSSL_CHAIN_COMBINED; + return NULL; + } +@@ -163,7 +163,7 @@ fsdev_release(struct bcmfs_device *fsdev) + return; -- if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, -+ if (EVP_EncryptInit_ex(*ctx, sess->cipher.evp_algo, - NULL, NULL, NULL) <= 0) - return -EINVAL; + TAILQ_REMOVE(&fsdev_list, fsdev, next); +- free(fsdev); ++ rte_free(fsdev); + } -- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length, -+ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, sess->iv.length, - NULL) <= 0) - return -EINVAL; + static int +diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c b/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c +index d1ede5e990..59e39a6c14 100644 +--- a/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c ++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_qp.c +@@ -142,7 +142,7 @@ bcmfs_queue_create(struct bcmfs_queue *queue, + + if (bcmfs_qp_check_queue_alignment(qp_mz->iova, align)) { + BCMFS_LOG(ERR, "Invalid alignment on queue create " +- " 0x%" PRIx64 "\n", ++ " 0x%" PRIx64, + queue->base_phys_addr); + ret = -EFAULT; + goto queue_create_err; +diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_pmd.c b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_pmd.c +index 78272d616c..d3b1e25d57 100644 +--- a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_pmd.c ++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_pmd.c +@@ -217,7 +217,7 @@ bcmfs_sym_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id, + bcmfs_private->fsdev->qps_in_use[qp_id] = *qp_addr; + + cdev->data->queue_pairs[qp_id] = qp; +- BCMFS_LOG(NOTICE, "queue %d setup done\n", qp_id); ++ BCMFS_LOG(NOTICE, "queue %d setup done", qp_id); - if (do_ccm) -- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, -+ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, - tag_len, NULL); + return 0; + } +diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_session.c b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_session.c +index 40813d1fe5..64bd4a317a 100644 +--- 
a/dpdk/drivers/crypto/bcmfs/bcmfs_sym_session.c ++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_sym_session.c +@@ -192,7 +192,7 @@ crypto_set_session_parameters(struct bcmfs_sym_session *sess, + rc = -EINVAL; + break; + default: +- BCMFS_DP_LOG(ERR, "Invalid chain order\n"); ++ BCMFS_DP_LOG(ERR, "Invalid chain order"); + rc = -EINVAL; + break; + } +diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr.c b/dpdk/drivers/crypto/caam_jr/caam_jr.c +index b55258689b..1713600db7 100644 +--- a/dpdk/drivers/crypto/caam_jr/caam_jr.c ++++ b/dpdk/drivers/crypto/caam_jr/caam_jr.c +@@ -309,7 +309,7 @@ caam_jr_prep_cdb(struct caam_jr_session *ses) -- if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) -+ if (EVP_EncryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) - return -EINVAL; + cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb)); + if (!cdb) { +- CAAM_JR_ERR("failed to allocate memory for cdb\n"); ++ CAAM_JR_ERR("failed to allocate memory for cdb"); + return -1; + } - return 0; -@@ -410,7 +411,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, - static int - openssl_set_sess_aead_dec_param(struct openssl_session *sess, - enum rte_crypto_aead_algorithm algo, -- uint8_t tag_len, const uint8_t *key) -+ uint8_t tag_len, const uint8_t *key, -+ EVP_CIPHER_CTX **ctx) - { - int iv_type = 0; - unsigned int do_ccm = 0; -@@ -437,7 +439,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, +@@ -606,7 +606,7 @@ hw_poll_job_ring(struct sec_job_ring_t *job_ring, + /*TODO for multiple ops, packets*/ + ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes); + if (unlikely(sec_error_code)) { +- CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n", ++ CAAM_JR_ERR("desc at cidx %d generated error 0x%x", + job_ring->cidx, sec_error_code); + hw_handle_job_ring_error(job_ring, sec_error_code); + //todo improve with exact errors +@@ -1368,7 +1368,7 @@ caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp) + } + + if (unlikely(!ses->qp || ses->qp != qp)) { +- CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp); ++ CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p", ses->qp, qp); + ses->qp = qp; + caam_jr_prep_cdb(ses); + } +@@ -1554,7 +1554,7 @@ caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused, + session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, + RTE_CACHE_LINE_SIZE); + if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { +- CAAM_JR_ERR("No Memory for cipher key\n"); ++ CAAM_JR_ERR("No Memory for cipher key"); + return -ENOMEM; + } + session->cipher_key.length = xform->cipher.key.length; +@@ -1576,7 +1576,7 @@ caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused, + session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, + RTE_CACHE_LINE_SIZE); + if (session->auth_key.data == NULL && xform->auth.key.length > 0) { +- CAAM_JR_ERR("No Memory for auth key\n"); ++ CAAM_JR_ERR("No Memory for auth key"); + return -ENOMEM; + } + session->auth_key.length = xform->auth.key.length; +@@ -1602,7 +1602,7 @@ caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused, + session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, + RTE_CACHE_LINE_SIZE); + if (session->aead_key.data == NULL && xform->aead.key.length > 0) { +- CAAM_JR_ERR("No Memory for aead key\n"); ++ CAAM_JR_ERR("No Memory for aead key"); + return -ENOMEM; + } + session->aead_key.length = xform->aead.key.length; +@@ -1755,7 +1755,7 @@ caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, + RTE_CACHE_LINE_SIZE); + if 
(session->cipher_key.data == NULL && + cipher_xform->key.length > 0) { +- CAAM_JR_ERR("No Memory for cipher key\n"); ++ CAAM_JR_ERR("No Memory for cipher key"); + return -ENOMEM; } - sess->cipher.mode = OPENSSL_CIPHER_LIB; -- sess->cipher.ctx = EVP_CIPHER_CTX_new(); -+ *ctx = EVP_CIPHER_CTX_new(); +@@ -1765,7 +1765,7 @@ caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, + RTE_CACHE_LINE_SIZE); + if (session->auth_key.data == NULL && + auth_xform->key.length > 0) { +- CAAM_JR_ERR("No Memory for auth key\n"); ++ CAAM_JR_ERR("No Memory for auth key"); + rte_free(session->cipher_key.data); + return -ENOMEM; + } +@@ -1810,11 +1810,11 @@ caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, + case RTE_CRYPTO_AUTH_KASUMI_F9: + case RTE_CRYPTO_AUTH_AES_CBC_MAC: + case RTE_CRYPTO_AUTH_ZUC_EIA3: +- CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n", ++ CAAM_JR_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); + goto out; + default: +- CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n", ++ CAAM_JR_ERR("Crypto: Undefined Auth specified %u", + auth_xform->algo); + goto out; + } +@@ -1834,11 +1834,11 @@ caam_jr_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, + case RTE_CRYPTO_CIPHER_3DES_ECB: + case RTE_CRYPTO_CIPHER_AES_ECB: + case RTE_CRYPTO_CIPHER_KASUMI_F8: +- CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n", ++ CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); + goto out; + default: +- CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n", ++ CAAM_JR_ERR("Crypto: Undefined Cipher specified %u", + cipher_xform->algo); + goto out; + } +@@ -1962,7 +1962,7 @@ caam_jr_dev_configure(struct rte_cryptodev *dev, + NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); + if (!internals->ctx_pool) { +- CAAM_JR_ERR("%s create failed\n", str); ++ CAAM_JR_ERR("%s create failed", str); + return -ENOMEM; + } + } else +@@ -2180,7 +2180,7 @@ init_job_ring(void *reg_base_addr, int irq_id) + } + } + if (job_ring == NULL) { +- CAAM_JR_ERR("No free job ring\n"); ++ CAAM_JR_ERR("No free job ring"); + return NULL; + } - if (get_aead_algo(algo, sess->cipher.key.length, - &sess->cipher.evp_algo) != 0) -@@ -447,24 +449,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, +@@ -2301,7 +2301,7 @@ caam_jr_dev_init(const char *name, + job_ring->uio_fd); - sess->chain_order = OPENSSL_CHAIN_COMBINED; + if (!dev->data->dev_private) { +- CAAM_JR_ERR("Ring memory allocation failed\n"); ++ CAAM_JR_ERR("Ring memory allocation failed"); + goto cleanup2; + } -- if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, -+ if (EVP_DecryptInit_ex(*ctx, sess->cipher.evp_algo, - NULL, NULL, NULL) <= 0) - return -EINVAL; +@@ -2334,7 +2334,7 @@ caam_jr_dev_init(const char *name, + security_instance = rte_malloc("caam_jr", + sizeof(struct rte_security_ctx), 0); + if (security_instance == NULL) { +- CAAM_JR_ERR("memory allocation failed\n"); ++ CAAM_JR_ERR("memory allocation failed"); + //todo error handling. 
+ goto cleanup2; + } +diff --git a/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c b/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c +index 583ba3b523..acb40bdf77 100644 +--- a/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c ++++ b/dpdk/drivers/crypto/caam_jr/caam_jr_uio.c +@@ -338,7 +338,7 @@ free_job_ring(int uio_fd) + } -- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, -+ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, - sess->iv.length, NULL) <= 0) - return -EINVAL; + if (job_ring == NULL) { +- CAAM_JR_ERR("JR not available for fd = %x\n", uio_fd); ++ CAAM_JR_ERR("JR not available for fd = %x", uio_fd); + return; + } - if (do_ccm) -- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, -+ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, - tag_len, NULL); +@@ -378,7 +378,7 @@ uio_job_ring *config_job_ring(void) + } -- if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) -+ if (EVP_DecryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) - return -EINVAL; + if (job_ring == NULL) { +- CAAM_JR_ERR("No free job ring\n"); ++ CAAM_JR_ERR("No free job ring"); + return NULL; + } - return 0; - } +@@ -441,7 +441,7 @@ sec_configure(void) + dir->d_name, "name", uio_name); + CAAM_JR_INFO("sec device uio name: %s", uio_name); + if (ret != 0) { +- CAAM_JR_ERR("file_read_first_line failed\n"); ++ CAAM_JR_ERR("file_read_first_line failed"); + closedir(d); + return -1; + } +diff --git a/dpdk/drivers/crypto/ccp/ccp_dev.c b/dpdk/drivers/crypto/ccp/ccp_dev.c +index b7ca3af5a4..6d42b92d8b 100644 +--- a/dpdk/drivers/crypto/ccp/ccp_dev.c ++++ b/dpdk/drivers/crypto/ccp/ccp_dev.c +@@ -362,7 +362,7 @@ ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + weight++; + +- CCP_LOG_DBG("Queue %d can access %d LSB regions of mask %lu\n", ++ CCP_LOG_DBG("Queue %d can access %d LSB regions of mask %lu", + (int)cmd_q->id, weight, cmd_q->lsbmask); + + return weight ? 0 : -EINVAL; +diff --git a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c +index a5271d7227..c92fdb446d 100644 +--- a/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c ++++ b/dpdk/drivers/crypto/ccp/rte_ccp_pmd.c +@@ -228,7 +228,7 @@ cryptodev_ccp_create(const char *name, + } + cryptodev_cnt++; + +- CCP_LOG_DBG("CCP : Crypto device count = %d\n", cryptodev_cnt); ++ CCP_LOG_DBG("CCP : Crypto device count = %d", cryptodev_cnt); + dev->device = &pci_dev->device; + dev->device->driver = &pci_drv->driver; + dev->driver_id = ccp_cryptodev_driver_id; +diff --git a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c +index 997110e3d3..c96cf2b3a1 100644 +--- a/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c ++++ b/dpdk/drivers/crypto/cnxk/cn10k_cryptodev_ops.c +@@ -861,15 +861,20 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, -+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30200000L) -+static int openssl_aesni_ctx_clone(EVP_CIPHER_CTX **dest, -+ struct openssl_session *sess) -+{ -+ /* OpenSSL versions 3.0.0 <= V < 3.2.0 have no dupctx() implementation -+ * for AES-GCM and AES-CCM. In this case, we have to create new empty -+ * contexts and initialise, as we did the original context. 
-+ */ -+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) -+ sess->aead_algo = RTE_CRYPTO_AEAD_AES_GCM; + return; + } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC && +- cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION && +- cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { +- if (likely(compcode == CPT_COMP_GOOD)) { +- if (uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { +- cop->status = RTE_CRYPTO_OP_STATUS_ERROR; +- return; +- } else if (uc_compcode == ROC_AE_ERR_ECC_PAI) { +- cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +- return; ++ cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { ++ struct cnxk_ae_sess *sess; + -+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) -+ return openssl_set_sess_aead_enc_param(sess, sess->aead_algo, -+ sess->auth.digest_length, sess->cipher.key.data, -+ dest); -+ else -+ return openssl_set_sess_aead_dec_param(sess, sess->aead_algo, -+ sess->auth.digest_length, sess->cipher.key.data, -+ dest); -+} -+#endif -+ - /** Set session cipher parameters */ - static int - openssl_set_session_cipher_parameters(struct openssl_session *sess, -@@ -521,6 +545,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, - sess->cipher.key.length, - sess->cipher.key.data) != 0) - return -EINVAL; ++ sess = (struct cnxk_ae_sess *)cop->asym->session; ++ if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH && ++ cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { ++ if (likely(compcode == CPT_COMP_GOOD)) { ++ if (uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { ++ cop->status = RTE_CRYPTO_OP_STATUS_ERROR; ++ return; ++ } else if (uc_compcode == ROC_AE_ERR_ECC_PAI) { ++ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; ++ return; ++ } + } + } + } +diff --git a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c +index 34d40b07d4..eb5575b7ec 100644 +--- a/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c ++++ b/dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c +@@ -578,7 +578,22 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop, + if (unlikely(res->uc_compcode)) { + if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE) + cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; +- else ++ else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC && ++ cop->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { ++ struct cnxk_ae_sess *sess; + ++ sess = (struct cnxk_ae_sess *)cop->asym->session; ++ if (sess->xfrm_type == RTE_CRYPTO_ASYM_XFORM_ECDH && ++ cop->asym->ecdh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_VERIFY) { ++ if (res->uc_compcode == ROC_AE_ERR_ECC_POINT_NOT_ON_CURVE) { ++ cop->status = RTE_CRYPTO_OP_STATUS_ERROR; ++ return; ++ } else if (res->uc_compcode == ROC_AE_ERR_ECC_PAI) { ++ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; ++ return; ++ } ++ } ++ } else + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + + plt_dp_info("Request failed with microcode error"); +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_ae.h b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +index ea11e093bf..ef9cb5eb91 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_ae.h ++++ b/dpdk/drivers/crypto/cnxk/cnxk_ae.h +@@ -49,13 +49,22 @@ struct cnxk_ae_sess { + }; + + static __rte_always_inline void +-cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len) ++cnxk_ae_modex_param_normalize(uint8_t **data, size_t *len, size_t max) + { ++ uint8_t msw_len = *len % 8; ++ uint64_t msw_val = 0; + size_t i; + +- /* Strip leading NUL bytes */ +- for (i = 0; i < *len; i++) { +- if ((*data)[i] != 0) ++ if (*len <= 8) ++ return; + -+ /* We use 3DES encryption 
also for decryption. -+ * IV is not important for 3DES ECB. -+ */ -+ if (EVP_EncryptInit_ex(sess->cipher.ctx, EVP_des_ede3_ecb(), -+ NULL, sess->cipher.key.data, NULL) != 1) -+ return -EINVAL; ++ memcpy(&msw_val, *data, msw_len); ++ if (msw_val != 0) ++ return; + - break; - - case RTE_CRYPTO_CIPHER_DES_CBC: -@@ -586,6 +619,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, - return -ENOTSUP; ++ for (i = msw_len; i < *len && (*len - i) < max; i += 8) { ++ memcpy(&msw_val, &(*data)[i], 8); ++ if (msw_val != 0) + break; } + *data += i; +@@ -72,8 +81,8 @@ cnxk_ae_fill_modex_params(struct cnxk_ae_sess *sess, + uint8_t *exp = xform->modex.exponent.data; + uint8_t *mod = xform->modex.modulus.data; -+ EVP_CIPHER_CTX_set_padding(sess->cipher.ctx, 0); -+ - return 0; - } +- cnxk_ae_modex_param_normalize(&mod, &mod_len); +- cnxk_ae_modex_param_normalize(&exp, &exp_len); ++ cnxk_ae_modex_param_normalize(&mod, &mod_len, SIZE_MAX); ++ cnxk_ae_modex_param_normalize(&exp, &exp_len, mod_len); -@@ -623,12 +658,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, - return openssl_set_sess_aead_enc_param(sess, - RTE_CRYPTO_AEAD_AES_GCM, - xform->auth.digest_length, -- xform->auth.key.data); -+ xform->auth.key.data, -+ &sess->cipher.ctx); - else - return openssl_set_sess_aead_dec_param(sess, - RTE_CRYPTO_AEAD_AES_GCM, - xform->auth.digest_length, -- xform->auth.key.data); -+ xform->auth.key.data, -+ &sess->cipher.ctx); - break; + if (unlikely(exp_len == 0 || mod_len == 0)) + return -EINVAL; +@@ -205,16 +214,22 @@ cnxk_ae_fill_ec_params(struct cnxk_ae_sess *sess, + return 0; - case RTE_CRYPTO_AUTH_MD5: -@@ -770,16 +807,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess, - /* Select cipher direction */ - if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) - return openssl_set_sess_aead_enc_param(sess, xform->aead.algo, -- xform->aead.digest_length, xform->aead.key.data); -+ xform->aead.digest_length, xform->aead.key.data, -+ &sess->cipher.ctx); - else - return openssl_set_sess_aead_dec_param(sess, xform->aead.algo, -- xform->aead.digest_length, xform->aead.key.data); -+ xform->aead.digest_length, xform->aead.key.data, -+ &sess->cipher.ctx); - } + ec->pkey.length = xform->ec.pkey.length; +- if (xform->ec.pkey.length) +- rte_memcpy(ec->pkey.data, xform->ec.pkey.data, xform->ec.pkey.length); ++ if (ec->pkey.length > ROC_AE_EC_DATA_MAX) ++ ec->pkey.length = ROC_AE_EC_DATA_MAX; ++ if (ec->pkey.length) ++ rte_memcpy(ec->pkey.data, xform->ec.pkey.data, ec->pkey.length); - /** Parse crypto xform chain and set private session parameters */ - int - openssl_set_session_parameters(struct openssl_session *sess, -- const struct rte_crypto_sym_xform *xform) -+ const struct rte_crypto_sym_xform *xform, -+ uint16_t nb_queue_pairs) - { - const struct rte_crypto_sym_xform *cipher_xform = NULL; - const struct rte_crypto_sym_xform *auth_xform = NULL; -@@ -841,6 +881,12 @@ openssl_set_session_parameters(struct openssl_session *sess, - } - } + ec->q.x.length = xform->ec.q.x.length; +- if (xform->ec.q.x.length) +- rte_memcpy(ec->q.x.data, xform->ec.q.x.data, xform->ec.q.x.length); ++ if (ec->q.x.length > ROC_AE_EC_DATA_MAX) ++ ec->q.x.length = ROC_AE_EC_DATA_MAX; ++ if (ec->q.x.length) ++ rte_memcpy(ec->q.x.data, xform->ec.q.x.data, ec->q.x.length); + + ec->q.y.length = xform->ec.q.y.length; ++ if (ec->q.y.length > ROC_AE_EC_DATA_MAX) ++ ec->q.y.length = ROC_AE_EC_DATA_MAX; + if (xform->ec.q.y.length) +- rte_memcpy(ec->q.y.data, xform->ec.q.y.data, xform->ec.q.y.length); ++ 
rte_memcpy(ec->q.y.data, xform->ec.q.y.data, ec->q.y.length); -+ /* -+ * With only one queue pair, the array of copies is not needed. -+ * Otherwise, one entry per queue pair is required. -+ */ -+ sess->ctx_copies_len = nb_queue_pairs > 1 ? nb_queue_pairs : 0; -+ return 0; } +@@ -282,7 +297,7 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, + struct rte_crypto_mod_op_param mod_op; + uint64_t total_key_len; + union cpt_inst_w4 w4; +- uint32_t base_len; ++ size_t base_len; + uint32_t dlen; + uint8_t *dptr; -@@ -848,33 +894,45 @@ openssl_set_session_parameters(struct openssl_session *sess, - void - openssl_reset_session(struct openssl_session *sess) - { -- EVP_CIPHER_CTX_free(sess->cipher.ctx); -+ /* Free all the qp_ctx entries. */ -+ for (uint16_t i = 0; i < sess->ctx_copies_len; i++) { -+ if (sess->qp_ctx[i].cipher != NULL) { -+ EVP_CIPHER_CTX_free(sess->qp_ctx[i].cipher); -+ sess->qp_ctx[i].cipher = NULL; -+ } +@@ -290,8 +305,11 @@ cnxk_ae_modex_prep(struct rte_crypto_op *op, struct roc_ae_buf_ptr *meta_buf, -- if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) -- EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); -+ switch (sess->auth.mode) { -+ case OPENSSL_AUTH_AS_AUTH: -+ EVP_MD_CTX_destroy(sess->qp_ctx[i].auth); -+ sess->qp_ctx[i].auth = NULL; -+ break; -+ case OPENSSL_AUTH_AS_HMAC: -+ free_hmac_ctx(sess->qp_ctx[i].hmac); -+ sess->qp_ctx[i].hmac = NULL; -+ break; -+ case OPENSSL_AUTH_AS_CMAC: -+ free_cmac_ctx(sess->qp_ctx[i].cmac); -+ sess->qp_ctx[i].cmac = NULL; -+ break; + base_len = mod_op.base.length; + if (unlikely(base_len > mod_len)) { +- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; +- return -ENOTSUP; ++ cnxk_ae_modex_param_normalize(&mod_op.base.data, &base_len, mod_len); ++ if (base_len > mod_len) { ++ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; ++ return -ENOTSUP; + } -+ } -+ -+ EVP_CIPHER_CTX_free(sess->cipher.ctx); - - switch (sess->auth.mode) { - case OPENSSL_AUTH_AS_AUTH: - EVP_MD_CTX_destroy(sess->auth.auth.ctx); - break; - case OPENSSL_AUTH_AS_HMAC: -- EVP_PKEY_free(sess->auth.hmac.pkey); --# if OPENSSL_VERSION_NUMBER >= 0x30000000L -- EVP_MAC_CTX_free(sess->auth.hmac.ctx); --# else -- HMAC_CTX_free(sess->auth.hmac.ctx); --# endif -+ free_hmac_ctx(sess->auth.hmac.ctx); - break; - case OPENSSL_AUTH_AS_CMAC: --# if OPENSSL_VERSION_NUMBER >= 0x30000000L -- EVP_MAC_CTX_free(sess->auth.cmac.ctx); --# else -- CMAC_CTX_free(sess->auth.cmac.ctx); --# endif -- break; -- default: -+ free_cmac_ctx(sess->auth.cmac.ctx); - break; } -+ -+ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) -+ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); - } - /** Provide session for operation */ -@@ -914,7 +972,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) - sess = (struct openssl_session *)_sess->driver_priv_data; + total_key_len = mod_len + exp_len; +@@ -735,7 +753,11 @@ cnxk_ae_sm2_sign_prep(struct rte_crypto_sm2_op_param *sm2, + uint8_t *dptr; - if (unlikely(openssl_set_session_parameters(sess, -- op->sym->xform) != 0)) { -+ op->sym->xform, 1) != 0)) { - rte_mempool_put(qp->sess_mp, _sess); - sess = NULL; - } -@@ -1068,8 +1126,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, - if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) - goto process_cipher_encrypt_err; + prime_len = ec_grp->prime.length; ++ if (prime_len > ROC_AE_EC_DATA_MAX) ++ prime_len = ROC_AE_EC_DATA_MAX; + order_len = ec_grp->order.length; ++ if (order_len > ROC_AE_EC_DATA_MAX) ++ order_len = ROC_AE_EC_DATA_MAX; -- EVP_CIPHER_CTX_set_padding(ctx, 0); 
-- - if (process_openssl_encryption_update(mbuf_src, offset, &dst, - srclen, ctx, inplace)) - goto process_cipher_encrypt_err; -@@ -1118,8 +1174,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, - if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) - goto process_cipher_decrypt_err; + /* Truncate input length to curve prime length */ + if (message_len > prime_len) +@@ -822,7 +844,11 @@ cnxk_ae_sm2_verify_prep(struct rte_crypto_sm2_op_param *sm2, + uint8_t *dptr; -- EVP_CIPHER_CTX_set_padding(ctx, 0); -- - if (process_openssl_decryption_update(mbuf_src, offset, &dst, - srclen, ctx, inplace)) - goto process_cipher_decrypt_err; -@@ -1136,8 +1190,7 @@ process_cipher_decrypt_err: - /** Process cipher des 3 ctr encryption, decryption algorithm */ - static int - process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, -- int offset, uint8_t *iv, uint8_t *key, int srclen, -- EVP_CIPHER_CTX *ctx) -+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) + prime_len = ec_grp->prime.length; ++ if (prime_len > ROC_AE_EC_DATA_MAX) ++ prime_len = ROC_AE_EC_DATA_MAX; + order_len = ec_grp->order.length; ++ if (order_len > ROC_AE_EC_DATA_MAX) ++ order_len = ROC_AE_EC_DATA_MAX; + + /* Truncate input length to curve prime length */ + if (message_len > prime_len) +diff --git a/dpdk/drivers/crypto/cnxk/cnxk_se.h b/dpdk/drivers/crypto/cnxk/cnxk_se.h +index c2a807fa94..cf163e0208 100644 +--- a/dpdk/drivers/crypto/cnxk/cnxk_se.h ++++ b/dpdk/drivers/crypto/cnxk/cnxk_se.h +@@ -1952,7 +1952,7 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess) + sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT; + sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY; + } else { +- plt_dp_err("Unknown aead operation\n"); ++ plt_dp_err("Unknown aead operation"); + return -1; + } + switch (aead_form->algo) { +@@ -2036,7 +2036,7 @@ fill_sm_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *ses + sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT; + sess->roc_se_ctx.template_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT; + } else { +- plt_dp_err("Unknown cipher operation\n"); ++ plt_dp_err("Unknown cipher operation"); + return -1; + } + +@@ -2113,7 +2113,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess) + ROC_SE_FC_MINOR_OP_HMAC_FIRST; + } + } else { +- plt_dp_err("Unknown cipher operation\n"); ++ plt_dp_err("Unknown cipher operation"); + return -1; + } + +diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +index bb5a2c629e..bd5590c02d 100644 +--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c ++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +@@ -1146,7 +1146,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" +- " data_off: 0x%x\n", ++ " data_off: 0x%x", + data_offset, + data_len, + sess->iv.length, +@@ -1172,7 +1172,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + DPAA2_SET_FLE_FIN(sge); + + DPAA2_SEC_DP_DEBUG( +- "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", ++ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); + +@@ -1212,7 +1212,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" +- " off =%d, len =%d\n", ++ " off =%d, len =%d", + DPAA2_GET_FD_ADDR(fd), + 
DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, +@@ -1292,7 +1292,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + + DPAA2_SEC_DP_DEBUG( + "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," +- " data_off: 0x%x\n", ++ " data_off: 0x%x", + data_offset, + data_len, + sess->iv.length, +@@ -1303,7 +1303,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + fle->length = data_len + sess->iv.length; + + DPAA2_SEC_DP_DEBUG( +- "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", ++ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); + +@@ -1326,7 +1326,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, + + DPAA2_SEC_DP_DEBUG( + "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" +- " off =%d, len =%d\n", ++ " off =%d, len =%d", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, +@@ -1348,12 +1348,12 @@ build_sec_fd(struct rte_crypto_op *op, + } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) { + sess = SECURITY_GET_SESS_PRIV(op->sym->session); + } else { +- DPAA2_SEC_DP_ERR("Session type invalid\n"); ++ DPAA2_SEC_DP_ERR("Session type invalid"); + return -ENOTSUP; + } + + if (!sess) { +- DPAA2_SEC_DP_ERR("Session not available\n"); ++ DPAA2_SEC_DP_ERR("Session not available"); + return -EINVAL; + } + +@@ -1446,7 +1446,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_SEC_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1475,7 +1475,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, + bpid = mempool_to_bpid(mb_pool); + ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); + if (ret) { +- DPAA2_SEC_DP_DEBUG("FD build failed\n"); ++ DPAA2_SEC_DP_DEBUG("FD build failed"); + goto skip_tx; + } + ops++; +@@ -1493,7 +1493,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { + num_tx += loop; + nb_ops -= loop; +- DPAA2_SEC_DP_DEBUG("Enqueue fail\n"); ++ DPAA2_SEC_DP_DEBUG("Enqueue fail"); + /* freeing the fle buffers */ + while (loop < frames_to_send) { + free_fle(&fd_arr[loop], +@@ -1569,7 +1569,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) + + fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + +- DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", ++ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x", + fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); + + /* we are using the first FLE entry to store Mbuf. 
+@@ -1602,7 +1602,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp) + } + + DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," +- " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", ++ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d", + (void *)dst, + dst->buf_addr, + DPAA2_GET_FD_ADDR(fd), +@@ -1824,7 +1824,7 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops, + bpid = mempool_to_bpid(mb_pool); + ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp); + if (ret) { +- DPAA2_SEC_DP_DEBUG("FD build failed\n"); ++ DPAA2_SEC_DP_DEBUG("FD build failed"); + goto skip_tx; + } + ops++; +@@ -1841,7 +1841,7 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops, + if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) { + num_tx += loop; + nb_ops -= loop; +- DPAA2_SEC_DP_DEBUG("Enqueue fail\n"); ++ DPAA2_SEC_DP_DEBUG("Enqueue fail"); + /* freeing the fle buffers */ + while (loop < frames_to_send) { + free_fle(&fd_arr[loop], +@@ -1884,7 +1884,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_SEC_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1937,7 +1937,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, + status = (uint8_t)qbman_result_DQ_flags(dq_storage); + if (unlikely( + (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { +- DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); ++ DPAA2_SEC_DP_DEBUG("No frame is delivered"); + continue; + } + } +@@ -1948,7 +1948,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, + if (unlikely(fd->simple.frc)) { + /* TODO Parse SEC errors */ + if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) { +- DPAA2_SEC_DP_ERR("SEC returned Error - %x\n", ++ DPAA2_SEC_DP_ERR("SEC returned Error - %x", + fd->simple.frc); + if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP) + dpaa2_sec_dump(ops[num_rx]); +@@ -1966,7 +1966,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, + + dpaa2_qp->rx_vq.rx_pkts += num_rx; + +- DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx, ++ DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64, num_rx, + dpaa2_qp->rx_vq.err_pkts); + /*Return the total number of packets received to DPAA2 app*/ + return num_rx; +@@ -2555,7 +2555,7 @@ dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform, + #ifdef CAAM_DESC_DEBUG + int i; + for (i = 0; i < bufsize; i++) +- DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", ++ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", + i, priv->flc_desc[0].desc[i]); + #endif + return ret; +@@ -3466,6 +3466,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, + } + } else { + DPAA2_SEC_ERR("Invalid crypto type"); ++ rte_free(priv); + return -EINVAL; + } + +@@ -4124,7 +4125,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, + cfg.dest_cfg.priority = priority; + + cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; +- cfg.user_ctx = (size_t)(qp); ++ cfg.user_ctx = (size_t)(&qp->rx_vq); + if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) { + cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION; + cfg.order_preservation_en = 1; +@@ -4275,7 +4276,7 @@ check_devargs_handler(const char *key, const char *value, + if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) { + DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not " + "supported, changing to FULL error" +- " prints\n"); ++ " prints"); + dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP; + } + } else +diff --git 
a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c +index 4754b9d6f8..883584a6e2 100644 +--- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c ++++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_raw_dp.c +@@ -605,7 +605,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx, + flc = &priv->flc_desc[0].flc; + + DPAA2_SEC_DP_DEBUG( +- "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d\n", ++ "RAW CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d", + data_offset, + data_len, + sess->iv.length); +@@ -642,7 +642,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx, + DPAA2_SET_FLE_FIN(sge); + + DPAA2_SEC_DP_DEBUG( +- "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", ++ "RAW CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); + +@@ -678,7 +678,7 @@ build_raw_dp_cipher_fd(uint8_t *drv_ctx, + DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); + + DPAA2_SEC_DP_DEBUG( +- "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d\n", ++ "RAW CIPHER SG: fdaddr =%" PRIx64 " off =%d, len =%d", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); +@@ -721,7 +721,7 @@ dpaa2_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_SEC_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -811,7 +811,7 @@ sec_fd_to_userdata(const struct qbman_fd *fd) + void *userdata; + fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + +- DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", ++ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x", + fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); + userdata = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); + /* free the fle memory */ +@@ -847,7 +847,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_SEC_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -900,7 +900,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx, + status = (uint8_t)qbman_result_DQ_flags(dq_storage); + if (unlikely( + (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { +- DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); ++ DPAA2_SEC_DP_DEBUG("No frame is delivered"); + continue; + } + } +@@ -929,7 +929,7 @@ dpaa2_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx, + *dequeue_status = 1; + *n_success = num_rx; + +- DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); ++ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets", num_rx); + /*Return the total number of packets received to DPAA2 app*/ + return num_rx; + } +diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +index a301e8edb2..131cd90c94 100644 +--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c ++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c +@@ -102,7 +102,7 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused, + struct qman_fq *fq, + const struct qm_mr_entry *msg) { - uint8_t ebuf[8], ctr[8]; - int unused, n; -@@ -1155,12 +1208,6 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, - src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); - l = rte_pktmbuf_data_len(m) - offset; +- DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n", ++ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x", + fq->fqid, msg->ern.rc, msg->ern.seqnum); + } -- 
/* We use 3DES encryption also for decryption. -- * IV is not important for 3DES ecb -- */ -- if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0) -- goto process_cipher_des3ctr_err; -- - memcpy(ctr, iv, 8); +@@ -395,10 +395,10 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses) - for (n = 0; n < srclen; n++) { -@@ -1427,6 +1474,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, - if (m == 0) - goto process_auth_err; + cdb->sh_desc[0] = cipherdata.keylen; + cdb->sh_desc[1] = authdata.keylen; +- err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, ++ err = rta_inline_ipsec_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN, + DESC_JOB_IO_LEN, + (unsigned int *)cdb->sh_desc, +- &cdb->sh_desc[2], 2); ++ &cdb->sh_desc[2], 2, authdata.algtype, 1); -+ if (EVP_MAC_init(ctx, NULL, 0, NULL) <= 0) -+ goto process_auth_err; -+ - src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); + if (err < 0) { + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); +@@ -849,7 +849,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } else { + if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) { +- DPAA_SEC_DP_WARN("SEC return err:0x%x\n", ++ DPAA_SEC_DP_WARN("SEC return err:0x%x", + ctx->fd_status); + if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP) + dpaa_sec_dump(ctx, qp); +@@ -1944,7 +1944,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, + } else if (unlikely(ses->qp[rte_lcore_id() % + MAX_DPAA_CORES] != qp)) { + DPAA_SEC_DP_ERR("Old:sess->qp = %p" +- " New qp = %p\n", ++ " New qp = %p", + ses->qp[rte_lcore_id() % + MAX_DPAA_CORES], qp); + frames_to_send = loop; +@@ -2054,7 +2054,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, + fd->cmd = 0x80000000 | + *((uint32_t *)((uint8_t *)op + + ses->pdcp.hfn_ovd_offset)); +- DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n", ++ DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u", + *((uint32_t *)((uint8_t *)op + + ses->pdcp.hfn_ovd_offset)), + ses->pdcp.hfn_ovd); +@@ -2095,7 +2095,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, + dpaa_qp->rx_pkts += num_rx; + dpaa_qp->rx_errs += nb_ops - num_rx; + +- DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); ++ DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx); + + return num_rx; + } +@@ -2158,7 +2158,7 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, + NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); + if (!qp->ctx_pool) { +- DPAA_SEC_ERR("%s create failed\n", str); ++ DPAA_SEC_ERR("%s create failed", str); + return -ENOMEM; + } + } else +@@ -2459,7 +2459,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, + session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, + RTE_CACHE_LINE_SIZE); + if (session->aead_key.data == NULL && xform->aead.key.length > 0) { +- DPAA_SEC_ERR("No Memory for aead key\n"); ++ DPAA_SEC_ERR("No Memory for aead key"); + return -ENOMEM; + } + session->aead_key.length = xform->aead.key.length; +@@ -2508,7 +2508,7 @@ dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq) + for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) { + if (&qi->inq[i] == fq) { + if (qman_retire_fq(fq, NULL) != 0) +- DPAA_SEC_DEBUG("Queue is not retired\n"); ++ DPAA_SEC_DEBUG("Queue is not retired"); + qman_oos_fq(fq); + qi->inq_attach[i] = 0; + return 0; +@@ -3483,7 +3483,7 @@ dpaa_sec_eventq_attach(const struct rte_cryptodev *dev, + qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event; + break; + case 
RTE_SCHED_TYPE_ORDERED: +- DPAA_SEC_ERR("Ordered queue schedule type is not supported\n"); ++ DPAA_SEC_ERR("Ordered queue schedule type is not supported"); + return -ENOTSUP; + default: + opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK; +@@ -3582,7 +3582,7 @@ check_devargs_handler(__rte_unused const char *key, const char *value, + dpaa_sec_dp_dump = atoi(value); + if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) { + DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not " +- "supported, changing to FULL error prints\n"); ++ "supported, changing to FULL error prints"); + dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP; + } - l = rte_pktmbuf_data_len(m) - offset; -@@ -1453,11 +1503,9 @@ process_auth_final: - if (EVP_MAC_final(ctx, dst, &dstlen, DIGEST_LENGTH_MAX) != 1) - goto process_auth_err; +@@ -3645,7 +3645,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) -- EVP_MAC_CTX_free(ctx); + ret = munmap(internals->sec_hw, MAP_SIZE); + if (ret) +- DPAA_SEC_WARN("munmap failed\n"); ++ DPAA_SEC_WARN("munmap failed"); + + close(map_fd); + cryptodev->driver_id = dpaa_cryptodev_driver_id; +@@ -3713,7 +3713,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) return 0; - process_auth_err: -- EVP_MAC_CTX_free(ctx); - OPENSSL_LOG(ERR, "Process openssl auth failed"); - return -EINVAL; + init_error: +- DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); ++ DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name); + + rte_free(cryptodev->security_ctx); + return -EFAULT; +diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h +index fb895a8bc6..d298ac5b57 100644 +--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h ++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_log.h +@@ -29,7 +29,7 @@ extern int dpaa_logtype_sec; + + /* DP Logs, toggled out at compile time if level lower than current level */ + #define DPAA_SEC_DP_LOG(level, fmt, args...) \ +- RTE_LOG_DP(level, PMD, fmt, ## args) ++ RTE_LOG_DP(level, PMD, fmt "\n", ## args) + + #define DPAA_SEC_DP_DEBUG(fmt, args...) 
\ + DPAA_SEC_DP_LOG(DEBUG, fmt, ## args) +diff --git a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c +index ce49c4996f..f62c803894 100644 +--- a/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c ++++ b/dpdk/drivers/crypto/dpaa_sec/dpaa_sec_raw_dp.c +@@ -761,7 +761,7 @@ build_dpaa_raw_proto_sg(uint8_t *drv_ctx, + fd->cmd = 0x80000000 | + *((uint32_t *)((uint8_t *)userdata + + ses->pdcp.hfn_ovd_offset)); +- DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n", ++ DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u", + *((uint32_t *)((uint8_t *)userdata + + ses->pdcp.hfn_ovd_offset)), + ses->pdcp.hfn_ovd); +@@ -806,7 +806,7 @@ dpaa_sec_raw_enqueue_burst(void *qp_data, uint8_t *drv_ctx, + } else if (unlikely(ses->qp[rte_lcore_id() % + MAX_DPAA_CORES] != dpaa_qp)) { + DPAA_SEC_DP_ERR("Old:sess->qp = %p" +- " New qp = %p\n", ++ " New qp = %p", + ses->qp[rte_lcore_id() % + MAX_DPAA_CORES], dpaa_qp); + frames_to_send = loop; +@@ -955,7 +955,7 @@ dpaa_sec_raw_dequeue_burst(void *qp_data, uint8_t *drv_ctx, + *dequeue_status = 1; + *n_success = num_rx; + +- DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); ++ DPAA_SEC_DP_DEBUG("SEC Received %d Packets", num_rx); + + return num_rx; } -@@ -1569,11 +1617,151 @@ process_auth_err: - # endif - /*----------------------------------------------------------------------------*/ +diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +index 30f919cd40..2a5599b7d8 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c ++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_ops.c +@@ -406,7 +406,7 @@ ipsec_mb_ipc_request(const struct rte_mp_msg *mp_msg, const void *peer) + resp_param->result = ipsec_mb_qp_release(dev, qp_id); + break; + default: +- CDEV_LOG_ERR("invalid mp request type\n"); ++ CDEV_LOG_ERR("invalid mp request type"); + } -+static inline EVP_CIPHER_CTX * -+get_local_cipher_ctx(struct openssl_session *sess, struct openssl_qp *qp) + out: +diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c +index f485d130b6..0d2538832d 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c ++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.c +@@ -165,7 +165,7 @@ ipsec_mb_create(struct rte_vdev_device *vdev, + + rte_cryptodev_pmd_probing_finish(dev); + +- IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", ++ IPSEC_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s", + imb_get_version_str()); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { +@@ -176,7 +176,7 @@ ipsec_mb_create(struct rte_vdev_device *vdev, + + if (retval) + IPSEC_MB_LOG(ERR, +- "IPSec Multi-buffer register MP request failed.\n"); ++ "IPSec Multi-buffer register MP request failed."); + } + return retval; + } +diff --git a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h +index 52722f94a0..252bcb3192 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h ++++ b/dpdk/drivers/crypto/ipsec_mb/ipsec_mb_private.h +@@ -198,7 +198,7 @@ alloc_init_mb_mgr(void) + IMB_MGR *mb_mgr = alloc_mb_mgr(0); + + if (unlikely(mb_mgr == NULL)) { +- IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data\n"); ++ IPSEC_MB_LOG(ERR, "Failed to allocate IMB_MGR data"); + return NULL; + } + +diff --git a/dpdk/drivers/crypto/ipsec_mb/meson.build b/dpdk/drivers/crypto/ipsec_mb/meson.build +index 87bf965554..81631d3050 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/meson.build ++++ 
b/dpdk/drivers/crypto/ipsec_mb/meson.build +@@ -17,7 +17,7 @@ if not lib.found() + build = false + reason = 'missing dependency, "libIPSec_MB"' + # if the lib is found, check it's the right format +-elif meson.version().version_compare('>=0.60') and not cc.links( ++elif not cc.links( + 'int main(void) {return 0;}', dependencies: lib) + build = false + reason = 'incompatible dependency, "libIPSec_MB"' +diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +index 4de4866cf3..8e74645e0a 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c ++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_aesni_mb.c +@@ -107,7 +107,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + uint16_t xcbc_mac_digest_len = + get_truncated_digest_byte_length(IMB_AUTH_AES_XCBC); + if (sess->auth.req_digest_len != xcbc_mac_digest_len) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; +@@ -130,7 +130,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + get_digest_byte_length(IMB_AUTH_AES_CMAC); + + if (sess->auth.req_digest_len > cmac_digest_len) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + /* +@@ -165,7 +165,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + + if (sess->auth.req_digest_len > + get_digest_byte_length(IMB_AUTH_AES_GMAC)) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; +@@ -192,7 +192,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + sess->template_job.key_len_in_bytes = IMB_KEY_256_BYTES; + break; + default: +- IPSEC_MB_LOG(ERR, "Invalid authentication key length\n"); ++ IPSEC_MB_LOG(ERR, "Invalid authentication key length"); + return -EINVAL; + } + sess->template_job.u.GMAC._key = &sess->cipher.gcm_key; +@@ -205,7 +205,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + sess->template_job.hash_alg = IMB_AUTH_ZUC_EIA3_BITLEN; + + if (sess->auth.req_digest_len != 4) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + } else if (xform->auth.key.length == 32) { +@@ -217,11 +217,11 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + #else + if (sess->auth.req_digest_len != 4) { + #endif +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + } else { +- IPSEC_MB_LOG(ERR, "Invalid authentication key length\n"); ++ IPSEC_MB_LOG(ERR, "Invalid authentication key length"); + return -EINVAL; + } + +@@ -237,7 +237,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + get_truncated_digest_byte_length( + IMB_AUTH_SNOW3G_UIA2_BITLEN); + if (sess->auth.req_digest_len != snow3g_uia2_digest_len) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; +@@ -252,7 +252,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + uint16_t kasumi_f9_digest_len = + get_truncated_digest_byte_length(IMB_AUTH_KASUMI_UIA1); + if (sess->auth.req_digest_len != kasumi_f9_digest_len) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest 
size"); + return -EINVAL; + } + sess->template_job.auth_tag_output_len_in_bytes = sess->auth.req_digest_len; +@@ -361,7 +361,7 @@ aesni_mb_set_session_auth_parameters(const IMB_MGR *mb_mgr, + + if (sess->auth.req_digest_len > full_digest_size || + sess->auth.req_digest_len == 0) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + +@@ -691,7 +691,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, + if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN || + sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN || + (sess->auth.req_digest_len & 1) == 1) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + break; +@@ -727,7 +727,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, + /* GCM digest size must be between 1 and 16 */ + if (sess->auth.req_digest_len == 0 || + sess->auth.req_digest_len > 16) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + break; +@@ -748,7 +748,7 @@ aesni_mb_set_session_aead_parameters(const IMB_MGR *mb_mgr, + sess->template_job.enc_keys = sess->cipher.expanded_aes_keys.encode; + sess->template_job.dec_keys = sess->cipher.expanded_aes_keys.decode; + if (sess->auth.req_digest_len != 16) { +- IPSEC_MB_LOG(ERR, "Invalid digest size\n"); ++ IPSEC_MB_LOG(ERR, "Invalid digest size"); + return -EINVAL; + } + break; +@@ -1200,7 +1200,7 @@ handle_sgl_linear(IMB_JOB *job, struct rte_crypto_op *op, uint32_t dst_offset, + total_len = sgl_linear_cipher_auth_len(job, &auth_len); + linear_buf = rte_zmalloc(NULL, total_len + job->auth_tag_output_len_in_bytes, 0); + if (linear_buf == NULL) { +- IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer\n"); ++ IPSEC_MB_LOG(ERR, "Error allocating memory for SGL Linear Buffer"); + return -1; + } + +@@ -1500,7 +1500,7 @@ aesni_mb_digest_appended_in_src(struct rte_crypto_op *op, IMB_JOB *job, + * + * @return + * - 0 on success, the IMB_JOB will be filled +- * - -1 if invalid session or errors allocationg SGL linear buffer, ++ * - -1 if invalid session or errors allocating SGL linear buffer, + * IMB_JOB will not be filled + */ + static inline int +diff --git a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c +index e64df1a462..a0b354bb83 100644 +--- a/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c ++++ b/dpdk/drivers/crypto/ipsec_mb/pmd_snow3g.c +@@ -186,7 +186,7 @@ process_snow3g_cipher_op_bit(struct ipsec_mb_qp *qp, + src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + if (op->sym->m_dst == NULL) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; +- IPSEC_MB_LOG(ERR, "bit-level in-place not supported\n"); ++ IPSEC_MB_LOG(ERR, "bit-level in-place not supported"); + return 0; + } + length_in_bits = op->sym->cipher.data.length; +@@ -317,7 +317,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, + IPSEC_MB_LOG(ERR, + "PMD supports only contiguous mbufs, " + "op (%p) provides noncontiguous mbuf as " +- "source/destination buffer.\n", ops[i]); ++ "source/destination buffer.", ops[i]); + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return 0; + } +diff --git a/dpdk/drivers/crypto/mlx5/mlx5_crypto_gcm.c b/dpdk/drivers/crypto/mlx5/mlx5_crypto_gcm.c +index 8b9953b46d..9b6c8dc4d5 100644 +--- a/dpdk/drivers/crypto/mlx5/mlx5_crypto_gcm.c ++++ b/dpdk/drivers/crypto/mlx5/mlx5_crypto_gcm.c +@@ -856,7 +856,7 @@ mlx5_crypto_gcm_cqe_err_handle(struct 
mlx5_crypto_qp *qp, struct rte_crypto_op * + { + uint8_t op_code; + const uint32_t idx = qp->cq_ci & (qp->entries_n - 1); +- volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *) ++ volatile struct mlx5_error_cqe *cqe = (volatile struct mlx5_error_cqe *) + &qp->cq_obj.cqes[idx]; + + op_code = rte_be_to_cpu_32(cqe->s_wqe_opcode_qpn) >> MLX5_CQ_INDEX_WIDTH; +diff --git a/dpdk/drivers/crypto/mlx5/mlx5_crypto_xts.c b/dpdk/drivers/crypto/mlx5/mlx5_crypto_xts.c +index d4e1dd718c..b9214711ac 100644 +--- a/dpdk/drivers/crypto/mlx5/mlx5_crypto_xts.c ++++ b/dpdk/drivers/crypto/mlx5/mlx5_crypto_xts.c +@@ -363,7 +363,7 @@ static __rte_noinline void + mlx5_crypto_xts_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op) + { + const uint32_t idx = qp->ci & (qp->entries_n - 1); +- volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *) ++ volatile struct mlx5_error_cqe *cqe = (volatile struct mlx5_error_cqe *) + &qp->cq_obj.cqes[idx]; + + op->status = RTE_CRYPTO_OP_STATUS_ERROR; +diff --git a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h +index 4647d568de..aa2363ef15 100644 +--- a/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h ++++ b/dpdk/drivers/crypto/octeontx/otx_cryptodev_hw_access.h +@@ -211,7 +211,7 @@ otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count) + static __rte_always_inline void * + get_cpt_inst(struct command_queue *cqueue) + { +- CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx); ++ CPT_LOG_DP_DEBUG("CPT queue idx %u", cqueue->idx); + return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE]; + } + +@@ -305,9 +305,9 @@ complete: + " error, MC completion code : 0x%x", user_req, + ret); + } +- CPT_LOG_DP_DEBUG("MC status %.8x\n", ++ CPT_LOG_DP_DEBUG("MC status %.8x", + *((volatile uint32_t *)user_req->alternate_caddr)); +- CPT_LOG_DP_DEBUG("HW status %.8x\n", ++ CPT_LOG_DP_DEBUG("HW status %.8x", + *((volatile uint32_t *)user_req->completion_addr)); + } else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) || + (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) { +diff --git a/dpdk/drivers/crypto/openssl/compat.h b/dpdk/drivers/crypto/openssl/compat.h +index 9f9167c4f1..e1814fea8c 100644 +--- a/dpdk/drivers/crypto/openssl/compat.h ++++ b/dpdk/drivers/crypto/openssl/compat.h +@@ -5,6 +5,32 @@ + #ifndef __RTA_COMPAT_H__ + #define __RTA_COMPAT_H__ + ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static __rte_always_inline void ++free_hmac_ctx(EVP_MAC_CTX *ctx) +{ -+ /* If the array is not being used, just return the main context. */ -+ if (sess->ctx_copies_len == 0) -+ return sess->cipher.ctx; -+ -+ EVP_CIPHER_CTX **lctx = &sess->qp_ctx[qp->id].cipher; ++ EVP_MAC_CTX_free(ctx); ++} + -+ if (unlikely(*lctx == NULL)) { -+#if OPENSSL_VERSION_NUMBER >= 0x30200000L -+ /* EVP_CIPHER_CTX_dup() added in OSSL 3.2 */ -+ *lctx = EVP_CIPHER_CTX_dup(sess->cipher.ctx); -+ return *lctx; -+#elif OPENSSL_VERSION_NUMBER >= 0x30000000L -+ if (sess->chain_order == OPENSSL_CHAIN_COMBINED) { -+ /* AESNI special-cased to use openssl_aesni_ctx_clone() -+ * to allow for working around lack of -+ * EVP_CIPHER_CTX_copy support for 3.0.0 <= OSSL Version -+ * < 3.2.0. 
-+ */ -+ if (openssl_aesni_ctx_clone(lctx, sess) != 0) -+ *lctx = NULL; -+ return *lctx; -+ } -+#endif -+ -+ *lctx = EVP_CIPHER_CTX_new(); -+ EVP_CIPHER_CTX_copy(*lctx, sess->cipher.ctx); -+ } -+ -+ return *lctx; -+} -+ -+static inline EVP_MD_CTX * -+get_local_auth_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++static __rte_always_inline void ++free_cmac_ctx(EVP_MAC_CTX *ctx) +{ -+ /* If the array is not being used, just return the main context. */ -+ if (sess->ctx_copies_len == 0) -+ return sess->auth.auth.ctx; -+ -+ EVP_MD_CTX **lctx = &sess->qp_ctx[qp->id].auth; -+ -+ if (unlikely(*lctx == NULL)) { -+#if OPENSSL_VERSION_NUMBER >= 0x30100000L -+ /* EVP_MD_CTX_dup() added in OSSL 3.1 */ -+ *lctx = EVP_MD_CTX_dup(sess->auth.auth.ctx); -+#else -+ *lctx = EVP_MD_CTX_new(); -+ EVP_MD_CTX_copy(*lctx, sess->auth.auth.ctx); -+#endif -+ } -+ -+ return *lctx; ++ EVP_MAC_CTX_free(ctx); +} -+ -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+static inline EVP_MAC_CTX * +#else -+static inline HMAC_CTX * -+#endif -+get_local_hmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++static __rte_always_inline void ++free_hmac_ctx(HMAC_CTX *ctx) +{ -+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) -+ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of -+ * EVP_MAC_CTXs is broken, and doesn't actually reset their -+ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid -+ * undefined behavior of provided macs on EVP_MAC -+ * reinitialization"). In cases where the fix is not present, -+ * fall back to duplicating the context every buffer as a -+ * workaround, at the cost of performance. -+ */ -+ RTE_SET_USED(qp); -+ return EVP_MAC_CTX_dup(sess->auth.hmac.ctx); -+#else -+ if (sess->ctx_copies_len == 0) -+ return sess->auth.hmac.ctx; -+ -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+ EVP_MAC_CTX **lctx = -+#else -+ HMAC_CTX **lctx = -+#endif -+ &sess->qp_ctx[qp->id].hmac; -+ -+ if (unlikely(*lctx == NULL)) { -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+ *lctx = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); -+#else -+ *lctx = HMAC_CTX_new(); -+ HMAC_CTX_copy(*lctx, sess->auth.hmac.ctx); -+#endif -+ } -+ -+ return *lctx; -+#endif ++ HMAC_CTX_free(ctx); +} + -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+static inline EVP_MAC_CTX * -+#else -+static inline CMAC_CTX * -+#endif -+get_local_cmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++static __rte_always_inline void ++free_cmac_ctx(CMAC_CTX *ctx) +{ -+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) -+ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of -+ * EVP_MAC_CTXs is broken, and doesn't actually reset their -+ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid -+ * undefined behavior of provided macs on EVP_MAC -+ * reinitialization"). In cases where the fix is not present, -+ * fall back to duplicating the context every buffer as a -+ * workaround, at the cost of performance. 
-+ */ -+ RTE_SET_USED(qp); -+ return EVP_MAC_CTX_dup(sess->auth.cmac.ctx); -+#else -+ if (sess->ctx_copies_len == 0) -+ return sess->auth.cmac.ctx; -+ -+#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+ EVP_MAC_CTX **lctx = -+#else -+ CMAC_CTX **lctx = ++ CMAC_CTX_free(ctx); ++} +#endif -+ &sess->qp_ctx[qp->id].cmac; + -+ if (unlikely(*lctx == NULL)) { + #if (OPENSSL_VERSION_NUMBER < 0x10100000L) + + static __rte_always_inline int +diff --git a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h +index 334912d335..aa3f466e74 100644 +--- a/dpdk/drivers/crypto/openssl/openssl_pmd_private.h ++++ b/dpdk/drivers/crypto/openssl/openssl_pmd_private.h +@@ -80,6 +80,20 @@ struct openssl_qp { + */ + } __rte_cache_aligned; + ++struct evp_ctx_pair { ++ EVP_CIPHER_CTX *cipher; ++ union { ++ EVP_MD_CTX *auth; +#if OPENSSL_VERSION_NUMBER >= 0x30000000L -+ *lctx = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); ++ EVP_MAC_CTX *hmac; ++ EVP_MAC_CTX *cmac; +#else -+ *lctx = CMAC_CTX_new(); -+ CMAC_CTX_copy(*lctx, sess->auth.cmac.ctx); -+#endif -+ } -+ -+ return *lctx; ++ HMAC_CTX *hmac; ++ CMAC_CTX *cmac; +#endif -+} ++ }; ++}; + - /** Process auth/cipher combined operation */ - static void --process_openssl_combined_op -- (struct rte_crypto_op *op, struct openssl_session *sess, -- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) -+process_openssl_combined_op(struct openssl_qp *qp, struct rte_crypto_op *op, -+ struct openssl_session *sess, struct rte_mbuf *mbuf_src, -+ struct rte_mbuf *mbuf_dst) - { - /* cipher */ - uint8_t *dst = NULL, *iv, *tag, *aad; -@@ -1590,6 +1778,8 @@ process_openssl_combined_op + /** OPENSSL crypto private session structure */ + struct openssl_session { + enum openssl_chain_order chain_order; +@@ -166,6 +180,15 @@ struct openssl_session { + /**< digest length */ + } auth; + ++ uint16_t ctx_copies_len; ++ /* < number of entries in ctx_copies */ ++ struct evp_ctx_pair qp_ctx[]; ++ /**< Flexible array member of per-queue-pair structures, each containing ++ * pointers to copies of the cipher and auth EVP contexts. Cipher ++ * contexts are not safe to use from multiple cores simultaneously, so ++ * maintaining these copies allows avoiding per-buffer copying into a ++ * temporary context. 
++ */ + } __rte_cache_aligned; + + /** OPENSSL crypto private asymmetric session structure */ +@@ -217,7 +240,8 @@ struct openssl_asym_session { + /** Set and validate OPENSSL crypto session parameters */ + extern int + openssl_set_session_parameters(struct openssl_session *sess, +- const struct rte_crypto_sym_xform *xform); ++ const struct rte_crypto_sym_xform *xform, ++ uint16_t nb_queue_pairs); + + /** Reset OPENSSL crypto session parameters */ + extern void +diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +index e8cb09defc..017e74e765 100644 +--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c ++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +@@ -2,6 +2,7 @@ + * Copyright(c) 2016-2017 Intel Corporation + */ + ++#include + #include + #include + #include +@@ -57,13 +58,13 @@ static void ossl_legacy_provider_load(void) + /* Load Multiple providers into the default (NULL) library context */ + legacy = OSSL_PROVIDER_load(NULL, "legacy"); + if (legacy == NULL) { +- OPENSSL_LOG(ERR, "Failed to load Legacy provider\n"); ++ OPENSSL_LOG(ERR, "Failed to load Legacy provider"); return; } -+ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); -+ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->iv.offset); - if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { -@@ -1623,12 +1813,12 @@ process_openssl_combined_op - status = process_openssl_auth_encryption_gcm( - mbuf_src, offset, srclen, - aad, aadlen, iv, -- dst, tag, sess->cipher.ctx); -+ dst, tag, ctx); - else - status = process_openssl_auth_encryption_ccm( - mbuf_src, offset, srclen, - aad, aadlen, iv, -- dst, tag, taglen, sess->cipher.ctx); -+ dst, tag, taglen, ctx); - - } else { - if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC || -@@ -1636,12 +1826,12 @@ process_openssl_combined_op - status = process_openssl_auth_decryption_gcm( - mbuf_src, offset, srclen, - aad, aadlen, iv, -- dst, tag, sess->cipher.ctx); -+ dst, tag, ctx); - else - status = process_openssl_auth_decryption_ccm( - mbuf_src, offset, srclen, - aad, aadlen, iv, -- dst, tag, taglen, sess->cipher.ctx); -+ dst, tag, taglen, ctx); + deflt = OSSL_PROVIDER_load(NULL, "default"); + if (deflt == NULL) { +- OPENSSL_LOG(ERR, "Failed to load Default provider\n"); ++ OPENSSL_LOG(ERR, "Failed to load Default provider"); + OSSL_PROVIDER_unload(legacy); + return; } +@@ -99,22 +100,6 @@ digest_name_get(enum rte_crypto_auth_algorithm algo) - if (status != 0) { -@@ -1656,14 +1846,13 @@ process_openssl_combined_op + static int cryptodev_openssl_remove(struct rte_vdev_device *vdev); - /** Process cipher operation */ - static void --process_openssl_cipher_op -- (struct rte_crypto_op *op, struct openssl_session *sess, -- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) -+process_openssl_cipher_op(struct openssl_qp *qp, struct rte_crypto_op *op, -+ struct openssl_session *sess, struct rte_mbuf *mbuf_src, -+ struct rte_mbuf *mbuf_dst) +-/*----------------------------------------------------------------------------*/ +- +-/** +- * Increment counter by 1 +- * Counter is 64 bit array, big-endian +- */ +-static void +-ctr_inc(uint8_t *ctr) +-{ +- uint64_t *ctr64 = (uint64_t *)ctr; +- +- *ctr64 = __builtin_bswap64(*ctr64); +- (*ctr64)++; +- *ctr64 = __builtin_bswap64(*ctr64); +-} +- + /* + *------------------------------------------------------------------------------ + * Session Prepare +@@ -350,7 +335,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen, + static int + openssl_set_sess_aead_enc_param(struct 
openssl_session *sess, + enum rte_crypto_aead_algorithm algo, +- uint8_t tag_len, const uint8_t *key) ++ uint8_t tag_len, const uint8_t *key, ++ EVP_CIPHER_CTX **ctx) { - uint8_t *dst, *iv; - int srclen, status; - uint8_t inplace = (mbuf_src == mbuf_dst) ? 1 : 0; -- EVP_CIPHER_CTX *ctx_copy; - - /* - * Segmented OOP destination buffer is not supported for encryption/ -@@ -1682,25 +1871,22 @@ process_openssl_cipher_op + int iv_type = 0; + unsigned int do_ccm; +@@ -378,7 +364,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, + } - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->iv.offset); -- ctx_copy = EVP_CIPHER_CTX_new(); -- EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx); -+ -+ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); + sess->cipher.mode = OPENSSL_CIPHER_LIB; +- sess->cipher.ctx = EVP_CIPHER_CTX_new(); ++ *ctx = EVP_CIPHER_CTX_new(); - if (sess->cipher.mode == OPENSSL_CIPHER_LIB) - if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) - status = process_openssl_cipher_encrypt(mbuf_src, dst, - op->sym->cipher.data.offset, iv, -- srclen, ctx_copy, inplace); -+ srclen, ctx, inplace); - else - status = process_openssl_cipher_decrypt(mbuf_src, dst, - op->sym->cipher.data.offset, iv, -- srclen, ctx_copy, inplace); -+ srclen, ctx, inplace); - else - status = process_openssl_cipher_des3ctr(mbuf_src, dst, -- op->sym->cipher.data.offset, iv, -- sess->cipher.key.data, srclen, -- ctx_copy); -+ op->sym->cipher.data.offset, iv, srclen, ctx); + if (get_aead_algo(algo, sess->cipher.key.length, + &sess->cipher.evp_algo) != 0) +@@ -388,19 +374,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, -- EVP_CIPHER_CTX_free(ctx_copy); - if (status != 0) - op->status = RTE_CRYPTO_OP_STATUS_ERROR; - } -@@ -1819,42 +2005,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, + sess->chain_order = OPENSSL_CHAIN_COMBINED; - switch (sess->auth.mode) { - case OPENSSL_AUTH_AS_AUTH: -- ctx_a = EVP_MD_CTX_create(); -- EVP_MD_CTX_copy_ex(ctx_a, sess->auth.auth.ctx); -+ ctx_a = get_local_auth_ctx(sess, qp); - status = process_openssl_auth(mbuf_src, dst, - op->sym->auth.data.offset, NULL, NULL, srclen, - ctx_a, sess->auth.auth.evp_algo); -- EVP_MD_CTX_destroy(ctx_a); - break; - case OPENSSL_AUTH_AS_HMAC: -+ ctx_h = get_local_hmac_ctx(sess, qp); - # if OPENSSL_VERSION_NUMBER >= 0x30000000L -- ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); - status = process_openssl_auth_mac(mbuf_src, dst, - op->sym->auth.data.offset, srclen, - ctx_h); - # else -- ctx_h = HMAC_CTX_new(); -- HMAC_CTX_copy(ctx_h, sess->auth.hmac.ctx); - status = process_openssl_auth_hmac(mbuf_src, dst, - op->sym->auth.data.offset, srclen, - ctx_h); -- HMAC_CTX_free(ctx_h); - # endif -+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) -+ EVP_MAC_CTX_free(ctx_h); -+#endif - break; - case OPENSSL_AUTH_AS_CMAC: -+ ctx_c = get_local_cmac_ctx(sess, qp); - # if OPENSSL_VERSION_NUMBER >= 0x30000000L -- ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); - status = process_openssl_auth_mac(mbuf_src, dst, - op->sym->auth.data.offset, srclen, - ctx_c); - # else -- ctx_c = CMAC_CTX_new(); -- CMAC_CTX_copy(ctx_c, sess->auth.cmac.ctx); - status = process_openssl_auth_cmac(mbuf_src, dst, - op->sym->auth.data.offset, srclen, - ctx_c); -- CMAC_CTX_free(ctx_c); - # endif -+#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) -+ EVP_MAC_CTX_free(ctx_c); -+#endif - break; - default: - status = -1; -@@ -3105,13 +3289,13 @@ process_op(struct 
openssl_qp *qp, struct rte_crypto_op *op, +- if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, ++ if (EVP_EncryptInit_ex(*ctx, sess->cipher.evp_algo, + NULL, NULL, NULL) <= 0) + return -EINVAL; - switch (sess->chain_order) { - case OPENSSL_CHAIN_ONLY_CIPHER: -- process_openssl_cipher_op(op, sess, msrc, mdst); -+ process_openssl_cipher_op(qp, op, sess, msrc, mdst); - break; - case OPENSSL_CHAIN_ONLY_AUTH: - process_openssl_auth_op(qp, op, sess, msrc, mdst); - break; - case OPENSSL_CHAIN_CIPHER_AUTH: -- process_openssl_cipher_op(op, sess, msrc, mdst); -+ process_openssl_cipher_op(qp, op, sess, msrc, mdst); - /* OOP */ - if (msrc != mdst) - copy_plaintext(msrc, mdst, op); -@@ -3119,10 +3303,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, - break; - case OPENSSL_CHAIN_AUTH_CIPHER: - process_openssl_auth_op(qp, op, sess, msrc, mdst); -- process_openssl_cipher_op(op, sess, msrc, mdst); -+ process_openssl_cipher_op(qp, op, sess, msrc, mdst); - break; - case OPENSSL_CHAIN_COMBINED: -- process_openssl_combined_op(op, sess, msrc, mdst); -+ process_openssl_combined_op(qp, op, sess, msrc, mdst); - break; - case OPENSSL_CHAIN_CIPHER_BPI: - process_openssl_docsis_bpi_op(op, sess, msrc, mdst); -diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c -index b16baaa08f..1bbb855a59 100644 ---- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c -+++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c -@@ -794,9 +794,35 @@ qp_setup_cleanup: +- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length, ++ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, sess->iv.length, + NULL) <= 0) + return -EINVAL; - /** Returns the size of the symmetric session structure */ - static unsigned --openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) -+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev) - { -- return sizeof(struct openssl_session); -+ /* -+ * For 0 qps, return the max size of the session - this is necessary if -+ * the user calls into this function to create the session mempool, -+ * without first configuring the number of qps for the cryptodev. -+ */ -+ if (dev->data->nb_queue_pairs == 0) { -+ unsigned int max_nb_qps = ((struct openssl_private *) -+ dev->data->dev_private)->max_nb_qpairs; -+ return sizeof(struct openssl_session) + -+ (sizeof(struct evp_ctx_pair) * max_nb_qps); -+ } -+ -+ /* -+ * With only one queue pair, the thread safety of multiple context -+ * copies is not necessary, so don't allocate extra memory for the -+ * array. -+ */ -+ if (dev->data->nb_queue_pairs == 1) -+ return sizeof(struct openssl_session); -+ -+ /* -+ * Otherwise, the size of the flexible array member should be enough to -+ * fit pointers to per-qp contexts. This is twice the number of queue -+ * pairs, to allow for auth and cipher contexts. 
-+ */ -+ return sizeof(struct openssl_session) + -+ (sizeof(struct evp_ctx_pair) * dev->data->nb_queue_pairs); - } + if (do_ccm) +- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, ++ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, + tag_len, NULL); - /** Returns the size of the asymmetric session structure */ -@@ -808,7 +834,7 @@ openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused) +- if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) ++ if (EVP_EncryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) + return -EINVAL; - /** Configure the session from a crypto xform chain */ + return 0; +@@ -410,7 +396,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, static int --openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, -+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess) + openssl_set_sess_aead_dec_param(struct openssl_session *sess, + enum rte_crypto_aead_algorithm algo, +- uint8_t tag_len, const uint8_t *key) ++ uint8_t tag_len, const uint8_t *key, ++ EVP_CIPHER_CTX **ctx) { -@@ -820,7 +846,8 @@ openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, - return -EINVAL; + int iv_type = 0; + unsigned int do_ccm = 0; +@@ -437,7 +424,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, } -- ret = openssl_set_session_parameters(sess_private_data, xform); -+ ret = openssl_set_session_parameters(sess_private_data, xform, -+ dev->data->nb_queue_pairs); - if (ret != 0) { - OPENSSL_LOG(ERR, "failed configure session parameters"); + sess->cipher.mode = OPENSSL_CIPHER_LIB; +- sess->cipher.ctx = EVP_CIPHER_CTX_new(); ++ *ctx = EVP_CIPHER_CTX_new(); -diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c -index de72383d4b..b44acece7c 100644 ---- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c -+++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c -@@ -9,6 +9,7 @@ - #include "qat_asym.h" - #include "qat_crypto.h" - #include "qat_crypto_pmd_gens.h" -+#include "adf_transport_access_macros_gen4vf.h" + if (get_aead_algo(algo, sess->cipher.key.length, + &sess->cipher.evp_algo) != 0) +@@ -447,24 +434,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, + sess->chain_order = OPENSSL_CHAIN_COMBINED; + +- if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo, ++ if (EVP_DecryptInit_ex(*ctx, sess->cipher.evp_algo, + NULL, NULL, NULL) <= 0) + return -EINVAL; + +- if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, ++ if (EVP_CIPHER_CTX_ctrl(*ctx, iv_type, + sess->iv.length, NULL) <= 0) + return -EINVAL; + + if (do_ccm) +- EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG, ++ EVP_CIPHER_CTX_ctrl(*ctx, EVP_CTRL_CCM_SET_TAG, + tag_len, NULL); + +- if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0) ++ if (EVP_DecryptInit_ex(*ctx, NULL, NULL, key, NULL) <= 0) + return -EINVAL; - static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = { -@@ -233,6 +234,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx, return 0; } -+int -+qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30200000L) ++static int openssl_aesni_ctx_clone(EVP_CIPHER_CTX **dest, ++ struct openssl_session *sess) +{ -+ struct qat_qp *qp = qp_data; -+ struct qat_queue *tx_queue = &qp->tx_q; -+ struct 
qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; ++ /* OpenSSL versions 3.0.0 <= V < 3.2.0 have no dupctx() implementation ++ * for AES-GCM and AES-CCM. In this case, we have to create new empty ++ * contexts and initialise, as we did the original context. ++ */ ++ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) ++ sess->aead_algo = RTE_CRYPTO_AEAD_AES_GCM; + -+ if (unlikely(dp_ctx->cached_enqueue != n)) -+ return -1; ++ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ++ return openssl_set_sess_aead_enc_param(sess, sess->aead_algo, ++ sess->auth.digest_length, sess->cipher.key.data, ++ dest); ++ else ++ return openssl_set_sess_aead_dec_param(sess, sess->aead_algo, ++ sess->auth.digest_length, sess->cipher.key.data, ++ dest); ++} ++#endif + -+ qp->enqueued += n; -+ qp->stats.enqueued_count += n; + /** Set session cipher parameters */ + static int + openssl_set_session_cipher_parameters(struct openssl_session *sess, +@@ -521,6 +530,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, + sess->cipher.key.length, + sess->cipher.key.data) != 0) + return -EINVAL; + -+ tx_queue->tail = dp_ctx->tail; + -+ WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr, -+ tx_queue->hw_bundle_number, -+ tx_queue->hw_queue_number, tx_queue->tail); -+ -+ tx_queue->csr_tail = tx_queue->tail; -+ dp_ctx->cached_enqueue = 0; -+ -+ return 0; -+} -+ -+int -+qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) -+{ -+ struct qat_qp *qp = qp_data; -+ struct qat_queue *rx_queue = &qp->rx_q; -+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; -+ -+ if (unlikely(dp_ctx->cached_dequeue != n)) -+ return -1; -+ -+ rx_queue->head = dp_ctx->head; -+ rx_queue->nb_processed_responses += n; -+ qp->dequeued += n; -+ qp->stats.dequeued_count += n; -+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { -+ uint32_t old_head, new_head; -+ uint32_t max_head; -+ -+ old_head = rx_queue->csr_head; -+ new_head = rx_queue->head; -+ max_head = qp->nb_descriptors * rx_queue->msg_size; -+ -+ /* write out free descriptors */ -+ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; -+ -+ if (new_head < old_head) { -+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, -+ max_head - old_head); -+ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, -+ new_head); -+ } else { -+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - -+ old_head); -+ } -+ rx_queue->nb_processed_responses = 0; -+ rx_queue->csr_head = new_head; -+ -+ /* write current head to CSR */ -+ WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr, -+ rx_queue->hw_bundle_number, rx_queue->hw_queue_number, -+ new_head); -+ } -+ -+ dp_ctx->cached_dequeue = 0; -+ return 0; -+} -+ - static int - qat_sym_crypto_set_session_gen4(void *cdev, void *session) - { -@@ -390,11 +463,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx) - { - struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; - struct qat_sym_session *ctx = _ctx; -- int ret; - -- ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx); -- if (ret < 0) -- return ret; -+ raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4; -+ raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1; -+ raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1; -+ raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4; ++ /* We use 3DES encryption also for decryption. ++ * IV is not important for 3DES ECB. 
++ */ ++ if (EVP_EncryptInit_ex(sess->cipher.ctx, EVP_des_ede3_ecb(), ++ NULL, sess->cipher.key.data, NULL) != 1) ++ return -EINVAL; + -+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || -+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && -+ !ctx->is_gmac) { -+ /* AES-GCM or AES-CCM */ -+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || -+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || -+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 -+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE -+ && ctx->qat_hash_alg == -+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { -+ raw_dp_ctx->enqueue_burst = -+ qat_sym_dp_enqueue_aead_jobs_gen1; -+ raw_dp_ctx->enqueue = -+ qat_sym_dp_enqueue_single_aead_gen1; -+ } else { -+ raw_dp_ctx->enqueue_burst = -+ qat_sym_dp_enqueue_chain_jobs_gen1; -+ raw_dp_ctx->enqueue = -+ qat_sym_dp_enqueue_single_chain_gen1; -+ } -+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { -+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1; -+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1; -+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { -+ if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE || -+ ctx->qat_cipher_alg == -+ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) { -+ raw_dp_ctx->enqueue_burst = -+ qat_sym_dp_enqueue_aead_jobs_gen1; -+ raw_dp_ctx->enqueue = -+ qat_sym_dp_enqueue_single_aead_gen1; -+ } else { -+ raw_dp_ctx->enqueue_burst = -+ qat_sym_dp_enqueue_cipher_jobs_gen1; -+ raw_dp_ctx->enqueue = -+ qat_sym_dp_enqueue_single_cipher_gen1; -+ } -+ } else -+ return -1; - - if (ctx->is_single_pass && ctx->is_ucs) { - raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4; -diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h -index b8ddf42d6f..64e892d022 100644 ---- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h -+++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h -@@ -394,7 +394,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, - struct qat_sym_op_cookie *cookie) - { - union rte_crypto_sym_ofs ofs; -- uint32_t max_len = 0; -+ uint32_t max_len = 0, oop_offset = 0; - uint32_t cipher_len = 0, cipher_ofs = 0; - uint32_t auth_len = 0, auth_ofs = 0; - int is_oop = (op->sym->m_dst != NULL) && -@@ -468,6 +468,16 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, - - max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len); + break; -+ /* If OOP, we need to keep in mind that offset needs to start where -+ * cipher/auth starts, namely no offset on the smaller one -+ */ -+ if (is_oop) { -+ oop_offset = RTE_MIN(auth_ofs, cipher_ofs); -+ auth_ofs -= oop_offset; -+ cipher_ofs -= oop_offset; -+ max_len -= oop_offset; -+ } -+ - /* digest in buffer check. 
Needed only for wireless algos - * or combined cipher-crc operations - */ -@@ -508,9 +518,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, - max_len = RTE_MAX(max_len, auth_ofs + auth_len + - ctx->digest_length); + case RTE_CRYPTO_CIPHER_DES_CBC: +@@ -586,6 +604,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, + return -ENOTSUP; } -- -- /* Passing 0 as cipher & auth offsets are assigned into ofs later */ -- n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len, -+ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, oop_offset, max_len, - in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); - if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) { - op->status = RTE_CRYPTO_OP_STATUS_ERROR; -@@ -520,7 +528,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, - - if (unlikely((op->sym->m_dst != NULL) && - (op->sym->m_dst != op->sym->m_src))) { -- int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0, -+ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, oop_offset, - max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); - - if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { -@@ -894,10 +902,12 @@ enqueue_one_aead_job_gen1(struct qat_sym_session *ctx, - *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = - q - ICP_QAT_HW_CCM_NONCE_OFFSET; - -- rte_memcpy((uint8_t *)aad->va + -- ICP_QAT_HW_CCM_NONCE_OFFSET, -- (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, -- ctx->cipher_iv.length); -+ if (ctx->aad_len > 0) { -+ rte_memcpy((uint8_t *)aad->va + -+ ICP_QAT_HW_CCM_NONCE_OFFSET, -+ (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, -+ ctx->cipher_iv.length); -+ } - break; - default: - break; -@@ -1007,6 +1017,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); - int - qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); -+int -+qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); -+ -+int -+qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); ++ EVP_CIPHER_CTX_set_padding(sess->cipher.ctx, 0); + - int - qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx); + return 0; + } -diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c -index 6e03bde841..8235fc0a5a 100644 ---- a/dpdk/drivers/crypto/qat/qat_sym.c -+++ b/dpdk/drivers/crypto/qat/qat_sym.c -@@ -18,7 +18,6 @@ - #include "qat_qp.h" +@@ -623,12 +643,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, + return openssl_set_sess_aead_enc_param(sess, + RTE_CRYPTO_AEAD_AES_GCM, + xform->auth.digest_length, +- xform->auth.key.data); ++ xform->auth.key.data, ++ &sess->cipher.ctx); + else + return openssl_set_sess_aead_dec_param(sess, + RTE_CRYPTO_AEAD_AES_GCM, + xform->auth.digest_length, +- xform->auth.key.data); ++ xform->auth.key.data, ++ &sess->cipher.ctx); + break; - uint8_t qat_sym_driver_id; --int qat_legacy_capa; + case RTE_CRYPTO_AUTH_MD5: +@@ -655,7 +677,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, + else + return -EINVAL; - struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; +- rte_memcpy(algo_name, algo, strlen(algo) + 1); ++ strlcpy(algo_name, algo, sizeof(algo_name)); + params[0] = OSSL_PARAM_construct_utf8_string( + OSSL_MAC_PARAM_CIPHER, algo_name, 0); + params[1] = OSSL_PARAM_construct_end(); +@@ -770,16 +792,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess, + /* Select cipher direction */ + if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) + return openssl_set_sess_aead_enc_param(sess, 
xform->aead.algo, +- xform->aead.digest_length, xform->aead.key.data); ++ xform->aead.digest_length, xform->aead.key.data, ++ &sess->cipher.ctx); + else + return openssl_set_sess_aead_dec_param(sess, xform->aead.algo, +- xform->aead.digest_length, xform->aead.key.data); ++ xform->aead.digest_length, xform->aead.key.data, ++ &sess->cipher.ctx); + } -@@ -266,7 +265,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + /** Parse crypto xform chain and set private session parameters */ + int + openssl_set_session_parameters(struct openssl_session *sess, +- const struct rte_crypto_sym_xform *xform) ++ const struct rte_crypto_sym_xform *xform, ++ uint16_t nb_queue_pairs) + { + const struct rte_crypto_sym_xform *cipher_xform = NULL; + const struct rte_crypto_sym_xform *auth_xform = NULL; +@@ -841,6 +866,12 @@ openssl_set_session_parameters(struct openssl_session *sess, } - - cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; -- QAT_LOG(INFO, "Device %s rte_security support ensabled", name); -+ QAT_LOG(INFO, "Device %s rte_security support enabled", name); - } else { - QAT_LOG(INFO, "Device %s rte_security support disabled", name); } -diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c -index 8968bb853b..2c91ceec13 100644 ---- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c -+++ b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c -@@ -16,9 +16,6 @@ - - #define DPAA2_QDMA_PREFETCH "prefetch" --/* Dynamic log type identifier */ --int dpaa2_qdma_logtype; -- - uint32_t dpaa2_coherent_no_alloc_cache; - uint32_t dpaa2_coherent_alloc_cache; ++ /* ++ * With only one queue pair, the array of copies is not needed. ++ * Otherwise, one entry per queue pair is required. ++ */ ++ sess->ctx_copies_len = nb_queue_pairs > 1 ? nb_queue_pairs : 0; ++ + return 0; + } -@@ -1699,4 +1696,4 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = { - RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd); - RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma, - "no_prefetch= "); --RTE_LOG_REGISTER_DEFAULT(dpaa_qdma2_logtype, INFO); -+RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO); -diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c -index 0e11ca14cc..4db3b0554c 100644 ---- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c -+++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c -@@ -39,8 +39,6 @@ hisi_dma_queue_base(struct hisi_dma_dev *hw) +@@ -848,33 +879,45 @@ openssl_set_session_parameters(struct openssl_session *sess, + void + openssl_reset_session(struct openssl_session *sess) { - if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) - return HISI_DMA_HIP08_QUEUE_BASE; -- else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) -- return HISI_DMA_HIP09_QUEUE_BASE; - else - return 0; - } -@@ -216,25 +214,6 @@ hisi_dma_init_hw(struct hisi_dma_dev *hw) - HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); - hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, - HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); -- } else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) { -- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG, -- HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false); -- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG, -- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); -- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, -- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); -- hisi_dma_update_queue_mbit(hw, -- HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG, -- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); -- hisi_dma_update_queue_mbit(hw, -- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG, -- 
HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); -- hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG, -- HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true); -- hisi_dma_update_bit(hw, -- HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id), -- HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B, -- true); +- EVP_CIPHER_CTX_free(sess->cipher.ctx); ++ /* Free all the qp_ctx entries. */ ++ for (uint16_t i = 0; i < sess->ctx_copies_len; i++) { ++ if (sess->qp_ctx[i].cipher != NULL) { ++ EVP_CIPHER_CTX_free(sess->qp_ctx[i].cipher); ++ sess->qp_ctx[i].cipher = NULL; ++ } + +- if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) +- EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); ++ switch (sess->auth.mode) { ++ case OPENSSL_AUTH_AS_AUTH: ++ EVP_MD_CTX_destroy(sess->qp_ctx[i].auth); ++ sess->qp_ctx[i].auth = NULL; ++ break; ++ case OPENSSL_AUTH_AS_HMAC: ++ free_hmac_ctx(sess->qp_ctx[i].hmac); ++ sess->qp_ctx[i].hmac = NULL; ++ break; ++ case OPENSSL_AUTH_AS_CMAC: ++ free_cmac_ctx(sess->qp_ctx[i].cmac); ++ sess->qp_ctx[i].cmac = NULL; ++ break; ++ } ++ } ++ ++ EVP_CIPHER_CTX_free(sess->cipher.ctx); + + switch (sess->auth.mode) { + case OPENSSL_AUTH_AS_AUTH: + EVP_MD_CTX_destroy(sess->auth.auth.ctx); + break; + case OPENSSL_AUTH_AS_HMAC: +- EVP_PKEY_free(sess->auth.hmac.pkey); +-# if OPENSSL_VERSION_NUMBER >= 0x30000000L +- EVP_MAC_CTX_free(sess->auth.hmac.ctx); +-# else +- HMAC_CTX_free(sess->auth.hmac.ctx); +-# endif ++ free_hmac_ctx(sess->auth.hmac.ctx); + break; + case OPENSSL_AUTH_AS_CMAC: +-# if OPENSSL_VERSION_NUMBER >= 0x30000000L +- EVP_MAC_CTX_free(sess->auth.cmac.ctx); +-# else +- CMAC_CTX_free(sess->auth.cmac.ctx); +-# endif +- break; +- default: ++ free_cmac_ctx(sess->auth.cmac.ctx); + break; } ++ ++ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI) ++ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx); } -@@ -256,8 +235,6 @@ hisi_dma_reg_layout(uint8_t revision) - { - if (revision == HISI_DMA_REVISION_HIP08B) - return HISI_DMA_REG_LAYOUT_HIP08; -- else if (revision >= HISI_DMA_REVISION_HIP09A) -- return HISI_DMA_REG_LAYOUT_HIP09; - else - return HISI_DMA_REG_LAYOUT_INVALID; - } -@@ -328,14 +305,11 @@ hisi_dma_info_get(const struct rte_dma_dev *dev, - struct rte_dma_info *dev_info, - uint32_t info_sz) + /** Provide session for operation */ +@@ -914,7 +957,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) + sess = (struct openssl_session *)_sess->driver_priv_data; + + if (unlikely(openssl_set_session_parameters(sess, +- op->sym->xform) != 0)) { ++ op->sym->xform, 1) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + sess = NULL; + } +@@ -1068,8 +1111,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, + if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_cipher_encrypt_err; + +- EVP_CIPHER_CTX_set_padding(ctx, 0); +- + if (process_openssl_encryption_update(mbuf_src, offset, &dst, + srclen, ctx, inplace)) + goto process_cipher_encrypt_err; +@@ -1118,8 +1159,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, + if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) + goto process_cipher_decrypt_err; + +- EVP_CIPHER_CTX_set_padding(ctx, 0); +- + if (process_openssl_decryption_update(mbuf_src, offset, &dst, + srclen, ctx, inplace)) + goto process_cipher_decrypt_err; +@@ -1136,10 +1175,10 @@ process_cipher_decrypt_err: + /** Process cipher des 3 ctr encryption, decryption algorithm */ + static int + process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, +- int offset, uint8_t *iv, uint8_t *key, int srclen, +- EVP_CIPHER_CTX *ctx) 
++ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) { -- struct hisi_dma_dev *hw = dev->data->dev_private; -+ RTE_SET_USED(dev); - RTE_SET_USED(info_sz); +- uint8_t ebuf[8], ctr[8]; ++ uint8_t ebuf[8]; ++ uint64_t ctr; + int unused, n; + struct rte_mbuf *m; + uint8_t *src; +@@ -1155,21 +1194,19 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, + src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); + l = rte_pktmbuf_data_len(m) - offset; - dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | - RTE_DMA_CAPA_OPS_COPY; -- if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) -- dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS; +- /* We use 3DES encryption also for decryption. +- * IV is not important for 3DES ecb +- */ +- if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0) +- goto process_cipher_des3ctr_err; - - dev_info->max_vchans = 1; - dev_info->max_desc = HISI_DMA_MAX_DESC_NUM; - dev_info->min_desc = HISI_DMA_MIN_DESC_NUM; -@@ -514,18 +488,6 @@ hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f) - { HISI_DMA_REG_LAYOUT_HIP08, - HISI_DMA_HIP08_DUMP_START_REG, - HISI_DMA_HIP08_DUMP_END_REG }, -- { HISI_DMA_REG_LAYOUT_HIP09, -- HISI_DMA_HIP09_DUMP_REGION_A_START_REG, -- HISI_DMA_HIP09_DUMP_REGION_A_END_REG }, -- { HISI_DMA_REG_LAYOUT_HIP09, -- HISI_DMA_HIP09_DUMP_REGION_B_START_REG, -- HISI_DMA_HIP09_DUMP_REGION_B_END_REG }, -- { HISI_DMA_REG_LAYOUT_HIP09, -- HISI_DMA_HIP09_DUMP_REGION_C_START_REG, -- HISI_DMA_HIP09_DUMP_REGION_C_END_REG }, -- { HISI_DMA_REG_LAYOUT_HIP09, -- HISI_DMA_HIP09_DUMP_REGION_D_START_REG, -- HISI_DMA_HIP09_DUMP_REGION_D_END_REG }, - }; - uint32_t i; +- memcpy(ctr, iv, 8); ++ memcpy(&ctr, iv, 8); -diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h -index 5a17f9f69e..a57b5c759a 100644 ---- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h -+++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h -@@ -25,22 +25,14 @@ - #define HISI_DMA_DEVICE_ID 0xA122 - #define HISI_DMA_PCI_REVISION_ID_REG 0x08 - #define HISI_DMA_REVISION_HIP08B 0x21 --#define HISI_DMA_REVISION_HIP09A 0x30 + for (n = 0; n < srclen; n++) { + if (n % 8 == 0) { ++ uint64_t cpu_ctr; ++ + if (EVP_EncryptUpdate(ctx, + (unsigned char *)&ebuf, &unused, + (const unsigned char *)&ctr, 8) <= 0) + goto process_cipher_des3ctr_err; +- ctr_inc(ctr); ++ cpu_ctr = rte_be_to_cpu_64(ctr); ++ cpu_ctr++; ++ ctr = rte_cpu_to_be_64(cpu_ctr); + } + dst[n] = *(src++) ^ ebuf[n % 8]; - #define HISI_DMA_MAX_HW_QUEUES 4 - #define HISI_DMA_MAX_DESC_NUM 8192 - #define HISI_DMA_MIN_DESC_NUM 32 +@@ -1427,6 +1464,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, + if (m == 0) + goto process_auth_err; --/** -- * The HIP08B(HiSilicon IP08) and HIP09B(HiSilicon IP09) are DMA iEPs, they -- * have the same pci device id but different pci revision. -- * Unfortunately, they have different register layouts, so two layout -- * enumerations are defined. -- */ - enum { - HISI_DMA_REG_LAYOUT_INVALID = 0, -- HISI_DMA_REG_LAYOUT_HIP08, -- HISI_DMA_REG_LAYOUT_HIP09 -+ HISI_DMA_REG_LAYOUT_HIP08 - }; ++ if (EVP_MAC_init(ctx, NULL, 0, NULL) <= 0) ++ goto process_auth_err; ++ + src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); - /** -@@ -69,9 +61,6 @@ enum { - * length of queue-region. The global offset for a single queue register is - * calculated by: - * offset = queue-base + (queue-id * queue-region) + reg-offset-in-region. 
-- * -- * The first part of queue region is basically the same for HIP08 and HIP09 -- * register layouts, therefore, HISI_QUEUE_* registers are defined for it. - */ - #define HISI_DMA_QUEUE_SQ_BASE_L_REG 0x0 - #define HISI_DMA_QUEUE_SQ_BASE_H_REG 0x4 -@@ -110,28 +99,6 @@ enum { - #define HISI_DMA_HIP08_DUMP_START_REG 0x2000 - #define HISI_DMA_HIP08_DUMP_END_REG 0x2280 + l = rte_pktmbuf_data_len(m) - offset; +@@ -1453,11 +1493,9 @@ process_auth_final: + if (EVP_MAC_final(ctx, dst, &dstlen, DIGEST_LENGTH_MAX) != 1) + goto process_auth_err; --/** -- * HiSilicon IP09 DMA register and field define: -- */ --#define HISI_DMA_HIP09_QUEUE_BASE 0x2000 --#define HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M GENMASK(31, 28) --#define HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B 2 --#define HISI_DMA_HIP09_QUEUE_INT_MASK_M 0x1 --#define HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG 0x48 --#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG 0x4C --#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M GENMASK(18, 1) --#define HISI_DMA_HIP09_QUEUE_CFG_REG(queue_id) (0x800 + \ -- (queue_id) * 0x20) --#define HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B 16 --#define HISI_DMA_HIP09_DUMP_REGION_A_START_REG 0x0 --#define HISI_DMA_HIP09_DUMP_REGION_A_END_REG 0x368 --#define HISI_DMA_HIP09_DUMP_REGION_B_START_REG 0x800 --#define HISI_DMA_HIP09_DUMP_REGION_B_END_REG 0xA08 --#define HISI_DMA_HIP09_DUMP_REGION_C_START_REG 0x1800 --#define HISI_DMA_HIP09_DUMP_REGION_C_END_REG 0x1A4C --#define HISI_DMA_HIP09_DUMP_REGION_D_START_REG 0x1C00 --#define HISI_DMA_HIP09_DUMP_REGION_D_END_REG 0x1CC4 -- - /** - * In fact, there are multiple states, but it need to pay attention to - * the following three states for the driver: -diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py -index c0c833ade9..5c9572b49d 100755 ---- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py -+++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py -@@ -104,8 +104,10 @@ def configure_dsa(dsa_id, args): - "priority": 1, - "max_batch_size": 1024, - "size": int(max_work_queues_size / nb_queues)} -- wqcfg.update(parse_wq_opts(args.wq_option)) - wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}")) -+ if os.path.exists(os.path.join(wq_dir.path, f"driver_name")): -+ wqcfg.update({"driver_name": "user"}) -+ wqcfg.update(parse_wq_opts(args.wq_option)) - wq_dir.write_values(wqcfg) +- EVP_MAC_CTX_free(ctx); + return 0; - # enable device and then queues -diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c -index 3b2d4c2b65..ba8076715d 100644 ---- a/dpdk/drivers/dma/idxd/idxd_bus.c -+++ b/dpdk/drivers/dma/idxd/idxd_bus.c -@@ -261,9 +261,15 @@ static int - is_for_this_process_use(struct rte_dsa_device *dev, const char *name) - { - char *runtime_dir = strdup(rte_eal_get_runtime_dir()); -- char *prefix = basename(runtime_dir); -- int prefixlen = strlen(prefix); - int retval = 0; -+ int prefixlen; -+ char *prefix; + process_auth_err: +- EVP_MAC_CTX_free(ctx); + OPENSSL_LOG(ERR, "Process openssl auth failed"); + return -EINVAL; + } +@@ -1569,11 +1607,151 @@ process_auth_err: + # endif + /*----------------------------------------------------------------------------*/ + ++static inline EVP_CIPHER_CTX * ++get_local_cipher_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++ /* If the array is not being used, just return the main context. 
*/ ++ if (sess->ctx_copies_len == 0) ++ return sess->cipher.ctx; + -+ if (runtime_dir == NULL) -+ return retval; ++ EVP_CIPHER_CTX **lctx = &sess->qp_ctx[qp->id].cipher; + -+ prefix = basename(runtime_dir); -+ prefixlen = strlen(prefix); - - if (strncmp(name, "dpdk_", 5) == 0) - retval = 1; -diff --git a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h -index 53e0dde20c..256237b895 100644 ---- a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h -+++ b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h -@@ -70,6 +70,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, - const uint64_t *txq_data, const uint32_t flags) - { - uint8_t lnum = 0, loff = 0, shft = 0; -+ struct rte_mbuf *extm = NULL; - struct cn10k_eth_txq *txq; - uintptr_t laddr; - uint16_t segdw; -@@ -90,7 +91,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, - if (flags & NIX_TX_OFFLOAD_TSO_F) - cn10k_nix_xmit_prepare_tso(m, flags); - -- cn10k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, &sec, -+ cn10k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, &sec, - txq->mark_flag, txq->mark_fmt); - - laddr = lmt_addr; -@@ -105,7 +106,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, - cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags); - - if (flags & NIX_TX_MULTI_SEG_F) -- segdw = cn10k_nix_prepare_mseg(txq, m, (uint64_t *)laddr, flags); -+ segdw = cn10k_nix_prepare_mseg(txq, m, &extm, (uint64_t *)laddr, flags); - else - segdw = cn10k_nix_tx_ext_subs(flags) + 2; - -@@ -127,6 +128,9 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, - /* Memory barrier to make sure lmtst store completes */ - rte_io_wmb(); - -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) -+ cn10k_nix_free_extmbuf(extm); ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30200000L ++ /* EVP_CIPHER_CTX_dup() added in OSSL 3.2 */ ++ *lctx = EVP_CIPHER_CTX_dup(sess->cipher.ctx); ++ return *lctx; ++#elif OPENSSL_VERSION_NUMBER >= 0x30000000L ++ if (sess->chain_order == OPENSSL_CHAIN_COMBINED) { ++ /* AESNI special-cased to use openssl_aesni_ctx_clone() ++ * to allow for working around lack of ++ * EVP_CIPHER_CTX_copy support for 3.0.0 <= OSSL Version ++ * < 3.2.0. 
++ */ ++ if (openssl_aesni_ctx_clone(lctx, sess) != 0) ++ *lctx = NULL; ++ return *lctx; ++ } ++#endif + - return 1; - } - -diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h -index 0451157812..107265d54b 100644 ---- a/dpdk/drivers/event/cnxk/cn9k_worker.h -+++ b/dpdk/drivers/event/cnxk/cn9k_worker.h -@@ -746,7 +746,7 @@ static __rte_always_inline uint16_t - cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, - uint64_t *txq_data, const uint32_t flags) - { -- struct rte_mbuf *m = ev->mbuf; -+ struct rte_mbuf *m = ev->mbuf, *extm = NULL; - struct cn9k_eth_txq *txq; - - /* Perform header writes before barrier for TSO */ -@@ -767,7 +767,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, - if (cn9k_sso_sq_depth(txq) <= 0) - return 0; - cn9k_nix_tx_skeleton(txq, cmd, flags, 0); -- cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag, -+ cn9k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, txq->mark_flag, - txq->mark_fmt); - - if (flags & NIX_TX_OFFLOAD_SECURITY_F) { -@@ -789,7 +789,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, - } - - if (flags & NIX_TX_MULTI_SEG_F) { -- const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, cmd, flags); -+ const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, &extm, cmd, flags); - cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, segdw, - flags); - if (!CNXK_TT_FROM_EVENT(ev->event)) { -@@ -819,6 +819,9 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, - } - - done: -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) -+ cn9k_nix_free_extmbuf(extm); ++ *lctx = EVP_CIPHER_CTX_new(); ++ EVP_CIPHER_CTX_copy(*lctx, sess->cipher.ctx); ++ } + - return 1; - } - -diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index 0c61f4c20e..20f7f0d6df 100644 ---- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c -+++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -@@ -162,16 +162,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev, uint32_t deq_depth, - - deq_tmo_ns = conf->dequeue_timeout_ns; - -- if (deq_tmo_ns == 0) -- deq_tmo_ns = dev->min_dequeue_timeout_ns; -- if (deq_tmo_ns < dev->min_dequeue_timeout_ns || -- deq_tmo_ns > dev->max_dequeue_timeout_ns) { -+ if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns || -+ deq_tmo_ns > dev->max_dequeue_timeout_ns)) { - plt_err("Unsupported dequeue timeout requested"); - return -EINVAL; - } - -- if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) -+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { -+ if (deq_tmo_ns == 0) -+ deq_tmo_ns = dev->min_dequeue_timeout_ns; - dev->is_timeout_deq = 1; ++ return *lctx; ++} ++ ++static inline EVP_MD_CTX * ++get_local_auth_ctx(struct openssl_session *sess, struct openssl_qp *qp) ++{ ++ /* If the array is not being used, just return the main context. 
*/ ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.auth.ctx; ++ ++ EVP_MD_CTX **lctx = &sess->qp_ctx[qp->id].auth; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30100000L ++ /* EVP_MD_CTX_dup() added in OSSL 3.1 */ ++ *lctx = EVP_MD_CTX_dup(sess->auth.auth.ctx); ++#else ++ *lctx = EVP_MD_CTX_new(); ++ EVP_MD_CTX_copy(*lctx, sess->auth.auth.ctx); ++#endif + } - - dev->deq_tmo_ns = deq_tmo_ns; - -@@ -553,6 +554,9 @@ parse_list(const char *value, void *opaque, param_parse_t fn) - char *end = NULL; - char *f = s; - -+ if (s == NULL) -+ return; + - while (*s) { - if (*s == '[') - start = s; -@@ -663,7 +667,7 @@ cnxk_sso_init(struct rte_eventdev *event_dev) - } - - dev->is_timeout_deq = 0; -- dev->min_dequeue_timeout_ns = 0; -+ dev->min_dequeue_timeout_ns = USEC2NSEC(1); - dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF); - dev->max_num_events = -1; - dev->nb_event_queues = 0; -diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c -index 050ace0904..5044cb17ef 100644 ---- a/dpdk/drivers/event/dlb2/dlb2.c -+++ b/dpdk/drivers/event/dlb2/dlb2.c -@@ -160,7 +160,6 @@ static int - dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) - { - struct dlb2_hw_dev *handle = &dlb2->qm_instance; -- struct dlb2_hw_resource_info *dlb2_info = &handle->info; - int num_ldb_ports; - int ret; - -@@ -222,8 +221,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) - handle->info.hw_rsrc_max.reorder_window_size = - dlb2->hw_rsrc_query_results.num_hist_list_entries; - -- rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info)); -- - return 0; - } - -diff --git a/dpdk/drivers/event/sw/iq_chunk.h b/dpdk/drivers/event/sw/iq_chunk.h -index 31d013eab7..7820815c38 100644 ---- a/dpdk/drivers/event/sw/iq_chunk.h -+++ b/dpdk/drivers/event/sw/iq_chunk.h -@@ -9,8 +9,6 @@ - #include - #include - --#define IQ_ROB_NAMESIZE 12 -- - struct sw_queue_chunk { - struct rte_event events[SW_EVS_PER_Q_CHUNK]; - struct sw_queue_chunk *next; -diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c -index 55e7735cb0..2096496917 100644 ---- a/dpdk/drivers/event/sw/sw_evdev.c -+++ b/dpdk/drivers/event/sw/sw_evdev.c -@@ -228,9 +228,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, - const struct rte_event_queue_conf *queue_conf) - { - unsigned int i; -- int dev_id = sw->data->dev_id; - int socket_id = sw->data->socket_id; -- char buf[IQ_ROB_NAMESIZE]; - struct sw_qid *qid = &sw->qids[idx]; - - /* Initialize the FID structures to no pinning (-1), and zero packets */ -@@ -260,8 +258,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, - goto cleanup; - } - -- snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i); -- qid->reorder_buffer = rte_zmalloc_socket(buf, -+ qid->reorder_buffer = rte_zmalloc_socket(NULL, - window_size * sizeof(qid->reorder_buffer[0]), - 0, socket_id); - if (!qid->reorder_buffer) { -diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build -index 5ba534049a..f2be71bc05 100644 ---- a/dpdk/drivers/meson.build -+++ b/dpdk/drivers/meson.build -@@ -93,7 +93,7 @@ foreach subpath:subdirs - if skip_class - drv_path = join_paths(class, '*') - dpdk_drvs_disabled += drv_path -- set_variable(drv_path.underscorify() + '_disable_reason', reason) -+ set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) - continue - endif - endif -@@ -199,7 +199,7 @@ foreach subpath:subdirs - # component disable printout in those cases - if reason != '' - dpdk_drvs_disabled += drv_path -- 
set_variable(drv_path.underscorify() + '_disable_reason', reason) -+ set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) - endif - continue - endif -diff --git a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c -index 7f7e5efcea..5370038733 100644 ---- a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c -+++ b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c -@@ -288,6 +288,7 @@ cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *l - static int - cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer) - { -+ struct cn10k_ml_layer_xstats *xstats; - char str[RTE_MEMZONE_NAMESIZE]; - const struct plt_memzone *mz; - uint64_t isize = 0; -@@ -309,6 +310,16 @@ cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer * - PLT_PTR_ADD(mz->addr, isize), 1); - plt_memzone_free(mz); - -+ /* Reset sync xstats. */ -+ xstats = layer->glow.sync_xstats; -+ xstats->hw_latency_tot = 0; -+ xstats->hw_latency_min = UINT64_MAX; -+ xstats->hw_latency_max = 0; -+ xstats->fw_latency_tot = 0; -+ xstats->fw_latency_min = UINT64_MAX; -+ xstats->fw_latency_max = 0; -+ xstats->dequeued_count = 0; ++ return *lctx; ++} + - return ret; - } - -diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -index 397a32db58..6b7b16f348 100644 ---- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -+++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c -@@ -6,6 +6,7 @@ - * All rights reserved. - */ - -+#include - #include - #include - #include -@@ -39,7 +40,7 @@ - #define DFLT_FRAME_SIZE (1 << 11) - #define DFLT_FRAME_COUNT (1 << 9) - --struct pkt_rx_queue { -+struct __rte_cache_aligned pkt_rx_queue { - int sockfd; - - struct iovec *rd; -@@ -55,7 +56,7 @@ struct pkt_rx_queue { - volatile unsigned long rx_bytes; - }; - --struct pkt_tx_queue { -+struct __rte_cache_aligned pkt_tx_queue { - int sockfd; - unsigned int frame_data_size; - -diff --git a/dpdk/drivers/net/af_xdp/compat.h b/dpdk/drivers/net/af_xdp/compat.h -index 28ea64aeaa..3b5a5c1ed5 100644 ---- a/dpdk/drivers/net/af_xdp/compat.h -+++ b/dpdk/drivers/net/af_xdp/compat.h -@@ -46,6 +46,21 @@ create_shared_socket(struct xsk_socket **xsk_ptr __rte_unused, - } - #endif - -+#ifdef ETH_AF_XDP_UPDATE_XSKMAP -+static __rte_always_inline int -+update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx __rte_unused) ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static inline EVP_MAC_CTX * ++#else ++static inline HMAC_CTX * ++#endif ++get_local_hmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) +{ -+ return xsk_socket__update_xskmap(xsk, map_fd); ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of ++ * EVP_MAC_CTXs is broken, and doesn't actually reset their ++ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid ++ * undefined behavior of provided macs on EVP_MAC ++ * reinitialization"). In cases where the fix is not present, ++ * fall back to duplicating the context every buffer as a ++ * workaround, at the cost of performance. 
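A self-contained sketch of the dup-per-buffer pattern this fallback relies on, assuming OpenSSL >= 3.0 and a template context created and keyed elsewhere; the function name is illustrative and only the EVP_MAC calls already used by this driver appear:

#include <stddef.h>
#include <openssl/evp.h>

static int
mac_one_buffer(EVP_MAC_CTX *tmpl, const unsigned char *buf, size_t len,
	       unsigned char *digest, size_t digest_sz, size_t *digest_len)
{
	/* Duplicate the keyed template so its state is left untouched. */
	EVP_MAC_CTX *ctx = EVP_MAC_CTX_dup(tmpl);
	int ret = -1;

	if (ctx == NULL)
		return -1;
	/* NULL key: reuse the key already set on the template context. */
	if (EVP_MAC_init(ctx, NULL, 0, NULL) > 0 &&
	    EVP_MAC_update(ctx, buf, len) > 0 &&
	    EVP_MAC_final(ctx, digest, digest_len, digest_sz) == 1)
		ret = 0;
	EVP_MAC_CTX_free(ctx);
	return ret;
}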
++ */ ++ RTE_SET_USED(qp); ++ return EVP_MAC_CTX_dup(sess->auth.hmac.ctx); ++#else ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.hmac.ctx; ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ EVP_MAC_CTX **lctx = ++#else ++ HMAC_CTX **lctx = ++#endif ++ &sess->qp_ctx[qp->id].hmac; ++ ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ *lctx = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); ++#else ++ *lctx = HMAC_CTX_new(); ++ HMAC_CTX_copy(*lctx, sess->auth.hmac.ctx); ++#endif ++ } ++ ++ return *lctx; ++#endif +} ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++static inline EVP_MAC_CTX * +#else -+static __rte_always_inline int -+update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx) ++static inline CMAC_CTX * ++#endif ++get_local_cmac_ctx(struct openssl_session *sess, struct openssl_qp *qp) +{ -+ int fd = xsk_socket__fd(xsk); -+ return bpf_map_update_elem(map_fd, &xsk_queue_idx, &fd, 0); -+} ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ /* For OpenSSL versions 3.0.0 <= v < 3.0.3, re-initing of ++ * EVP_MAC_CTXs is broken, and doesn't actually reset their ++ * state. This was fixed in OSSL commit c9ddc5af5199 ("Avoid ++ * undefined behavior of provided macs on EVP_MAC ++ * reinitialization"). In cases where the fix is not present, ++ * fall back to duplicating the context every buffer as a ++ * workaround, at the cost of performance. ++ */ ++ RTE_SET_USED(qp); ++ return EVP_MAC_CTX_dup(sess->auth.cmac.ctx); ++#else ++ if (sess->ctx_copies_len == 0) ++ return sess->auth.cmac.ctx; ++ ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ EVP_MAC_CTX **lctx = ++#else ++ CMAC_CTX **lctx = +#endif ++ &sess->qp_ctx[qp->id].cmac; + - #ifdef XDP_USE_NEED_WAKEUP - static int - tx_syscall_needed(struct xsk_ring_prod *q) -diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build -index 9f33e57fa2..69d109ff46 100644 ---- a/dpdk/drivers/net/af_xdp/meson.build -+++ b/dpdk/drivers/net/af_xdp/meson.build -@@ -7,6 +7,12 @@ if is_windows - subdir_done() - endif - -+if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32') -+ build = false -+ reason = 'not supported on 32-bit x86' -+ subdir_done() -+endif ++ if (unlikely(*lctx == NULL)) { ++#if OPENSSL_VERSION_NUMBER >= 0x30000000L ++ *lctx = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); ++#else ++ *lctx = CMAC_CTX_new(); ++ CMAC_CTX_copy(*lctx, sess->auth.cmac.ctx); ++#endif ++ } + - sources = files('rte_eth_af_xdp.c') ++ return *lctx; ++#endif ++} ++ + /** Process auth/cipher combined operation */ + static void +-process_openssl_combined_op +- (struct rte_crypto_op *op, struct openssl_session *sess, +- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) ++process_openssl_combined_op(struct openssl_qp *qp, struct rte_crypto_op *op, ++ struct openssl_session *sess, struct rte_mbuf *mbuf_src, ++ struct rte_mbuf *mbuf_dst) + { + /* cipher */ + uint8_t *dst = NULL, *iv, *tag, *aad; +@@ -1590,6 +1768,8 @@ process_openssl_combined_op + return; + } - libxdp_ver = '>=1.2.2' -@@ -77,6 +83,10 @@ if build - dependencies : bpf_dep, args: cflags) - cflags += ['-DRTE_NET_AF_XDP_LIBBPF_XDP_ATTACH'] - endif -+ if cc.has_function('xsk_socket__update_xskmap', prefix : xsk_check_prefix, -+ dependencies : ext_deps, args: cflags) -+ cflags += ['-DETH_AF_XDP_UPDATE_XSKMAP'] -+ endif - endif ++ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); ++ + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + sess->iv.offset); + if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { +@@ -1623,12 
+1803,12 @@ process_openssl_combined_op + status = process_openssl_auth_encryption_gcm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, sess->cipher.ctx); ++ dst, tag, ctx); + else + status = process_openssl_auth_encryption_ccm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, taglen, sess->cipher.ctx); ++ dst, tag, taglen, ctx); - require_iova_in_mbuf = false -diff --git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -index 353c8688ec..74f750dbb3 100644 ---- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -+++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c -@@ -83,12 +83,13 @@ RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE); + } else { + if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC || +@@ -1636,12 +1816,12 @@ process_openssl_combined_op + status = process_openssl_auth_decryption_gcm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, sess->cipher.ctx); ++ dst, tag, ctx); + else + status = process_openssl_auth_decryption_ccm( + mbuf_src, offset, srclen, + aad, aadlen, iv, +- dst, tag, taglen, sess->cipher.ctx); ++ dst, tag, taglen, ctx); + } - #define ETH_AF_XDP_MP_KEY "afxdp_mp_send_fds" + if (status != 0) { +@@ -1656,14 +1836,13 @@ process_openssl_combined_op -+#define DP_BASE_PATH "/tmp/afxdp_dp" -+#define DP_UDS_SOCK "afxdp.sock" - #define MAX_LONG_OPT_SZ 64 - #define UDS_MAX_FD_NUM 2 - #define UDS_MAX_CMD_LEN 64 - #define UDS_MAX_CMD_RESP 128 - #define UDS_XSK_MAP_FD_MSG "/xsk_map_fd" --#define UDS_SOCK "/tmp/afxdp.sock" - #define UDS_CONNECT_MSG "/connect" - #define UDS_HOST_OK_MSG "/host_ok" - #define UDS_HOST_NAK_MSG "/host_nak" -@@ -123,7 +124,7 @@ struct xsk_umem_info { - struct rx_stats { - uint64_t rx_pkts; - uint64_t rx_bytes; -- uint64_t rx_dropped; -+ uint64_t imissed_offset; - }; + /** Process cipher operation */ + static void +-process_openssl_cipher_op +- (struct rte_crypto_op *op, struct openssl_session *sess, +- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst) ++process_openssl_cipher_op(struct openssl_qp *qp, struct rte_crypto_op *op, ++ struct openssl_session *sess, struct rte_mbuf *mbuf_src, ++ struct rte_mbuf *mbuf_dst) + { + uint8_t *dst, *iv; + int srclen, status; + uint8_t inplace = (mbuf_src == mbuf_dst) ? 
1 : 0; +- EVP_CIPHER_CTX *ctx_copy; - struct pkt_rx_queue { -@@ -131,6 +132,7 @@ struct pkt_rx_queue { - struct xsk_umem_info *umem; - struct xsk_socket *xsk; - struct rte_mempool *mb_pool; -+ uint16_t port; + /* + * Segmented OOP destination buffer is not supported for encryption/ +@@ -1682,25 +1861,22 @@ process_openssl_cipher_op - struct rx_stats stats; + iv = rte_crypto_op_ctod_offset(op, uint8_t *, + sess->iv.offset); +- ctx_copy = EVP_CIPHER_CTX_new(); +- EVP_CIPHER_CTX_copy(ctx_copy, sess->cipher.ctx); ++ ++ EVP_CIPHER_CTX *ctx = get_local_cipher_ctx(sess, qp); -@@ -171,6 +173,7 @@ struct pmd_internals { - bool custom_prog_configured; - bool force_copy; - bool use_cni; -+ char dp_path[PATH_MAX]; - struct bpf_map *map; + if (sess->cipher.mode == OPENSSL_CIPHER_LIB) + if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + status = process_openssl_cipher_encrypt(mbuf_src, dst, + op->sym->cipher.data.offset, iv, +- srclen, ctx_copy, inplace); ++ srclen, ctx, inplace); + else + status = process_openssl_cipher_decrypt(mbuf_src, dst, + op->sym->cipher.data.offset, iv, +- srclen, ctx_copy, inplace); ++ srclen, ctx, inplace); + else + status = process_openssl_cipher_des3ctr(mbuf_src, dst, +- op->sym->cipher.data.offset, iv, +- sess->cipher.key.data, srclen, +- ctx_copy); ++ op->sym->cipher.data.offset, iv, srclen, ctx); - struct rte_ether_addr eth_addr; -@@ -191,6 +194,7 @@ struct pmd_process_private { - #define ETH_AF_XDP_BUDGET_ARG "busy_budget" - #define ETH_AF_XDP_FORCE_COPY_ARG "force_copy" - #define ETH_AF_XDP_USE_CNI_ARG "use_cni" -+#define ETH_AF_XDP_DP_PATH_ARG "dp_path" +- EVP_CIPHER_CTX_free(ctx_copy); + if (status != 0) + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + } +@@ -1819,42 +1995,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, - static const char * const valid_arguments[] = { - ETH_AF_XDP_IFACE_ARG, -@@ -201,6 +205,7 @@ static const char * const valid_arguments[] = { - ETH_AF_XDP_BUDGET_ARG, - ETH_AF_XDP_FORCE_COPY_ARG, - ETH_AF_XDP_USE_CNI_ARG, -+ ETH_AF_XDP_DP_PATH_ARG, - NULL - }; + switch (sess->auth.mode) { + case OPENSSL_AUTH_AS_AUTH: +- ctx_a = EVP_MD_CTX_create(); +- EVP_MD_CTX_copy_ex(ctx_a, sess->auth.auth.ctx); ++ ctx_a = get_local_auth_ctx(sess, qp); + status = process_openssl_auth(mbuf_src, dst, + op->sym->auth.data.offset, NULL, NULL, srclen, + ctx_a, sess->auth.auth.evp_algo); +- EVP_MD_CTX_destroy(ctx_a); + break; + case OPENSSL_AUTH_AS_HMAC: ++ ctx_h = get_local_hmac_ctx(sess, qp); + # if OPENSSL_VERSION_NUMBER >= 0x30000000L +- ctx_h = EVP_MAC_CTX_dup(sess->auth.hmac.ctx); + status = process_openssl_auth_mac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_h); + # else +- ctx_h = HMAC_CTX_new(); +- HMAC_CTX_copy(ctx_h, sess->auth.hmac.ctx); + status = process_openssl_auth_hmac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_h); +- HMAC_CTX_free(ctx_h); + # endif ++#if (OPENSSL_VERSION_NUMBER >= 0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ EVP_MAC_CTX_free(ctx_h); ++#endif + break; + case OPENSSL_AUTH_AS_CMAC: ++ ctx_c = get_local_cmac_ctx(sess, qp); + # if OPENSSL_VERSION_NUMBER >= 0x30000000L +- ctx_c = EVP_MAC_CTX_dup(sess->auth.cmac.ctx); + status = process_openssl_auth_mac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_c); + # else +- ctx_c = CMAC_CTX_new(); +- CMAC_CTX_copy(ctx_c, sess->auth.cmac.ctx); + status = process_openssl_auth_cmac(mbuf_src, dst, + op->sym->auth.data.offset, srclen, + ctx_c); +- CMAC_CTX_free(ctx_c); + # endif ++#if (OPENSSL_VERSION_NUMBER >= 
0x30000000L && OPENSSL_VERSION_NUMBER < 0x30003000L) ++ EVP_MAC_CTX_free(ctx_c); ++#endif + break; + default: + status = -1; +@@ -1939,7 +2113,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, + dsa_sign_data_p = (const unsigned char *)dsa_sign_data; + DSA_SIG *sign = d2i_DSA_SIG(NULL, &dsa_sign_data_p, outlen); + if (!sign) { +- OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); ++ OPENSSL_LOG(ERR, "%s:%d", __func__, __LINE__); + OPENSSL_free(dsa_sign_data); + goto err_dsa_sign; + } else { +@@ -1984,7 +2158,7 @@ process_openssl_dsa_verify_op_evp(struct rte_crypto_op *cop, -@@ -311,6 +316,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - unsigned long rx_bytes = 0; - int i; - struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; -+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + if (!param_bld) { +- OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__); ++ OPENSSL_LOG(ERR, " %s:%d", __func__, __LINE__); + return -1; + } - nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); +@@ -2062,7 +2236,7 @@ process_openssl_dsa_sign_op(struct rte_crypto_op *cop, + dsa); -@@ -338,6 +344,8 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - * xsk_ring_cons__peek - */ - rx->cached_cons -= nb_pkts; -+ dev->data->rx_mbuf_alloc_failed += nb_pkts; -+ - return 0; + if (sign == NULL) { +- OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); ++ OPENSSL_LOG(ERR, "%s:%d", __func__, __LINE__); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } else { + const BIGNUM *r = NULL, *s = NULL; +@@ -2091,7 +2265,7 @@ process_openssl_dsa_verify_op(struct rte_crypto_op *cop, + BIGNUM *pub_key = NULL; + + if (sign == NULL) { +- OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__); ++ OPENSSL_LOG(ERR, " %s:%d", __func__, __LINE__); + cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return -1; } +@@ -2168,7 +2342,7 @@ process_openssl_dh_op_evp(struct rte_crypto_op *cop, -@@ -360,6 +368,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - - rte_pktmbuf_priv_size(umem->mb_pool) - - umem->mb_pool->header_size; -+ bufs[i]->port = rxq->port; + if (!OSSL_PARAM_BLD_push_BN(param_bld_peer, OSSL_PKEY_PARAM_PUB_KEY, + pub_key)) { +- OPENSSL_LOG(ERR, "Failed to set public key\n"); ++ OPENSSL_LOG(ERR, "Failed to set public key"); + OSSL_PARAM_BLD_free(param_bld_peer); + BN_free(pub_key); + return ret; +@@ -2213,7 +2387,7 @@ process_openssl_dh_op_evp(struct rte_crypto_op *cop, + + if (!OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, + priv_key)) { +- OPENSSL_LOG(ERR, "Failed to set private key\n"); ++ OPENSSL_LOG(ERR, "Failed to set private key"); + EVP_PKEY_CTX_free(peer_ctx); + OSSL_PARAM_free(params_peer); + BN_free(pub_key); +@@ -2239,7 +2413,7 @@ process_openssl_dh_op_evp(struct rte_crypto_op *cop, + goto err_dh; + + if (op->ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE) { +- OPENSSL_LOG(DEBUG, "%s:%d updated pub key\n", __func__, __LINE__); ++ OPENSSL_LOG(DEBUG, "%s:%d updated pub key", __func__, __LINE__); + if (!EVP_PKEY_get_bn_param(dhpkey, OSSL_PKEY_PARAM_PUB_KEY, &pub_key)) + goto err_dh; + /* output public key */ +@@ -2248,7 +2422,7 @@ process_openssl_dh_op_evp(struct rte_crypto_op *cop, + + if (op->ke_type == RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE) { + +- OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n", __func__, __LINE__); ++ OPENSSL_LOG(DEBUG, "%s:%d updated priv key", __func__, __LINE__); + if (!EVP_PKEY_get_bn_param(dhpkey, 
OSSL_PKEY_PARAM_PRIV_KEY, &priv_key)) + goto err_dh; + +@@ -2343,7 +2517,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop, + } + ret = set_dh_priv_key(dh_key, priv_key); + if (ret) { +- OPENSSL_LOG(ERR, "Failed to set private key\n"); ++ OPENSSL_LOG(ERR, "Failed to set private key"); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + BN_free(peer_key); + BN_free(priv_key); +@@ -2390,7 +2564,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop, + } + ret = set_dh_priv_key(dh_key, priv_key); + if (ret) { +- OPENSSL_LOG(ERR, "Failed to set private key\n"); ++ OPENSSL_LOG(ERR, "Failed to set private key"); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + BN_free(priv_key); + return 0; +@@ -2412,7 +2586,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop, + if (asym_op->dh.ke_type == RTE_CRYPTO_ASYM_KE_PUB_KEY_GENERATE) { + const BIGNUM *pub_key = NULL; - rte_pktmbuf_pkt_len(bufs[i]) = len; - rte_pktmbuf_data_len(bufs[i]) = len; -@@ -388,6 +397,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - int i; - uint32_t free_thresh = fq->size >> 1; - struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; -+ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; +- OPENSSL_LOG(DEBUG, "%s:%d update public key\n", ++ OPENSSL_LOG(DEBUG, "%s:%d update public key", + __func__, __LINE__); - if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) - (void)reserve_fill_queue(umem, nb_pkts, NULL, fq); -@@ -406,6 +416,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - * xsk_ring_cons__peek - */ - rx->cached_cons -= nb_pkts; -+ dev->data->rx_mbuf_alloc_failed += nb_pkts; - return 0; - } + /* get the generated keys */ +@@ -2426,7 +2600,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop, + if (asym_op->dh.ke_type == RTE_CRYPTO_ASYM_KE_PRIV_KEY_GENERATE) { + const BIGNUM *priv_key = NULL; -@@ -426,6 +437,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - rte_pktmbuf_data_len(mbufs[i]) = len; - rx_bytes += len; - bufs[i] = mbufs[i]; -+ bufs[i]->port = rxq->port; +- OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n", ++ OPENSSL_LOG(DEBUG, "%s:%d updated priv key", + __func__, __LINE__); + + /* get the generated keys */ +@@ -2535,7 +2709,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + default: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + OPENSSL_LOG(ERR, +- "rsa pad type not supported %d\n", pad); ++ "rsa pad type not supported %d", pad); + return ret; } - xsk_ring_cons__release(rx, nb_pkts); -@@ -867,7 +879,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +@@ -2562,7 +2736,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + op->rsa.cipher.length = outlen; - stats->ipackets += stats->q_ipackets[i]; - stats->ibytes += stats->q_ibytes[i]; -- stats->imissed += rxq->stats.rx_dropped; - stats->oerrors += txq->stats.tx_dropped; - fd = process_private->rxq_xsk_fds[i]; - ret = fd >= 0 ? 
getsockopt(fd, SOL_XDP, XDP_STATISTICS, -@@ -876,7 +887,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) - AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); - return -1; - } -- stats->imissed += xdp_stats.rx_dropped; -+ stats->imissed += xdp_stats.rx_dropped - rxq->stats.imissed_offset; + OPENSSL_LOG(DEBUG, +- "length of encrypted text %zu\n", outlen); ++ "length of encrypted text %zu", outlen); + break; - stats->opackets += stats->q_opackets[i]; - stats->obytes += stats->q_obytes[i]; -@@ -889,13 +900,25 @@ static int - eth_stats_reset(struct rte_eth_dev *dev) - { - struct pmd_internals *internals = dev->data->dev_private; -- int i; -+ struct pmd_process_private *process_private = dev->process_private; -+ struct xdp_statistics xdp_stats; -+ socklen_t optlen; -+ int i, ret, fd; + case RTE_CRYPTO_ASYM_OP_DECRYPT: +@@ -2586,7 +2760,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + goto err_rsa; + op->rsa.message.length = outlen; - for (i = 0; i < internals->queue_cnt; i++) { - memset(&internals->rx_queues[i].stats, 0, - sizeof(struct rx_stats)); - memset(&internals->tx_queues[i].stats, 0, - sizeof(struct tx_stats)); -+ fd = process_private->rxq_xsk_fds[i]; -+ optlen = sizeof(struct xdp_statistics); -+ ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS, -+ &xdp_stats, &optlen) : -1; -+ if (ret != 0) { -+ AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); -+ return -1; -+ } -+ internals->rx_queues[i].stats.imissed_offset = xdp_stats.rx_dropped; - } +- OPENSSL_LOG(DEBUG, "length of decrypted text %zu\n", outlen); ++ OPENSSL_LOG(DEBUG, "length of decrypted text %zu", outlen); + break; - return 0; -@@ -960,6 +983,9 @@ remove_xdp_program(struct pmd_internals *internals) - static void - xdp_umem_destroy(struct xsk_umem_info *umem) - { -+ (void)xsk_umem__delete(umem->umem); -+ umem->umem = NULL; -+ - #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) - umem->mb_pool = NULL; - #else -@@ -992,11 +1018,8 @@ eth_dev_close(struct rte_eth_dev *dev) - break; - xsk_socket__delete(rxq->xsk); + case RTE_CRYPTO_ASYM_OP_SIGN: +@@ -2641,7 +2815,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, + + OPENSSL_LOG(DEBUG, + "Length of public_decrypt %zu " +- "length of message %zd\n", ++ "length of message %zd", + outlen, op->rsa.message.length); + if (CRYPTO_memcmp(tmp, op->rsa.message.data, + op->rsa.message.length)) { +@@ -2913,7 +3087,7 @@ process_openssl_rsa_op(struct rte_crypto_op *cop, + default: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + OPENSSL_LOG(ERR, +- "rsa pad type not supported %d\n", pad); ++ "rsa pad type not supported %d", pad); + return 0; + } -- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 -- == 0) { -- (void)xsk_umem__delete(rxq->umem->umem); -+ if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) - xdp_umem_destroy(rxq->umem); -- } +@@ -2928,7 +3102,7 @@ process_openssl_rsa_op(struct rte_crypto_op *cop, + if (ret > 0) + op->rsa.cipher.length = ret; + OPENSSL_LOG(DEBUG, +- "length of encrypted text %d\n", ret); ++ "length of encrypted text %d", ret); + break; - /* free pkt_tx_queue */ - rte_free(rxq->pair); -@@ -1234,6 +1257,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, - AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); - goto err; - } -+ umem->mz = mz; + case RTE_CRYPTO_ASYM_OP_DECRYPT: +@@ -2966,7 +3140,7 @@ process_openssl_rsa_op(struct rte_crypto_op *cop, - ret = xsk_umem__create(&umem->umem, mz->addr, - ETH_AF_XDP_NUM_BUFFERS * 
ETH_AF_XDP_FRAME_SIZE, -@@ -1244,7 +1268,6 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, - AF_XDP_LOG(ERR, "Failed to create umem\n"); - goto err; - } -- umem->mz = mz; + OPENSSL_LOG(DEBUG, + "Length of public_decrypt %d " +- "length of message %zd\n", ++ "length of message %zd", + ret, op->rsa.message.length); + if ((ret <= 0) || (CRYPTO_memcmp(tmp, op->rsa.message.data, + op->rsa.message.length))) { +@@ -3105,13 +3279,13 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, - return umem; + switch (sess->chain_order) { + case OPENSSL_CHAIN_ONLY_CIPHER: +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_ONLY_AUTH: + process_openssl_auth_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_CIPHER_AUTH: +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + /* OOP */ + if (msrc != mdst) + copy_plaintext(msrc, mdst, op); +@@ -3119,10 +3293,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, + break; + case OPENSSL_CHAIN_AUTH_CIPHER: + process_openssl_auth_op(qp, op, sess, msrc, mdst); +- process_openssl_cipher_op(op, sess, msrc, mdst); ++ process_openssl_cipher_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_COMBINED: +- process_openssl_combined_op(op, sess, msrc, mdst); ++ process_openssl_combined_op(qp, op, sess, msrc, mdst); + break; + case OPENSSL_CHAIN_CIPHER_BPI: + process_openssl_docsis_bpi_op(op, sess, msrc, mdst); +diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +index b16baaa08f..b7b612fc57 100644 +--- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c ++++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c +@@ -794,9 +794,35 @@ qp_setup_cleanup: -@@ -1351,7 +1374,7 @@ err_prefer: + /** Returns the size of the symmetric session structure */ + static unsigned +-openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) ++openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev) + { +- return sizeof(struct openssl_session); ++ /* ++ * For 0 qps, return the max size of the session - this is necessary if ++ * the user calls into this function to create the session mempool, ++ * without first configuring the number of qps for the cryptodev. ++ */ ++ if (dev->data->nb_queue_pairs == 0) { ++ unsigned int max_nb_qps = ((struct openssl_private *) ++ dev->data->dev_private)->max_nb_qpairs; ++ return sizeof(struct openssl_session) + ++ (sizeof(struct evp_ctx_pair) * max_nb_qps); ++ } ++ ++ /* ++ * With only one queue pair, the thread safety of multiple context ++ * copies is not necessary, so don't allocate extra memory for the ++ * array. ++ */ ++ if (dev->data->nb_queue_pairs == 1) ++ return sizeof(struct openssl_session); ++ ++ /* ++ * Otherwise, the size of the flexible array member should be enough to ++ * fit pointers to per-qp contexts. This is twice the number of queue ++ * pairs, to allow for auth and cipher contexts. 
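The worst-case size matters because applications usually size their session mempool from this getter before any queue pairs are configured. A hypothetical application-side sketch (pool name, element count and cache size are arbitrary examples, not taken from this patch):

#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_mempool *
example_session_pool(uint8_t dev_id, int socket_id)
{
	/* May run before rte_cryptodev_configure(), so the PMD reports the
	 * size needed for its maximum number of queue pairs. */
	unsigned int elt_sz = rte_cryptodev_sym_get_private_session_size(dev_id);

	return rte_cryptodev_sym_session_pool_create("example_sess_pool",
			1024, elt_sz, 128, 0, socket_id);
}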
++ */ ++ return sizeof(struct openssl_session) + ++ (sizeof(struct evp_ctx_pair) * dev->data->nb_queue_pairs); } + /** Returns the size of the asymmetric session structure */ +@@ -808,7 +834,7 @@ openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused) + + /** Configure the session from a crypto xform chain */ static int --init_uds_sock(struct sockaddr_un *server) -+init_uds_sock(struct sockaddr_un *server, const char *dp_path) +-openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, ++openssl_pmd_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess) { - int sock; - -@@ -1362,7 +1385,7 @@ init_uds_sock(struct sockaddr_un *server) +@@ -820,7 +846,8 @@ openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, + return -EINVAL; } - server->sun_family = AF_UNIX; -- strlcpy(server->sun_path, UDS_SOCK, sizeof(server->sun_path)); -+ strlcpy(server->sun_path, dp_path, sizeof(server->sun_path)); +- ret = openssl_set_session_parameters(sess_private_data, xform); ++ ret = openssl_set_session_parameters(sess_private_data, xform, ++ dev->data->nb_queue_pairs); + if (ret != 0) { + OPENSSL_LOG(ERR, "failed configure session parameters"); - if (connect(sock, (struct sockaddr *)server, sizeof(struct sockaddr_un)) < 0) { - close(sock); -@@ -1382,7 +1405,7 @@ struct msg_internal { - }; +@@ -865,7 +892,7 @@ static int openssl_set_asym_session_parameters( + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + OSSL_PARAM_BLD * param_bld = OSSL_PARAM_BLD_new(); + if (!param_bld) { +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + goto err_rsa; + } - static int --send_msg(int sock, char *request, int *fd) -+send_msg(int sock, char *request, int *fd, const char *dp_path) - { - int snd; - struct iovec iov; -@@ -1393,7 +1416,7 @@ send_msg(int sock, char *request, int *fd) +@@ -873,7 +900,7 @@ static int openssl_set_asym_session_parameters( + || !OSSL_PARAM_BLD_push_BN(param_bld, + OSSL_PKEY_PARAM_RSA_E, e)) { + OSSL_PARAM_BLD_free(param_bld); +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + goto err_rsa; + } - memset(&dst, 0, sizeof(dst)); - dst.sun_family = AF_UNIX; -- strlcpy(dst.sun_path, UDS_SOCK, sizeof(dst.sun_path)); -+ strlcpy(dst.sun_path, dp_path, sizeof(dst.sun_path)); +@@ -1006,14 +1033,14 @@ static int openssl_set_asym_session_parameters( + ret = set_rsa_params(rsa, p, q); + if (ret) { + OPENSSL_LOG(ERR, +- "failed to set rsa params\n"); ++ "failed to set rsa params"); + RSA_free(rsa); + goto err_rsa; + } + ret = set_rsa_crt_params(rsa, dmp1, dmq1, iqmp); + if (ret) { + OPENSSL_LOG(ERR, +- "failed to set crt params\n"); ++ "failed to set crt params"); + RSA_free(rsa); + /* + * set already populated params to NULL +@@ -1026,7 +1053,7 @@ static int openssl_set_asym_session_parameters( - /* Initialize message header structure */ - memset(&msgh, 0, sizeof(msgh)); -@@ -1470,8 +1493,8 @@ read_msg(int sock, char *response, struct sockaddr_un *s, int *fd) - } + ret = set_rsa_keys(rsa, n, e, d); + if (ret) { +- OPENSSL_LOG(ERR, "Failed to load rsa keys\n"); ++ OPENSSL_LOG(ERR, "Failed to load rsa keys"); + RSA_free(rsa); + return ret; + } +@@ -1053,7 +1080,7 @@ err_rsa: + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_LOG(ERR, +- " failed to allocate resources\n"); ++ " failed to allocate resources"); + return ret; + } + BN_CTX_start(ctx); +@@ -1084,7 +1111,7 @@ 
err_rsa: + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_LOG(ERR, +- " failed to allocate resources\n"); ++ " failed to allocate resources"); + return ret; + } + BN_CTX_start(ctx); +@@ -1125,7 +1152,7 @@ err_rsa: + OSSL_PARAM_BLD *param_bld = NULL; + param_bld = OSSL_PARAM_BLD_new(); + if (!param_bld) { +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + goto err_dh; + } + if ((!OSSL_PARAM_BLD_push_utf8_string(param_bld, +@@ -1141,7 +1168,7 @@ err_rsa: + OSSL_PARAM_BLD *param_bld_peer = NULL; + param_bld_peer = OSSL_PARAM_BLD_new(); + if (!param_bld_peer) { +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + OSSL_PARAM_BLD_free(param_bld); + goto err_dh; + } +@@ -1176,7 +1203,7 @@ err_rsa: + dh = DH_new(); + if (dh == NULL) { + OPENSSL_LOG(ERR, +- "failed to allocate resources\n"); ++ "failed to allocate resources"); + goto err_dh; + } + ret = set_dh_params(dh, p, g); +@@ -1190,7 +1217,7 @@ err_rsa: + break; - static int --make_request_cni(int sock, struct sockaddr_un *server, char *request, -- int *req_fd, char *response, int *out_fd) -+make_request_dp(int sock, struct sockaddr_un *server, char *request, -+ int *req_fd, char *response, int *out_fd, const char *dp_path) - { - int rval; + err_dh: +- OPENSSL_LOG(ERR, " failed to set dh params\n"); ++ OPENSSL_LOG(ERR, " failed to set dh params"); + #if (OPENSSL_VERSION_NUMBER >= 0x30000000L) + BN_free(*p); + BN_free(*g); +@@ -1236,7 +1263,7 @@ err_dh: + + param_bld = OSSL_PARAM_BLD_new(); + if (!param_bld) { +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + goto err_dsa; + } -@@ -1483,7 +1506,7 @@ make_request_cni(int sock, struct sockaddr_un *server, char *request, - if (req_fd == NULL) - rval = write(sock, request, strlen(request)); - else -- rval = send_msg(sock, request, req_fd); -+ rval = send_msg(sock, request, req_fd, dp_path); +@@ -1246,7 +1273,7 @@ err_dh: + || !OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, + *priv_key)) { + OSSL_PARAM_BLD_free(param_bld); +- OPENSSL_LOG(ERR, "failed to allocate resources\n"); ++ OPENSSL_LOG(ERR, "failed to allocate resources"); + goto err_dsa; + } + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA; +@@ -1286,14 +1313,14 @@ err_dh: + DSA *dsa = DSA_new(); + if (dsa == NULL) { + OPENSSL_LOG(ERR, +- " failed to allocate resources\n"); ++ " failed to allocate resources"); + goto err_dsa; + } - if (rval < 0) { - AF_XDP_LOG(ERR, "Write error %s\n", strerror(errno)); -@@ -1507,7 +1530,7 @@ check_response(char *response, char *exp_resp, long size) - } + ret = set_dsa_params(dsa, p, q, g); + if (ret) { + DSA_free(dsa); +- OPENSSL_LOG(ERR, "Failed to dsa params\n"); ++ OPENSSL_LOG(ERR, "Failed to dsa params"); + goto err_dsa; + } - static int --get_cni_fd(char *if_name) -+uds_get_xskmap_fd(char *if_name, const char *dp_path) - { - char request[UDS_MAX_CMD_LEN], response[UDS_MAX_CMD_RESP]; - char hostname[MAX_LONG_OPT_SZ], exp_resp[UDS_MAX_CMD_RESP]; -@@ -1520,14 +1543,14 @@ get_cni_fd(char *if_name) - return -1; +@@ -1307,7 +1334,7 @@ err_dh: + ret = set_dsa_keys(dsa, pub_key, priv_key); + if (ret) { + DSA_free(dsa); +- OPENSSL_LOG(ERR, "Failed to set keys\n"); ++ OPENSSL_LOG(ERR, "Failed to set keys"); + goto err_dsa; + } + asym_session->u.s.dsa = dsa; +@@ -1342,21 +1369,21 @@ err_dsa: + + param_bld = OSSL_PARAM_BLD_new(); + if (!param_bld) { +- OPENSSL_LOG(ERR, "failed to allocate params\n"); ++ 
OPENSSL_LOG(ERR, "failed to allocate params"); + goto err_sm2; + } - memset(&server, 0, sizeof(server)); -- sock = init_uds_sock(&server); -+ sock = init_uds_sock(&server, dp_path); - if (sock < 0) - return -1; + ret = OSSL_PARAM_BLD_push_utf8_string(param_bld, + OSSL_ASYM_CIPHER_PARAM_DIGEST, "SM3", 0); + if (!ret) { +- OPENSSL_LOG(ERR, "failed to push params\n"); ++ OPENSSL_LOG(ERR, "failed to push params"); + goto err_sm2; + } -- /* Initiates handshake to CNI send: /connect,hostname */ -+ /* Initiates handshake to the AF_XDP Device Plugin send: /connect,hostname */ - snprintf(request, sizeof(request), "%s,%s", UDS_CONNECT_MSG, hostname); - memset(response, 0, sizeof(response)); -- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { -+ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { - AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); - goto err_close; - } -@@ -1541,7 +1564,7 @@ get_cni_fd(char *if_name) - /* Request for "/version" */ - strlcpy(request, UDS_VERSION_MSG, UDS_MAX_CMD_LEN); - memset(response, 0, sizeof(response)); -- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { -+ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { - AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); - goto err_close; - } -@@ -1549,7 +1572,7 @@ get_cni_fd(char *if_name) - /* Request for file descriptor for netdev name*/ - snprintf(request, sizeof(request), "%s,%s", UDS_XSK_MAP_FD_MSG, if_name); - memset(response, 0, sizeof(response)); -- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { -+ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { - AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); - goto err_close; - } -@@ -1571,7 +1594,7 @@ get_cni_fd(char *if_name) - /* Initiate close connection */ - strlcpy(request, UDS_FIN_MSG, UDS_MAX_CMD_LEN); - memset(response, 0, sizeof(response)); -- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { -+ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { - AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); - goto err_close; - } -@@ -1695,21 +1718,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, - } + ret = OSSL_PARAM_BLD_push_utf8_string(param_bld, + OSSL_PKEY_PARAM_GROUP_NAME, "SM2", 0); + if (!ret) { +- OPENSSL_LOG(ERR, "failed to push params\n"); ++ OPENSSL_LOG(ERR, "failed to push params"); + goto err_sm2; + } - if (internals->use_cni) { -- int err, fd, map_fd; -+ int err, map_fd; +@@ -1366,7 +1393,7 @@ err_dsa: + ret = OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY, + pkey_bn); + if (!ret) { +- OPENSSL_LOG(ERR, "failed to push params\n"); ++ OPENSSL_LOG(ERR, "failed to push params"); + goto err_sm2; + } -- /* get socket fd from CNI plugin */ -- map_fd = get_cni_fd(internals->if_name); -+ /* get socket fd from AF_XDP Device Plugin */ -+ map_fd = uds_get_xskmap_fd(internals->if_name, internals->dp_path); - if (map_fd < 0) { -- AF_XDP_LOG(ERR, "Failed to receive CNI plugin fd\n"); -+ AF_XDP_LOG(ERR, "Failed to receive xskmap fd from AF_XDP Device Plugin\n"); - goto out_xsk; +@@ -1381,13 +1408,13 @@ err_dsa: + ret = OSSL_PARAM_BLD_push_octet_string(param_bld, + OSSL_PKEY_PARAM_PUB_KEY, pubkey, len); + if (!ret) { +- OPENSSL_LOG(ERR, "failed to push params\n"); ++ OPENSSL_LOG(ERR, "failed to push params"); + goto err_sm2; } -- /* get socket fd */ -- fd 
= xsk_socket__fd(rxq->xsk); -- err = bpf_map_update_elem(map_fd, &rxq->xsk_queue_idx, &fd, 0); -+ -+ err = update_xskmap(rxq->xsk, map_fd, rxq->xsk_queue_idx); - if (err) { -- AF_XDP_LOG(ERR, "Failed to insert unprivileged xsk in map.\n"); -+ AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n"); - goto out_xsk; + + params = OSSL_PARAM_BLD_to_param(param_bld); + if (!params) { +- OPENSSL_LOG(ERR, "failed to push params\n"); ++ OPENSSL_LOG(ERR, "failed to push params"); + goto err_sm2; } -+ - } else if (rxq->busy_budget) { - ret = configure_preferred_busy_poll(rxq); - if (ret) { -@@ -1779,6 +1802,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, - process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd; +diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +index de72383d4b..b44acece7c 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c ++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +@@ -9,6 +9,7 @@ + #include "qat_asym.h" + #include "qat_crypto.h" + #include "qat_crypto_pmd_gens.h" ++#include "adf_transport_access_macros_gen4vf.h" -+ rxq->port = dev->data->port_id; -+ - dev->data->rx_queues[rx_queue_id] = rxq; - return 0; -@@ -1881,13 +1906,13 @@ static const struct eth_dev_ops ops = { - .get_monitor_addr = eth_get_monitor_addr, - }; + static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = { +@@ -233,6 +234,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx, + return 0; + } --/* CNI option works in unprivileged container environment -- * and ethernet device functionality will be reduced. So -- * additional customiszed eth_dev_ops struct is needed -- * for cni. Promiscuous enable and disable functionality -- * is removed. -+/* AF_XDP Device Plugin option works in unprivileged -+ * container environments and ethernet device functionality -+ * will be reduced. So additional customised eth_dev_ops -+ * struct is needed for the Device Plugin. Promiscuous -+ * enable and disable functionality is removed. 
- **/ --static const struct eth_dev_ops ops_cni = { -+static const struct eth_dev_ops ops_afxdp_dp = { - .dev_start = eth_dev_start, - .dev_stop = eth_dev_stop, - .dev_close = eth_dev_close, -@@ -2023,7 +2048,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues, ++int ++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) ++{ ++ struct qat_qp *qp = qp_data; ++ struct qat_queue *tx_queue = &qp->tx_q; ++ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; ++ ++ if (unlikely(dp_ctx->cached_enqueue != n)) ++ return -1; ++ ++ qp->enqueued += n; ++ qp->stats.enqueued_count += n; ++ ++ tx_queue->tail = dp_ctx->tail; ++ ++ WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr, ++ tx_queue->hw_bundle_number, ++ tx_queue->hw_queue_number, tx_queue->tail); ++ ++ tx_queue->csr_tail = tx_queue->tail; ++ dp_ctx->cached_enqueue = 0; ++ ++ return 0; ++} ++ ++int ++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n) ++{ ++ struct qat_qp *qp = qp_data; ++ struct qat_queue *rx_queue = &qp->rx_q; ++ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx; ++ ++ if (unlikely(dp_ctx->cached_dequeue != n)) ++ return -1; ++ ++ rx_queue->head = dp_ctx->head; ++ rx_queue->nb_processed_responses += n; ++ qp->dequeued += n; ++ qp->stats.dequeued_count += n; ++ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) { ++ uint32_t old_head, new_head; ++ uint32_t max_head; ++ ++ old_head = rx_queue->csr_head; ++ new_head = rx_queue->head; ++ max_head = qp->nb_descriptors * rx_queue->msg_size; ++ ++ /* write out free descriptors */ ++ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head; ++ ++ if (new_head < old_head) { ++ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, ++ max_head - old_head); ++ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE, ++ new_head); ++ } else { ++ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - ++ old_head); ++ } ++ rx_queue->nb_processed_responses = 0; ++ rx_queue->csr_head = new_head; ++ ++ /* write current head to CSR */ ++ WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr, ++ rx_queue->hw_bundle_number, rx_queue->hw_queue_number, ++ new_head); ++ } ++ ++ dp_ctx->cached_dequeue = 0; ++ return 0; ++} ++ static int - parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, - int *queue_cnt, int *shared_umem, char *prog_path, -- int *busy_budget, int *force_copy, int *use_cni) -+ int *busy_budget, int *force_copy, int *use_cni, -+ char *dp_path) + qat_sym_crypto_set_session_gen4(void *cdev, void *session) { - int ret; - -@@ -2069,6 +2095,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, - if (ret < 0) - goto free_kvlist; - -+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_DP_PATH_ARG, -+ &parse_prog_arg, dp_path); -+ if (ret < 0) -+ goto free_kvlist; -+ - free_kvlist: - rte_kvargs_free(kvlist); - return ret; -@@ -2108,7 +2139,7 @@ static struct rte_eth_dev * - init_internals(struct rte_vdev_device *dev, const char *if_name, - int start_queue_idx, int queue_cnt, int shared_umem, - const char *prog_path, int busy_budget, int force_copy, -- int use_cni) -+ int use_cni, const char *dp_path) +@@ -390,11 +463,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx) { - const char *name = rte_vdev_device_name(dev); - const unsigned int numa_node = dev->device.numa_node; -@@ -2138,6 +2169,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, - internals->shared_umem = shared_umem; - internals->force_copy = force_copy; - internals->use_cni = use_cni; -+ 
strlcpy(internals->dp_path, dp_path, PATH_MAX); + struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx; + struct qat_sym_session *ctx = _ctx; +- int ret; - if (xdp_get_channels_info(if_name, &internals->max_queue_cnt, - &internals->combined_queue_cnt)) { -@@ -2199,7 +2231,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, - if (!internals->use_cni) - eth_dev->dev_ops = &ops; - else -- eth_dev->dev_ops = &ops_cni; -+ eth_dev->dev_ops = &ops_afxdp_dp; +- ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx); +- if (ret < 0) +- return ret; ++ raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4; ++ raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1; ++ raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1; ++ raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4; ++ ++ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || ++ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && ++ !ctx->is_gmac) { ++ /* AES-GCM or AES-CCM */ ++ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || ++ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || ++ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 ++ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE ++ && ctx->qat_hash_alg == ++ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_aead_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_aead_gen1; ++ } else { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_chain_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_chain_gen1; ++ } ++ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { ++ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1; ++ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1; ++ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { ++ if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE || ++ ctx->qat_cipher_alg == ++ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_aead_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_aead_gen1; ++ } else { ++ raw_dp_ctx->enqueue_burst = ++ qat_sym_dp_enqueue_cipher_jobs_gen1; ++ raw_dp_ctx->enqueue = ++ qat_sym_dp_enqueue_single_cipher_gen1; ++ } ++ } else ++ return -1; - eth_dev->rx_pkt_burst = eth_af_xdp_rx; - eth_dev->tx_pkt_burst = eth_af_xdp_tx; -@@ -2328,6 +2360,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) - int busy_budget = -1, ret; - int force_copy = 0; - int use_cni = 0; -+ char dp_path[PATH_MAX] = {'\0'}; - struct rte_eth_dev *eth_dev = NULL; - const char *name = rte_vdev_device_name(dev); + if (ctx->is_single_pass && ctx->is_ucs) { + raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4; +diff --git a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +index b8ddf42d6f..64e892d022 100644 +--- a/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h ++++ b/dpdk/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +@@ -394,7 +394,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + struct qat_sym_op_cookie *cookie) + { + union rte_crypto_sym_ofs ofs; +- uint32_t max_len = 0; ++ uint32_t max_len = 0, oop_offset = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + int is_oop = (op->sym->m_dst != NULL) && +@@ -468,6 +468,16 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, -@@ -2370,7 +2403,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) + max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len); - if 
(parse_parameters(kvlist, if_name, &xsk_start_queue_idx, - &xsk_queue_cnt, &shared_umem, prog_path, -- &busy_budget, &force_copy, &use_cni) < 0) { -+ &busy_budget, &force_copy, &use_cni, dp_path) < 0) { - AF_XDP_LOG(ERR, "Invalid kvargs value\n"); - return -EINVAL; - } -@@ -2384,7 +2417,19 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) - if (use_cni && strnlen(prog_path, PATH_MAX)) { - AF_XDP_LOG(ERR, "When '%s' parameter is used, '%s' parameter is not valid\n", - ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_PROG_ARG); -- return -EINVAL; -+ return -EINVAL; -+ } -+ -+ if (use_cni && !strnlen(dp_path, PATH_MAX)) { -+ snprintf(dp_path, sizeof(dp_path), "%s/%s/%s", DP_BASE_PATH, if_name, DP_UDS_SOCK); -+ AF_XDP_LOG(INFO, "'%s' parameter not provided, setting value to '%s'\n", -+ ETH_AF_XDP_DP_PATH_ARG, dp_path); ++ /* If OOP, we need to keep in mind that offset needs to start where ++ * cipher/auth starts, namely no offset on the smaller one ++ */ ++ if (is_oop) { ++ oop_offset = RTE_MIN(auth_ofs, cipher_ofs); ++ auth_ofs -= oop_offset; ++ cipher_ofs -= oop_offset; ++ max_len -= oop_offset; + } + -+ if (!use_cni && strnlen(dp_path, PATH_MAX)) { -+ AF_XDP_LOG(ERR, "'%s' parameter is set, but '%s' was not enabled\n", -+ ETH_AF_XDP_DP_PATH_ARG, ETH_AF_XDP_USE_CNI_ARG); -+ return -EINVAL; + /* digest in buffer check. Needed only for wireless algos + * or combined cipher-crc operations + */ +@@ -508,9 +518,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, + max_len = RTE_MAX(max_len, auth_ofs + auth_len + + ctx->digest_length); } +- +- /* Passing 0 as cipher & auth offsets are assigned into ofs later */ +- n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len, ++ n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, oop_offset, max_len, + in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); + if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; +@@ -520,7 +528,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, - if (strlen(if_name) == 0) { -@@ -2410,7 +2455,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) - - eth_dev = init_internals(dev, if_name, xsk_start_queue_idx, - xsk_queue_cnt, shared_umem, prog_path, -- busy_budget, force_copy, use_cni); -+ busy_budget, force_copy, use_cni, dp_path); - if (eth_dev == NULL) { - AF_XDP_LOG(ERR, "Failed to init internals\n"); - return -1; -@@ -2471,4 +2516,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp, - "xdp_prog= " - "busy_budget= " - "force_copy= " -- "use_cni= "); -+ "use_cni= " -+ "dp_path= "); -diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c -index 4792754f19..8f1f90b1a4 100644 ---- a/dpdk/drivers/net/ark/ark_ethdev_tx.c -+++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c -@@ -39,8 +39,8 @@ struct ark_tx_queue { - uint32_t queue_mask; - - /* 3 indexes to the paired data rings. 
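Putting the new af_xdp dp_path option together with the existing ones, a hypothetical invocation could look like the following; the interface name is an example, and the dp_path value shown is simply the default the driver now derives from DP_BASE_PATH and DP_UDS_SOCK when the option is omitted while use_cni is set:

    --vdev=net_af_xdp,iface=eth0,use_cni=1,dp_path=/tmp/afxdp_dp/eth0/afxdp.sock

Passing dp_path without use_cni is rejected by the new validation above.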
*/ -- int32_t prod_index; /* where to put the next one */ -- int32_t free_index; /* mbuf has been freed */ -+ uint32_t prod_index; /* where to put the next one */ -+ uint32_t free_index; /* mbuf has been freed */ - - /* The queue Id is used to identify the HW Q */ - uint16_t phys_qid; -@@ -49,7 +49,7 @@ struct ark_tx_queue { - - /* next cache line - fields written by device */ - RTE_MARKER cacheline1 __rte_cache_min_aligned; -- volatile int32_t cons_index; /* hw is done, can be freed */ -+ volatile uint32_t cons_index; /* hw is done, can be freed */ - } __rte_cache_aligned; - - /* Forward declarations */ -@@ -108,7 +108,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - uint32_t user_meta[5]; - - int stat; -- int32_t prod_index_limit; -+ uint32_t prod_index_limit; - uint16_t nb; - uint8_t user_len = 0; - const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN; -@@ -123,8 +123,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - /* leave 4 elements mpu data */ - prod_index_limit = queue->queue_size + queue->free_index - 4; - -+ /* Populate the buffer bringing prod_index up to or slightly beyond -+ * prod_index_limit. Prod_index will increment by 2 or more each -+ * iteration. Note: indexes are uint32_t, cast to (signed) int32_t -+ * to catch the slight overage case; e.g. (200 - 201) -+ */ - for (nb = 0; -- (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0; -+ (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0; - ++nb) { - mbuf = tx_pkts[nb]; - -@@ -194,13 +199,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf, - uint32_t *user_meta, uint8_t meta_cnt) - { - struct rte_mbuf *next; -- int32_t free_queue_space; -+ uint32_t free_queue_space; - uint8_t flags = ARK_DDM_SOP; + if (unlikely((op->sym->m_dst != NULL) && + (op->sym->m_dst != op->sym->m_src))) { +- int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0, ++ int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, oop_offset, + max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); - free_queue_space = queue->queue_mask - - (queue->prod_index - queue->free_index); - /* We need up to 4 mbufs for first header and 2 for subsequent ones */ -- if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs)))) -+ if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs)))) - return -1; + if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { +@@ -894,10 +902,12 @@ enqueue_one_aead_job_gen1(struct qat_sym_session *ctx, + *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = + q - ICP_QAT_HW_CCM_NONCE_OFFSET; - while (mbuf != NULL) { -@@ -392,10 +397,11 @@ free_completed_tx(struct ark_tx_queue *queue) - { - struct rte_mbuf *mbuf; - union ark_tx_meta *meta; -- int32_t top_index; -+ uint32_t top_index; +- rte_memcpy((uint8_t *)aad->va + +- ICP_QAT_HW_CCM_NONCE_OFFSET, +- (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, +- ctx->cipher_iv.length); ++ if (ctx->aad_len > 0) { ++ rte_memcpy((uint8_t *)aad->va + ++ ICP_QAT_HW_CCM_NONCE_OFFSET, ++ (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, ++ ctx->cipher_iv.length); ++ } + break; + default: + break; +@@ -1007,6 +1017,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); + int + qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n); - top_index = queue->cons_index; /* read once */ -- while ((top_index - queue->free_index) > 0) { ++int ++qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); + -+ while ((int32_t)(top_index - queue->free_index) > 0) { - meta = 
&queue->meta_q[queue->free_index & queue->queue_mask]; - if (likely((meta->flags & ARK_DDM_SOP) != 0)) { - mbuf = queue->bufs[queue->free_index & -diff --git a/dpdk/drivers/net/axgbe/axgbe_common.h b/dpdk/drivers/net/axgbe/axgbe_common.h -index a5d11c5832..51532fb34a 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_common.h -+++ b/dpdk/drivers/net/axgbe/axgbe_common.h -@@ -407,8 +407,6 @@ - #define MAC_MDIOSCAR_PA_WIDTH 5 - #define MAC_MDIOSCAR_RA_INDEX 0 - #define MAC_MDIOSCAR_RA_WIDTH 16 --#define MAC_MDIOSCAR_REG_INDEX 0 --#define MAC_MDIOSCAR_REG_WIDTH 21 - #define MAC_MDIOSCCDR_BUSY_INDEX 22 - #define MAC_MDIOSCCDR_BUSY_WIDTH 1 - #define MAC_MDIOSCCDR_CMD_INDEX 16 -diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c -index 6a7fddffca..5233633a53 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_dev.c -+++ b/dpdk/drivers/net/axgbe/axgbe_dev.c -@@ -63,15 +63,27 @@ static int mdio_complete(struct axgbe_port *pdata) - return 0; ++int ++qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n); ++ + int + qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx); + +diff --git a/dpdk/drivers/crypto/qat/qat_asym.c b/dpdk/drivers/crypto/qat/qat_asym.c +index 2bf3060278..4bc087987f 100644 +--- a/dpdk/drivers/crypto/qat/qat_asym.c ++++ b/dpdk/drivers/crypto/qat/qat_asym.c +@@ -270,6 +270,7 @@ modexp_collect(struct rte_crypto_asym_op *asym_op, + rte_memcpy(modexp_result, + cookie->output_array[0] + alg_bytesize + - n.length, n.length); ++ asym_op->modex.result.length = alg_bytesize; + HEXDUMP("ModExp result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; +@@ -331,6 +332,7 @@ modinv_collect(struct rte_crypto_asym_op *asym_op, + - n.length), + cookie->output_array[0] + alg_bytesize + - n.length, n.length); ++ asym_op->modinv.result.length = alg_bytesize; + HEXDUMP("ModInv result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; +@@ -1335,11 +1337,48 @@ err: + return ret; } -+static unsigned int axgbe_create_mdio_sca(int port, int reg) -+{ -+ unsigned int mdio_sca, da; +-static void ++static int + session_set_ec(struct qat_asym_session *qat_session, + struct rte_crypto_asym_xform *xform) + { ++ uint8_t *pkey = xform->ec.pkey.data; ++ uint8_t *q_x = xform->ec.q.x.data; ++ uint8_t *q_y = xform->ec.q.y.data; + -+ da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; ++ qat_session->xform.ec.pkey.data = ++ rte_malloc(NULL, xform->ec.pkey.length, 0); ++ if (qat_session->xform.ec.pkey.length && ++ qat_session->xform.ec.pkey.data == NULL) ++ return -ENOMEM; ++ qat_session->xform.ec.q.x.data = rte_malloc(NULL, ++ xform->ec.q.x.length, 0); ++ if (qat_session->xform.ec.q.x.length && ++ qat_session->xform.ec.q.x.data == NULL) { ++ rte_free(qat_session->xform.ec.pkey.data); ++ return -ENOMEM; ++ } ++ qat_session->xform.ec.q.y.data = rte_malloc(NULL, ++ xform->ec.q.y.length, 0); ++ if (qat_session->xform.ec.q.y.length && ++ qat_session->xform.ec.q.y.data == NULL) { ++ rte_free(qat_session->xform.ec.pkey.data); ++ rte_free(qat_session->xform.ec.q.x.data); ++ return -ENOMEM; ++ } + -+ mdio_sca = 0; -+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); -+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); -+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); ++ memcpy(qat_session->xform.ec.pkey.data, pkey, ++ xform->ec.pkey.length); ++ qat_session->xform.ec.pkey.length = xform->ec.pkey.length; ++ memcpy(qat_session->xform.ec.q.x.data, q_x, ++ xform->ec.q.x.length); ++ qat_session->xform.ec.q.x.length = xform->ec.q.x.length; ++ memcpy(qat_session->xform.ec.q.y.data, q_y, ++ xform->ec.q.y.length); ++ qat_session->xform.ec.q.y.length = xform->ec.q.y.length; + qat_session->xform.ec.curve_id = xform->ec.curve_id; + -+ return mdio_sca; -+} ++ return 0; + - static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, - int reg, u16 val) - { - unsigned int mdio_sca, mdio_sccd; - uint64_t timeout; + } -- mdio_sca = 0; -- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); -- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); -+ mdio_sca = axgbe_create_mdio_sca(addr, reg); - AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + int +@@ -1373,7 +1412,7 @@ qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused, + case RTE_CRYPTO_ASYM_XFORM_ECDSA: + case RTE_CRYPTO_ASYM_XFORM_ECPM: + case RTE_CRYPTO_ASYM_XFORM_ECDH: +- session_set_ec(qat_session, xform); ++ ret = session_set_ec(qat_session, xform); + break; + case RTE_CRYPTO_ASYM_XFORM_SM2: + break; +@@ -1520,7 +1559,7 @@ qat_asym_dev_create(struct qat_pci_device *qat_pci_dev, - mdio_sccd = 0; -@@ -97,9 +109,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, - unsigned int mdio_sca, mdio_sccd; - uint64_t timeout; + snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", + qat_pci_dev->name, "asym"); +- QAT_LOG(DEBUG, "Creating QAT ASYM device %s\n", name); ++ QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name); -- mdio_sca = 0; -- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); -- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); -+ mdio_sca = axgbe_create_mdio_sca(addr, reg); - AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); + if (gen_dev_ops->cryptodev_ops == NULL) { + QAT_LOG(ERR, "Device %s does not support asymmetric crypto", +diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c +index 6e03bde841..8235fc0a5a 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym.c ++++ b/dpdk/drivers/crypto/qat/qat_sym.c +@@ -18,7 +18,6 @@ + #include "qat_qp.h" - mdio_sccd = 0; -@@ -259,20 +269,28 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed) - return 0; - } + uint8_t qat_sym_driver_id; +-int qat_legacy_capa; -+static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata) -+{ -+ unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; -+ -+ /* From MAC ver 30H the TFCR is per priority, instead of per queue */ -+ if 
(AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) -+ return max_q_count; -+ else -+ return (RTE_MIN(pdata->tx_q_count, max_q_count)); -+} -+ - static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) - { -- unsigned int max_q_count, q_count; - unsigned int reg, reg_val; -- unsigned int i; -+ unsigned int i, q_count; + struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS]; - /* Clear MTL flow control */ - for (i = 0; i < pdata->rx_q_count; i++) - AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); +@@ -266,7 +265,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev, + } - /* Clear MAC flow control */ -- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; -- q_count = RTE_MIN(pdata->tx_q_count, -- max_q_count); -+ q_count = axgbe_get_fc_queue_count(pdata); - reg = MAC_Q0TFCR; - for (i = 0; i < q_count; i++) { - reg_val = AXGMAC_IOREAD(pdata, reg); -@@ -287,9 +305,8 @@ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) + cryptodev->feature_flags |= RTE_CRYPTODEV_FF_SECURITY; +- QAT_LOG(INFO, "Device %s rte_security support ensabled", name); ++ QAT_LOG(INFO, "Device %s rte_security support enabled", name); + } else { + QAT_LOG(INFO, "Device %s rte_security support disabled", name); + } +diff --git a/dpdk/drivers/crypto/qat/qat_sym_session.c b/dpdk/drivers/crypto/qat/qat_sym_session.c +index 9f4f6c3d93..224cc0ab50 100644 +--- a/dpdk/drivers/crypto/qat/qat_sym_session.c ++++ b/dpdk/drivers/crypto/qat/qat_sym_session.c +@@ -569,7 +569,7 @@ qat_sym_session_configure_cipher(struct rte_cryptodev *dev, + ret = -ENOTSUP; + goto error_out; + default: +- QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n", ++ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u", + cipher_xform->algo); + ret = -EINVAL; + goto error_out; +@@ -1073,7 +1073,7 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev, + aead_xform); + break; + default: +- QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", ++ QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u", + aead_xform->algo); + return -EINVAL; + } +@@ -1676,7 +1676,7 @@ static int aes_ipsecmb_job(uint8_t *in, uint8_t *out, IMB_MGR *m, - static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) - { -- unsigned int max_q_count, q_count; - unsigned int reg, reg_val; -- unsigned int i; -+ unsigned int i, q_count; + err = imb_get_errno(m); + if (err) +- QAT_LOG(ERR, "Error: %s!\n", imb_get_strerror(err)); ++ QAT_LOG(ERR, "Error: %s!", imb_get_strerror(err)); - /* Set MTL flow control */ - for (i = 0; i < pdata->rx_q_count; i++) { -@@ -306,9 +323,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + return -EFAULT; + } +@@ -2480,10 +2480,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc, + &state2_size, cdesc->aes_cmac); + #endif + if (ret) { +- cdesc->aes_cmac ? QAT_LOG(ERR, +- "(CMAC)precompute failed") +- : QAT_LOG(ERR, +- "(XCBC)precompute failed"); ++ QAT_LOG(ERR, "(%s)precompute failed", ++ cdesc->aes_cmac ? 
"CMAC" : "XCBC"); + return -EFAULT; + } + break; +diff --git a/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c b/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c +index a18f7a08b0..6e43438469 100644 +--- a/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c ++++ b/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c +@@ -185,7 +185,7 @@ scheduler_session_size_get(struct scheduler_ctx *sched_ctx, + uint8_t session_type) + { + uint8_t i = 0; +- uint32_t max_priv_sess_size = 0; ++ uint32_t max_priv_sess_size = sizeof(struct scheduler_session_ctx); + + /* Check what is the maximum private session size for all workers */ + for (i = 0; i < sched_ctx->nb_workers; i++) { +diff --git a/dpdk/drivers/crypto/uadk/uadk_crypto_pmd.c b/dpdk/drivers/crypto/uadk/uadk_crypto_pmd.c +index 824383512e..e4b1a32398 100644 +--- a/dpdk/drivers/crypto/uadk/uadk_crypto_pmd.c ++++ b/dpdk/drivers/crypto/uadk/uadk_crypto_pmd.c +@@ -634,7 +634,7 @@ uadk_set_session_cipher_parameters(struct rte_cryptodev *dev, + setup.sched_param = ¶ms; + sess->handle_cipher = wd_cipher_alloc_sess(&setup); + if (!sess->handle_cipher) { +- UADK_LOG(ERR, "uadk failed to alloc session!\n"); ++ UADK_LOG(ERR, "uadk failed to alloc session!"); + ret = -EINVAL; + goto env_uninit; + } +@@ -642,7 +642,7 @@ uadk_set_session_cipher_parameters(struct rte_cryptodev *dev, + ret = wd_cipher_set_key(sess->handle_cipher, cipher->key.data, cipher->key.length); + if (ret) { + wd_cipher_free_sess(sess->handle_cipher); +- UADK_LOG(ERR, "uadk failed to set key!\n"); ++ UADK_LOG(ERR, "uadk failed to set key!"); + ret = -EINVAL; + goto env_uninit; + } +@@ -734,7 +734,7 @@ uadk_set_session_auth_parameters(struct rte_cryptodev *dev, + setup.sched_param = ¶ms; + sess->handle_digest = wd_digest_alloc_sess(&setup); + if (!sess->handle_digest) { +- UADK_LOG(ERR, "uadk failed to alloc session!\n"); ++ UADK_LOG(ERR, "uadk failed to alloc session!"); + ret = -EINVAL; + goto env_uninit; + } +@@ -745,7 +745,7 @@ uadk_set_session_auth_parameters(struct rte_cryptodev *dev, + xform->auth.key.data, + xform->auth.key.length); + if (ret) { +- UADK_LOG(ERR, "uadk failed to alloc session!\n"); ++ UADK_LOG(ERR, "uadk failed to alloc session!"); + wd_digest_free_sess(sess->handle_digest); + sess->handle_digest = 0; + ret = -EINVAL; +diff --git a/dpdk/drivers/crypto/virtio/virtio_cryptodev.c b/dpdk/drivers/crypto/virtio/virtio_cryptodev.c +index 4854820ba6..c0d3178b71 100644 +--- a/dpdk/drivers/crypto/virtio/virtio_cryptodev.c ++++ b/dpdk/drivers/crypto/virtio/virtio_cryptodev.c +@@ -591,7 +591,7 @@ virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, + qp_conf->nb_descriptors, socket_id, &vq); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR( +- "virtio crypto data queue initialization failed\n"); ++ "virtio crypto data queue initialization failed"); + return ret; } - /* Set MAC flow control */ -- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; -- q_count = RTE_MIN(pdata->tx_q_count, -- max_q_count); -+ q_count = axgbe_get_fc_queue_count(pdata); - reg = MAC_Q0TFCR; - for (i = 0; i < q_count; i++) { - reg_val = AXGMAC_IOREAD(pdata, reg); -@@ -637,23 +652,21 @@ static void axgbe_config_dma_cache(struct axgbe_port *pdata) - unsigned int arcache, awcache, arwcache; +diff --git a/dpdk/drivers/dma/dpaa/dpaa_qdma.c b/dpdk/drivers/dma/dpaa/dpaa_qdma.c +index 10e65ef1d7..3d4fd818f8 100644 +--- a/dpdk/drivers/dma/dpaa/dpaa_qdma.c ++++ b/dpdk/drivers/dma/dpaa/dpaa_qdma.c +@@ -295,7 +295,7 @@ static struct fsl_qdma_queue + for (i = 0; i < queue_num; i++) { + if (queue_size[i] > 
FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || + queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { +- DPAA_QDMA_ERR("Get wrong queue-sizes.\n"); ++ DPAA_QDMA_ERR("Get wrong queue-sizes."); + goto fail; + } + queue_temp = queue_head + i + (j * queue_num); +@@ -345,7 +345,7 @@ fsl_qdma_queue *fsl_qdma_prep_status_queue(void) + status_size = QDMA_STATUS_SIZE; + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || + status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { +- DPAA_QDMA_ERR("Get wrong status_size.\n"); ++ DPAA_QDMA_ERR("Get wrong status_size."); + return NULL; + } - arcache = 0; -- AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3); -+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf); -+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf); -+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf); - AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); +@@ -643,7 +643,7 @@ fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan) + FSL_QDMA_COMMAND_BUFFER_SIZE, 64); + if (ret) { + DPAA_QDMA_ERR( +- "failed to alloc dma buffer for comp descriptor\n"); ++ "failed to alloc dma buffer for comp descriptor"); + goto exit; + } - awcache = 0; -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3); -- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1); -+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf); -+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf); -+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf); -+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf); - AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); +@@ -779,7 +779,7 @@ dpaa_qdma_enqueue(void *dev_private, uint16_t vchan, + (dma_addr_t)dst, (dma_addr_t)src, + length, NULL, NULL); + if (!fsl_comp) { +- DPAA_QDMA_DP_DEBUG("fsl_comp is NULL\n"); ++ DPAA_QDMA_DP_DEBUG("fsl_comp is NULL"); + return -1; + } + ret = fsl_qdma_enqueue_desc(fsl_chan, fsl_comp, flags); +@@ -803,19 +803,19 @@ dpaa_qdma_dequeue_status(void *dev_private, uint16_t vchan, + + intr = qdma_readl_be(status + FSL_QDMA_DEDR); + if (intr) { +- DPAA_QDMA_ERR("DMA transaction error! %x\n", intr); ++ DPAA_QDMA_ERR("DMA transaction error! %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW0R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW1R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW2R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW3R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFQIDR); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECBR); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr); + qdma_writel(0xffffffff, + status + FSL_QDMA_DEDR); + intr = qdma_readl(status + FSL_QDMA_DEDR); +@@ -849,19 +849,19 @@ dpaa_qdma_dequeue(void *dev_private, + + intr = qdma_readl_be(status + FSL_QDMA_DEDR); + if (intr) { +- DPAA_QDMA_ERR("DMA transaction error! 
%x\n", intr); ++ DPAA_QDMA_ERR("DMA transaction error! %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW0R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW0R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW1R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW1R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW2R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW2R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFDW3R); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFDW3R %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECFQIDR); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECFQIDR %x", intr); + intr = qdma_readl(status + FSL_QDMA_DECBR); +- DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x\n", intr); ++ DPAA_QDMA_INFO("reg FSL_QDMA_DECBR %x", intr); + qdma_writel(0xffffffff, + status + FSL_QDMA_DEDR); + intr = qdma_readl(status + FSL_QDMA_DEDR); +@@ -974,7 +974,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev) + close(ccsr_qdma_fd); + if (fsl_qdma->ctrl_base == MAP_FAILED) { + DPAA_QDMA_ERR("Can not map CCSR base qdma: Phys: %08" PRIx64 +- "size %d\n", phys_addr, regs_size); ++ "size %d", phys_addr, regs_size); + goto err; + } - arwcache = 0; -- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1); -- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3); -- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3); -+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf); -+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf); - AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache); - } +@@ -998,7 +998,7 @@ dpaa_qdma_init(struct rte_dma_dev *dmadev) -diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c -index f174d46143..6ce87f83f4 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c -+++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c -@@ -207,6 +207,7 @@ static struct axgbe_version_data axgbe_v2a = { - .ecc_support = 1, - .i2c_support = 1, - .an_cdr_workaround = 1, -+ .enable_rrc = 1, - }; + ret = fsl_qdma_reg_init(fsl_qdma); + if (ret) { +- DPAA_QDMA_ERR("Can't Initialize the qDMA engine.\n"); ++ DPAA_QDMA_ERR("Can't Initialize the qDMA engine."); + munmap(fsl_qdma->ctrl_base, regs_size); + goto err; + } +diff --git a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c +index 8968bb853b..5780e49297 100644 +--- a/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c ++++ b/dpdk/drivers/dma/dpaa2/dpaa2_qdma.c +@@ -16,9 +16,6 @@ - static struct axgbe_version_data axgbe_v2b = { -@@ -219,6 +220,7 @@ static struct axgbe_version_data axgbe_v2b = { - .ecc_support = 1, - .i2c_support = 1, - .an_cdr_workaround = 1, -+ .enable_rrc = 1, - }; + #define DPAA2_QDMA_PREFETCH "prefetch" - static const struct rte_eth_desc_lim rx_desc_lim = { -@@ -2267,6 +2269,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) +-/* Dynamic log type identifier */ +-int dpaa2_qdma_logtype; +- + uint32_t dpaa2_coherent_no_alloc_cache; + uint32_t dpaa2_coherent_alloc_cache; - /* Yellow Carp devices do not need cdr workaround */ - pdata->vdata->an_cdr_workaround = 0; -+ -+ /* Yellow Carp devices do not need rrc */ -+ pdata->vdata->enable_rrc = 0; - } else { - unknown_cpu = 1; +@@ -581,7 +578,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_QDMA_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ 
"Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; } -@@ -2404,12 +2409,14 @@ static int - axgbe_dev_close(struct rte_eth_dev *eth_dev) +@@ -611,7 +608,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_QDMA_DP_WARN( +- "VDQ command not issued.QBMAN busy\n"); ++ "VDQ command not issued.QBMAN busy"); + /* Portal was busy, try again */ + continue; + } +@@ -687,7 +684,7 @@ dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq, + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_QDMA_DP_WARN( +- "VDQ command is not issued. QBMAN is busy (2)\n"); ++ "VDQ command is not issued. QBMAN is busy (2)"); + continue; + } + break; +@@ -731,7 +728,7 @@ dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_QDMA_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -828,7 +825,7 @@ dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_QDMA_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1699,4 +1696,4 @@ static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = { + RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd); + RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma, + "no_prefetch= "); +-RTE_LOG_REGISTER_DEFAULT(dpaa_qdma2_logtype, INFO); ++RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO); +diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c +index 0e11ca14cc..8bc076f5d5 100644 +--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.c ++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.c +@@ -39,8 +39,6 @@ hisi_dma_queue_base(struct hisi_dma_dev *hw) { - struct rte_pci_device *pci_dev; -+ struct axgbe_port *pdata; - - PMD_INIT_FUNC_TRACE(); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP08) + return HISI_DMA_HIP08_QUEUE_BASE; +- else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) +- return HISI_DMA_HIP09_QUEUE_BASE; + else return 0; - -+ pdata = eth_dev->data->dev_private; - pci_dev = RTE_DEV_TO_PCI(eth_dev->device); - axgbe_dev_clear_queues(eth_dev); - -@@ -2419,6 +2426,9 @@ axgbe_dev_close(struct rte_eth_dev *eth_dev) - axgbe_dev_interrupt_handler, - (void *)eth_dev); - -+ /* Disable all interrupts in the hardware */ -+ XP_IOWRITE(pdata, XP_INT_EN, 0x0); -+ - return 0; } - -diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h -index 7f19321d88..b4bd56e239 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h -+++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h -@@ -111,6 +111,7 @@ - /* Auto-negotiation */ - #define AXGBE_AN_MS_TIMEOUT 500 - #define AXGBE_LINK_TIMEOUT 5 -+#define AXGBE_KR_TRAINING_WAIT_ITER 50 - - #define AXGBE_SGMII_AN_LINK_STATUS BIT(1) - #define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) -@@ -463,6 +464,7 @@ struct axgbe_version_data { - unsigned int ecc_support; - unsigned int i2c_support; - unsigned int an_cdr_workaround; -+ unsigned int enable_rrc; - }; - - struct axgbe_mmc_stats { -@@ -653,6 +655,7 @@ struct axgbe_port { - unsigned int parallel_detect; - unsigned int fec_ability; - unsigned long an_start; -+ unsigned long kr_start_time; - enum axgbe_an_mode an_mode; - - /* I2C support */ -diff --git a/dpdk/drivers/net/axgbe/axgbe_mdio.c 
b/dpdk/drivers/net/axgbe/axgbe_mdio.c -index 913ceada0d..d95a52659e 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_mdio.c -+++ b/dpdk/drivers/net/axgbe/axgbe_mdio.c -@@ -200,13 +200,14 @@ static void axgbe_switch_mode(struct axgbe_port *pdata) - axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); +@@ -216,25 +214,6 @@ hisi_dma_init_hw(struct hisi_dma_dev *hw) + HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); + hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, + HISI_DMA_HIP08_QUEUE_INT_MASK_M, true); +- } else if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) { +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_CTRL0_REG, +- HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M, false); +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_STATUS_REG, +- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, HISI_DMA_QUEUE_INT_MASK_REG, +- HISI_DMA_HIP09_QUEUE_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, +- HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); +- hisi_dma_update_queue_mbit(hw, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG, +- HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M, true); +- hisi_dma_update_queue_bit(hw, HISI_DMA_QUEUE_CTRL1_REG, +- HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B, true); +- hisi_dma_update_bit(hw, +- HISI_DMA_HIP09_QUEUE_CFG_REG(hw->queue_id), +- HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B, +- true); + } } --static void axgbe_set_mode(struct axgbe_port *pdata, -+static bool axgbe_set_mode(struct axgbe_port *pdata, - enum axgbe_mode mode) +@@ -256,8 +235,6 @@ hisi_dma_reg_layout(uint8_t revision) { - if (mode == axgbe_cur_mode(pdata)) -- return; -+ return false; - - axgbe_change_mode(pdata, mode); -+ return true; + if (revision == HISI_DMA_REVISION_HIP08B) + return HISI_DMA_REG_LAYOUT_HIP08; +- else if (revision >= HISI_DMA_REVISION_HIP09A) +- return HISI_DMA_REG_LAYOUT_HIP09; + else + return HISI_DMA_REG_LAYOUT_INVALID; } +@@ -328,14 +305,11 @@ hisi_dma_info_get(const struct rte_dma_dev *dev, + struct rte_dma_info *dev_info, + uint32_t info_sz) + { +- struct hisi_dma_dev *hw = dev->data->dev_private; ++ RTE_SET_USED(dev); + RTE_SET_USED(info_sz); - static bool axgbe_use_mode(struct axgbe_port *pdata, -@@ -357,6 +358,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, - reg |= AXGBE_KR_TRAINING_ENABLE; - reg |= AXGBE_KR_TRAINING_START; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); -+ pdata->kr_start_time = rte_get_timer_cycles(); + dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | + RTE_DMA_CAPA_OPS_COPY; +- if (hw->reg_layout == HISI_DMA_REG_LAYOUT_HIP09) +- dev_info->dev_capa |= RTE_DMA_CAPA_HANDLES_ERRORS; +- + dev_info->max_vchans = 1; + dev_info->max_desc = HISI_DMA_MAX_DESC_NUM; + dev_info->min_desc = HISI_DMA_MIN_DESC_NUM; +@@ -384,7 +358,7 @@ hisi_dma_start(struct rte_dma_dev *dev) + struct hisi_dma_dev *hw = dev->data->dev_private; - PMD_DRV_LOG(DEBUG, "KR training initiated\n"); - if (pdata->phy_if.phy_impl.kr_training_post) -@@ -487,6 +489,7 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) + if (hw->iomz == NULL) { +- HISI_DMA_ERR(hw, "Vchan was not setup, start fail!\n"); ++ HISI_DMA_ERR(hw, "Vchan was not setup, start fail!"); + return -EINVAL; + } - axgbe_an_disable(pdata); - axgbe_switch_mode(pdata); -+ pdata->an_result = AXGBE_AN_READY; - axgbe_an_restart(pdata); +@@ -514,18 +488,6 @@ hisi_dma_dump_common(struct hisi_dma_dev *hw, FILE *f) + { HISI_DMA_REG_LAYOUT_HIP08, + HISI_DMA_HIP08_DUMP_START_REG, + HISI_DMA_HIP08_DUMP_END_REG }, +- 
{ HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_A_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_A_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_B_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_B_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_C_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_C_END_REG }, +- { HISI_DMA_REG_LAYOUT_HIP09, +- HISI_DMA_HIP09_DUMP_REGION_D_START_REG, +- HISI_DMA_HIP09_DUMP_REGION_D_END_REG }, + }; + uint32_t i; - return AXGBE_AN_INCOMPAT_LINK; -@@ -967,11 +970,34 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata) - { - unsigned long link_timeout; - unsigned long ticks; -+ unsigned long kr_time; -+ int wait; +@@ -669,7 +631,7 @@ hisi_dma_scan_cq(struct hisi_dma_dev *hw) + * status array indexed by csq_head. Only error logs + * are used for prompting. + */ +- HISI_DMA_ERR(hw, "invalid csq_head:%u!\n", csq_head); ++ HISI_DMA_ERR(hw, "invalid csq_head:%u!", csq_head); + count = 0; + break; + } +@@ -951,7 +913,7 @@ hisi_dma_probe(struct rte_pci_driver *pci_drv __rte_unused, + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); - link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * - 2 * rte_get_timer_hz()); - ticks = rte_get_timer_cycles(); - if (time_after(ticks, link_timeout)) { -+ if ((axgbe_cur_mode(pdata) == AXGBE_MODE_KR) && -+ pdata->phy.autoneg == AUTONEG_ENABLE) { -+ /* AN restart should not happen while KR training is in progress. -+ * The while loop ensures no AN restart during KR training, -+ * waits up to 500ms and AN restart is triggered only if KR -+ * training is failed. -+ */ -+ wait = AXGBE_KR_TRAINING_WAIT_ITER; -+ while (wait--) { -+ kr_time = pdata->kr_start_time + -+ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); -+ ticks = rte_get_timer_cycles(); -+ if (time_after(ticks, kr_time)) -+ break; -+ /* AN restart is not required, if AN result is COMPLETE */ -+ if (pdata->an_result == AXGBE_AN_COMPLETE) -+ return; -+ rte_delay_us(10500); -+ } -+ } -+ - PMD_DRV_LOG(NOTICE, "AN link timeout\n"); - axgbe_phy_config_aneg(pdata); + if (pci_dev->mem_resource[2].addr == NULL) { +- HISI_DMA_LOG(ERR, "%s BAR2 is NULL!\n", name); ++ HISI_DMA_LOG(ERR, "%s BAR2 is NULL!", name); + return -ENODEV; } -@@ -982,7 +1008,7 @@ static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) - return pdata->phy_if.phy_impl.an_outcome(pdata); - } --static void axgbe_phy_status_result(struct axgbe_port *pdata) -+static bool axgbe_phy_status_result(struct axgbe_port *pdata) - { - enum axgbe_mode mode; +diff --git a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h +index 5a17f9f69e..a57b5c759a 100644 +--- a/dpdk/drivers/dma/hisilicon/hisi_dmadev.h ++++ b/dpdk/drivers/dma/hisilicon/hisi_dmadev.h +@@ -25,22 +25,14 @@ + #define HISI_DMA_DEVICE_ID 0xA122 + #define HISI_DMA_PCI_REVISION_ID_REG 0x08 + #define HISI_DMA_REVISION_HIP08B 0x21 +-#define HISI_DMA_REVISION_HIP09A 0x30 -@@ -1016,7 +1042,10 @@ static void axgbe_phy_status_result(struct axgbe_port *pdata) + #define HISI_DMA_MAX_HW_QUEUES 4 + #define HISI_DMA_MAX_DESC_NUM 8192 + #define HISI_DMA_MIN_DESC_NUM 32 - pdata->phy.duplex = DUPLEX_FULL; +-/** +- * The HIP08B(HiSilicon IP08) and HIP09B(HiSilicon IP09) are DMA iEPs, they +- * have the same pci device id but different pci revision. +- * Unfortunately, they have different register layouts, so two layout +- * enumerations are defined. 
+- */ + enum { + HISI_DMA_REG_LAYOUT_INVALID = 0, +- HISI_DMA_REG_LAYOUT_HIP08, +- HISI_DMA_REG_LAYOUT_HIP09 ++ HISI_DMA_REG_LAYOUT_HIP08 + }; -- axgbe_set_mode(pdata, mode); -+ if (axgbe_set_mode(pdata, mode)) -+ return true; -+ else -+ return false; - } + /** +@@ -69,9 +61,6 @@ enum { + * length of queue-region. The global offset for a single queue register is + * calculated by: + * offset = queue-base + (queue-id * queue-region) + reg-offset-in-region. +- * +- * The first part of queue region is basically the same for HIP08 and HIP09 +- * register layouts, therefore, HISI_QUEUE_* registers are defined for it. + */ + #define HISI_DMA_QUEUE_SQ_BASE_L_REG 0x0 + #define HISI_DMA_QUEUE_SQ_BASE_H_REG 0x4 +@@ -110,28 +99,6 @@ enum { + #define HISI_DMA_HIP08_DUMP_START_REG 0x2000 + #define HISI_DMA_HIP08_DUMP_END_REG 0x2280 - static int autoneg_time_out(unsigned long autoneg_start_time) -@@ -1051,7 +1080,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata) - &an_restart); - if (an_restart) { - axgbe_phy_config_aneg(pdata); -- return; -+ goto adjust_link; - } +-/** +- * HiSilicon IP09 DMA register and field define: +- */ +-#define HISI_DMA_HIP09_QUEUE_BASE 0x2000 +-#define HISI_DMA_HIP09_QUEUE_CTRL0_ERR_ABORT_M GENMASK(31, 28) +-#define HISI_DMA_HIP09_QUEUE_CTRL1_VA_ENABLE_B 2 +-#define HISI_DMA_HIP09_QUEUE_INT_MASK_M 0x1 +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_STATUS_REG 0x48 +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_REG 0x4C +-#define HISI_DMA_HIP09_QUEUE_ERR_INT_MASK_M GENMASK(18, 1) +-#define HISI_DMA_HIP09_QUEUE_CFG_REG(queue_id) (0x800 + \ +- (queue_id) * 0x20) +-#define HISI_DMA_HIP09_QUEUE_CFG_LINK_DOWN_MASK_B 16 +-#define HISI_DMA_HIP09_DUMP_REGION_A_START_REG 0x0 +-#define HISI_DMA_HIP09_DUMP_REGION_A_END_REG 0x368 +-#define HISI_DMA_HIP09_DUMP_REGION_B_START_REG 0x800 +-#define HISI_DMA_HIP09_DUMP_REGION_B_END_REG 0xA08 +-#define HISI_DMA_HIP09_DUMP_REGION_C_START_REG 0x1800 +-#define HISI_DMA_HIP09_DUMP_REGION_C_END_REG 0x1A4C +-#define HISI_DMA_HIP09_DUMP_REGION_D_START_REG 0x1C00 +-#define HISI_DMA_HIP09_DUMP_REGION_D_END_REG 0x1CC4 +- + /** + * In fact, there are multiple states, but it need to pay attention to + * the following three states for the driver: +diff --git a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py +index c0c833ade9..5c9572b49d 100755 +--- a/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py ++++ b/dpdk/drivers/dma/idxd/dpdk_idxd_cfg.py +@@ -104,8 +104,10 @@ def configure_dsa(dsa_id, args): + "priority": 1, + "max_batch_size": 1024, + "size": int(max_work_queues_size / nb_queues)} +- wqcfg.update(parse_wq_opts(args.wq_option)) + wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}")) ++ if os.path.exists(os.path.join(wq_dir.path, f"driver_name")): ++ wqcfg.update({"driver_name": "user"}) ++ wqcfg.update(parse_wq_opts(args.wq_option)) + wq_dir.write_values(wqcfg) - if (pdata->phy.link) { -@@ -1083,7 +1112,10 @@ static void axgbe_phy_status(struct axgbe_port *pdata) - return; - } - } -- axgbe_phy_status_result(pdata); + # enable device and then queues +diff --git a/dpdk/drivers/dma/idxd/idxd_bus.c b/dpdk/drivers/dma/idxd/idxd_bus.c +index 3b2d4c2b65..ba8076715d 100644 +--- a/dpdk/drivers/dma/idxd/idxd_bus.c ++++ b/dpdk/drivers/dma/idxd/idxd_bus.c +@@ -261,9 +261,15 @@ static int + is_for_this_process_use(struct rte_dsa_device *dev, const char *name) + { + char *runtime_dir = strdup(rte_eal_get_runtime_dir()); +- char *prefix = basename(runtime_dir); +- int prefixlen = strlen(prefix); + int retval = 0; ++ int prefixlen; ++ char 
*prefix; + -+ if (axgbe_phy_status_result(pdata)) -+ return; ++ if (runtime_dir == NULL) ++ return retval; + - if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) - rte_bit_relaxed_clear32(AXGBE_LINK_INIT, - &pdata->dev_state); -diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c -index d97fbbfddd..12908d4e6f 100644 ---- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c -+++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c -@@ -69,6 +69,7 @@ enum axgbe_sfp_cable { - AXGBE_SFP_CABLE_UNKNOWN = 0, - AXGBE_SFP_CABLE_ACTIVE, - AXGBE_SFP_CABLE_PASSIVE, -+ AXGBE_SFP_CABLE_FIBER, - }; - - enum axgbe_sfp_base { -@@ -116,9 +117,7 @@ enum axgbe_sfp_speed { - - #define AXGBE_SFP_BASE_BR 12 - #define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a --#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d - #define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 --#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 ++ prefix = basename(runtime_dir); ++ prefixlen = strlen(prefix); - #define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + if (strncmp(name, "dpdk_", 5) == 0) + retval = 1; +diff --git a/dpdk/drivers/dma/idxd/idxd_common.c b/dpdk/drivers/dma/idxd/idxd_common.c +index 83d53942eb..dc2e8cd432 100644 +--- a/dpdk/drivers/dma/idxd/idxd_common.c ++++ b/dpdk/drivers/dma/idxd/idxd_common.c +@@ -616,7 +616,7 @@ idxd_dmadev_create(const char *name, struct rte_device *dev, + sizeof(idxd->batch_comp_ring[0])) * (idxd->max_batches + 1), + sizeof(idxd->batch_comp_ring[0]), dev->numa_node); + if (idxd->batch_comp_ring == NULL) { +- IDXD_PMD_ERR("Unable to reserve memory for batch data\n"); ++ IDXD_PMD_ERR("Unable to reserve memory for batch data"); + ret = -ENOMEM; + goto cleanup; + } +diff --git a/dpdk/drivers/dma/idxd/idxd_pci.c b/dpdk/drivers/dma/idxd/idxd_pci.c +index a78889a7ef..c314aee65c 100644 +--- a/dpdk/drivers/dma/idxd/idxd_pci.c ++++ b/dpdk/drivers/dma/idxd/idxd_pci.c +@@ -300,7 +300,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd, + return nb_wqs; -@@ -535,25 +534,22 @@ static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) - static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, - enum axgbe_sfp_speed sfp_speed) - { -- u8 *sfp_base, min, max; -+ u8 *sfp_base, min; + err: +- free(pci); ++ rte_free(pci); + return err_code; + } - sfp_base = sfp_eeprom->base; +@@ -323,7 +323,7 @@ idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev) - switch (sfp_speed) { - case AXGBE_SFP_SPEED_1000: - min = AXGBE_SFP_BASE_BR_1GBE_MIN; -- max = AXGBE_SFP_BASE_BR_1GBE_MAX; - break; - case AXGBE_SFP_SPEED_10000: - min = AXGBE_SFP_BASE_BR_10GBE_MIN; -- max = AXGBE_SFP_BASE_BR_10GBE_MAX; - break; - default: - return false; + /* look up queue 0 to get the PCI structure */ + snprintf(qname, sizeof(qname), "%s-q0", name); +- IDXD_PMD_INFO("Looking up %s\n", qname); ++ IDXD_PMD_INFO("Looking up %s", qname); + ret = idxd_dmadev_create(qname, &dev->device, NULL, &idxd_pci_ops); + if (ret != 0) { + IDXD_PMD_ERR("Failed to create dmadev %s", name); +@@ -338,7 +338,7 @@ idxd_dmadev_probe_pci(struct rte_pci_driver *drv, struct rte_pci_device *dev) + for (qid = 1; qid < max_qid; qid++) { + /* add the queue number to each device name */ + snprintf(qname, sizeof(qname), "%s-q%d", name, qid); +- IDXD_PMD_INFO("Looking up %s\n", qname); ++ IDXD_PMD_INFO("Looking up %s", qname); + ret = idxd_dmadev_create(qname, &dev->device, NULL, &idxd_pci_ops); + if (ret != 0) { + IDXD_PMD_ERR("Failed to create dmadev %s", name); +@@ -364,7 +364,7 @@ idxd_dmadev_probe_pci(struct rte_pci_driver *drv, 
struct rte_pci_device *dev) + return ret; } + if (idxd.u.pci->portals == NULL) { +- IDXD_PMD_ERR("Error, invalid portal assigned during initialization\n"); ++ IDXD_PMD_ERR("Error, invalid portal assigned during initialization"); + free(idxd.u.pci); + return -EINVAL; + } +diff --git a/dpdk/drivers/dma/ioat/ioat_dmadev.c b/dpdk/drivers/dma/ioat/ioat_dmadev.c +index 5fc14bcf22..8b7ff5652f 100644 +--- a/dpdk/drivers/dma/ioat/ioat_dmadev.c ++++ b/dpdk/drivers/dma/ioat/ioat_dmadev.c +@@ -156,12 +156,12 @@ ioat_dev_start(struct rte_dma_dev *dev) + ioat->offset = 0; + ioat->failure = 0; + +- IOAT_PMD_DEBUG("channel status - %s [0x%"PRIx64"]\n", ++ IOAT_PMD_DEBUG("channel status - %s [0x%"PRIx64"]", + chansts_readable[ioat->status & IOAT_CHANSTS_STATUS], + ioat->status); + + if ((ioat->regs->chansts & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED) { +- IOAT_PMD_WARN("Device HALTED on start, attempting to recover\n"); ++ IOAT_PMD_WARN("Device HALTED on start, attempting to recover"); + if (__ioat_recover(ioat) != 0) { + IOAT_PMD_ERR("Device couldn't be recovered"); + return -1; +@@ -469,7 +469,7 @@ ioat_completed(void *dev_private, uint16_t qid __rte_unused, const uint16_t max_ + ioat->failure = ioat->regs->chanerr; + ioat->next_read = read + count + 1; + if (__ioat_recover(ioat) != 0) { +- IOAT_PMD_ERR("Device HALTED and could not be recovered\n"); ++ IOAT_PMD_ERR("Device HALTED and could not be recovered"); + __dev_dump(dev_private, stdout); + return 0; + } +@@ -515,7 +515,7 @@ ioat_completed_status(void *dev_private, uint16_t qid __rte_unused, + count++; + ioat->next_read = read + count; + if (__ioat_recover(ioat) != 0) { +- IOAT_PMD_ERR("Device HALTED and could not be recovered\n"); ++ IOAT_PMD_ERR("Device HALTED and could not be recovered"); + __dev_dump(dev_private, stdout); + return 0; + } +@@ -652,12 +652,12 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev) + + /* Do device initialization - reset and set error behaviour. */ + if (ioat->regs->chancnt != 1) +- IOAT_PMD_WARN("%s: Channel count == %d\n", __func__, ++ IOAT_PMD_WARN("%s: Channel count == %d", __func__, + ioat->regs->chancnt); + + /* Locked by someone else. */ + if (ioat->regs->chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) { +- IOAT_PMD_WARN("%s: Channel appears locked\n", __func__); ++ IOAT_PMD_WARN("%s: Channel appears locked", __func__); + ioat->regs->chanctrl = 0; + } + +@@ -676,7 +676,7 @@ ioat_dmadev_create(const char *name, struct rte_pci_device *dev) + rte_delay_ms(1); + if (++retry >= 200) { + IOAT_PMD_ERR("%s: cannot reset device. 
CHANCMD=%#"PRIx8 +- ", CHANSTS=%#"PRIx64", CHANERR=%#"PRIx32"\n", ++ ", CHANSTS=%#"PRIx64", CHANERR=%#"PRIx32, + __func__, + ioat->regs->chancmd, + ioat->regs->chansts, +diff --git a/dpdk/drivers/event/cnxk/cn10k_eventdev.c b/dpdk/drivers/event/cnxk/cn10k_eventdev.c +index bb0c910553..a44a33eae8 100644 +--- a/dpdk/drivers/event/cnxk/cn10k_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cn10k_eventdev.c +@@ -782,12 +782,53 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem) + } + } -- return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) && -- (sfp_base[AXGBE_SFP_BASE_BR] <= max)); -+ return sfp_base[AXGBE_SFP_BASE_BR] >= min; - } - - static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) -@@ -578,6 +574,9 @@ static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) - AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) - return false; - -+ /* Reset PHY - wait for self-clearing reset bit to clear */ -+ pdata->phy_if.phy_impl.reset(pdata); ++static void ++eventdev_fops_update(struct rte_eventdev *event_dev) ++{ ++ struct rte_event_fp_ops *fp_op = ++ rte_event_fp_ops + event_dev->data->dev_id; + - if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], - AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { - phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; -@@ -613,16 +612,21 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) - - axgbe_phy_sfp_parse_quirks(pdata); - -- /* Assume ACTIVE cable unless told it is PASSIVE */ -+ /* Assume FIBER cable unless told otherwise */ - if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { - phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; - phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; -- } else { -+ } else if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_ACTIVE) { - phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; -+ } else { -+ phy_data->sfp_cable = AXGBE_SFP_CABLE_FIBER; ++ fp_op->dequeue = event_dev->dequeue; ++ fp_op->dequeue_burst = event_dev->dequeue_burst; ++} ++ ++static void ++cn10k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[port_id]; ++ struct cnxk_eth_dev *cnxk_eth_dev = dev->data->dev_private; ++ struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; ++ struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); ++ ++ evdev->rx_offloads |= flags; ++ if (ptp_en) ++ evdev->tstamp[port_id] = &cnxk_eth_dev->tstamp; ++ else ++ evdev->tstamp[port_id] = NULL; ++ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); ++ eventdev_fops_update(event_dev); ++} ++ ++static void ++cn10k_sso_rx_offload_cb(uint16_t port_id, uint64_t flags) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[port_id]; ++ struct cnxk_eth_dev *cnxk_eth_dev = dev->data->dev_private; ++ struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; ++ struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); ++ ++ evdev->rx_offloads |= flags; ++ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); ++ eventdev_fops_update(event_dev); ++} ++ + static int + cn10k_sso_rx_adapter_queue_add( + const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) + { ++ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); + struct roc_sso_hwgrp_stash stash; + struct cn10k_eth_rxq *rxq; +@@ -802,6 +843,10 @@ 
cn10k_sso_rx_adapter_queue_add( + queue_conf); + if (rc) + return -EINVAL; ++ ++ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn10k_sso_tstamp_hdl_update; ++ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev; ++ + rxq = eth_dev->data->rx_queues[0]; + lookup_mem = rxq->lookup_mem; + cn10k_sso_set_priv_mem(event_dev, lookup_mem); +@@ -1084,6 +1129,7 @@ cn10k_sso_init(struct rte_eventdev *event_dev) + return rc; } - /* Determine the type of SFP */ -- if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) -+ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_FIBER && -+ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) -+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; -+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) - phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; - else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) - phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; -@@ -639,9 +643,6 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) - phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; - else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) - phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; -- else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && -- axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) -- phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; ++ cnxk_ethdev_rx_offload_cb_register(cn10k_sso_rx_offload_cb); + event_dev->dev_ops = &cn10k_sso_dev_ops; + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { +diff --git a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h +index 53e0dde20c..256237b895 100644 +--- a/dpdk/drivers/event/cnxk/cn10k_tx_worker.h ++++ b/dpdk/drivers/event/cnxk/cn10k_tx_worker.h +@@ -70,6 +70,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, + const uint64_t *txq_data, const uint32_t flags) + { + uint8_t lnum = 0, loff = 0, shft = 0; ++ struct rte_mbuf *extm = NULL; + struct cn10k_eth_txq *txq; + uintptr_t laddr; + uint16_t segdw; +@@ -90,7 +91,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, + if (flags & NIX_TX_OFFLOAD_TSO_F) + cn10k_nix_xmit_prepare_tso(m, flags); - switch (phy_data->sfp_base) { - case AXGBE_SFP_BASE_1000_T: -@@ -1225,6 +1226,10 @@ static void axgbe_phy_rx_reset(struct axgbe_port *pdata) +- cn10k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, &sec, ++ cn10k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, &sec, + txq->mark_flag, txq->mark_fmt); - static void axgbe_phy_pll_ctrl(struct axgbe_port *pdata, bool enable) - { -+ /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */ -+ if (pdata->phy.autoneg != AUTONEG_DISABLE) -+ return; -+ - XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, - XGBE_PMA_PLL_CTRL_MASK, - enable ? 
XGBE_PMA_PLL_CTRL_SET -@@ -1269,8 +1274,9 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata, - axgbe_phy_rx_reset(pdata); + laddr = lmt_addr; +@@ -105,7 +106,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, + cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags); - reenable_pll: -- /* Re-enable the PLL control */ -- axgbe_phy_pll_ctrl(pdata, true); -+ /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */ -+ if (cmd != 0 && cmd != 5) -+ axgbe_phy_pll_ctrl(pdata, true); + if (flags & NIX_TX_MULTI_SEG_F) +- segdw = cn10k_nix_prepare_mseg(txq, m, (uint64_t *)laddr, flags); ++ segdw = cn10k_nix_prepare_mseg(txq, m, &extm, (uint64_t *)laddr, flags); + else + segdw = cn10k_nix_tx_ext_subs(flags) + 2; - PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n"); - } -@@ -1697,8 +1703,15 @@ static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) - if (reg & MDIO_STAT1_LSTATUS) - return 1; +@@ -127,6 +128,9 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd, + /* Memory barrier to make sure lmtst store completes */ + rte_io_wmb(); -+ if (pdata->phy.autoneg == AUTONEG_ENABLE && -+ phy_data->port_mode == AXGBE_PORT_MODE_BACKPLANE) { -+ if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) { -+ *an_restart = 1; -+ } -+ } ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) ++ cn10k_nix_free_extmbuf(extm); + - /* No link, attempt a receiver reset cycle */ -- if (phy_data->rrc_count++) { -+ if (pdata->vdata->enable_rrc && phy_data->rrc_count++) { - phy_data->rrc_count = 0; - axgbe_phy_rrc(pdata); - } -diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c -index c3283c94f3..597ee43359 100644 ---- a/dpdk/drivers/net/bnx2x/bnx2x.c -+++ b/dpdk/drivers/net/bnx2x/bnx2x.c -@@ -2389,7 +2389,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) - static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) - { - sc->ilt->lines = rte_calloc("", -- sizeof(struct ilt_line), ILT_MAX_LINES, -+ ILT_MAX_LINES, sizeof(struct ilt_line), - RTE_CACHE_LINE_SIZE); - return sc->ilt->lines == NULL; + return 1; } -diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c -index c07b01510a..69132c7c80 100644 ---- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c -+++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c -@@ -114,7 +114,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) - /* Update MCP's statistics if possible */ - if (sc->func_stx) { -- rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, -+ memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, - sizeof(sc->func_stats)); +diff --git a/dpdk/drivers/event/cnxk/cn9k_eventdev.c b/dpdk/drivers/event/cnxk/cn9k_eventdev.c +index 9fb9ca0d63..ec3022b38c 100644 +--- a/dpdk/drivers/event/cnxk/cn9k_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cn9k_eventdev.c +@@ -834,12 +834,40 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem) } + } -@@ -817,10 +817,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) - etherstatspktsover1522octets); - } ++static void ++eventdev_fops_tstamp_update(struct rte_eventdev *event_dev) ++{ ++ struct rte_event_fp_ops *fp_op = ++ rte_event_fp_ops + event_dev->data->dev_id; ++ ++ fp_op->dequeue = event_dev->dequeue; ++ fp_op->dequeue_burst = event_dev->dequeue_burst; ++} ++ ++static void ++cn9k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[port_id]; ++ struct cnxk_eth_dev *cnxk_eth_dev = 
dev->data->dev_private; ++ struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; ++ struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); ++ ++ evdev->rx_offloads |= flags; ++ if (ptp_en) ++ evdev->tstamp[port_id] = &cnxk_eth_dev->tstamp; ++ else ++ evdev->tstamp[port_id] = NULL; ++ cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); ++ eventdev_fops_tstamp_update(event_dev); ++} ++ + static int + cn9k_sso_rx_adapter_queue_add( + const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) + { ++ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cn9k_eth_rxq *rxq; + void *lookup_mem; + int rc; +@@ -853,6 +881,9 @@ cn9k_sso_rx_adapter_queue_add( + if (rc) + return -EINVAL; -- rte_memcpy(old, new, sizeof(struct nig_stats)); -+ memcpy(old, new, sizeof(struct nig_stats)); ++ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn9k_sso_tstamp_hdl_update; ++ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev; ++ + rxq = eth_dev->data->rx_queues[0]; + lookup_mem = rxq->lookup_mem; + cn9k_sso_set_priv_mem(event_dev, lookup_mem); +diff --git a/dpdk/drivers/event/cnxk/cn9k_worker.h b/dpdk/drivers/event/cnxk/cn9k_worker.h +index 0451157812..107265d54b 100644 +--- a/dpdk/drivers/event/cnxk/cn9k_worker.h ++++ b/dpdk/drivers/event/cnxk/cn9k_worker.h +@@ -746,7 +746,7 @@ static __rte_always_inline uint16_t + cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, + uint64_t *txq_data, const uint32_t flags) + { +- struct rte_mbuf *m = ev->mbuf; ++ struct rte_mbuf *m = ev->mbuf, *extm = NULL; + struct cn9k_eth_txq *txq; -- rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), -- sizeof(struct mac_stx)); -+ memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)), -+ &pstats->mac_stx[1], sizeof(struct mac_stx)); - estats->brb_drop_hi = pstats->brb_drop_hi; - estats->brb_drop_lo = pstats->brb_drop_lo; + /* Perform header writes before barrier for TSO */ +@@ -767,7 +767,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, + if (cn9k_sso_sq_depth(txq) <= 0) + return 0; + cn9k_nix_tx_skeleton(txq, cmd, flags, 0); +- cn9k_nix_xmit_prepare(txq, m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag, ++ cn9k_nix_xmit_prepare(txq, m, &extm, cmd, flags, txq->lso_tun_fmt, txq->mark_flag, + txq->mark_fmt); -@@ -1492,9 +1492,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) - REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); - if (!CHIP_IS_E3(sc)) { - REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, -- &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); -+ RTE_PTR_ADD(&sc->port.old_nig_stats, -+ offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2); - REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, -- &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); -+ RTE_PTR_ADD(&sc->port.old_nig_stats, -+ offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2); + if (flags & NIX_TX_OFFLOAD_SECURITY_F) { +@@ -789,7 +789,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, } - /* function stats */ -diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c -index 63953c2979..5411df3a38 100644 ---- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c -+++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c -@@ -52,9 +52,9 @@ bnx2x_check_bull(struct bnx2x_softc *sc) - - /* check the mac address and VLAN and allocate memory if valid */ - if (valid_bitmap & (1 << 
MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) -- rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); -+ memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); - if (valid_bitmap & (1 << VLAN_VALID)) -- rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN); -+ memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan)); - - sc->old_bulletin = *bull; + if (flags & NIX_TX_MULTI_SEG_F) { +- const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, cmd, flags); ++ const uint16_t segdw = cn9k_nix_prepare_mseg(txq, m, &extm, cmd, flags); + cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, segdw, + flags); + if (!CNXK_TT_FROM_EVENT(ev->event)) { +@@ -819,6 +819,9 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd, + } -@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + done: ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) ++ cn9k_nix_free_extmbuf(extm); ++ + return 1; + } - bnx2x_check_bull(sc); +diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +index 0c61f4c20e..f44d8fb377 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c ++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +@@ -118,8 +118,8 @@ cnxk_setup_event_ports(const struct rte_eventdev *event_dev, + return 0; + hws_fini: + for (i = i - 1; i >= 0; i--) { +- event_dev->data->ports[i] = NULL; + rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i])); ++ event_dev->data->ports[i] = NULL; + } + return -ENOMEM; + } +@@ -162,16 +162,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev, uint32_t deq_depth, -- rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); -+ memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + deq_tmo_ns = conf->dequeue_timeout_ns; - bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, - BNX2X_VF_TLV_LIST_END, -@@ -583,9 +583,9 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) - while (BNX2X_VF_STATUS_FAILURE == reply->status && - bnx2x_check_bull(sc)) { - /* A new mac was configured by PF for us */ -- rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, -+ memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, - ETH_ALEN); -- rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, -+ memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, - ETH_ALEN); +- if (deq_tmo_ns == 0) +- deq_tmo_ns = dev->min_dequeue_timeout_ns; +- if (deq_tmo_ns < dev->min_dequeue_timeout_ns || +- deq_tmo_ns > dev->max_dequeue_timeout_ns) { ++ if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns || ++ deq_tmo_ns > dev->max_dequeue_timeout_ns)) { + plt_err("Unsupported dequeue timeout requested"); + return -EINVAL; + } - rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); -@@ -622,10 +622,10 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc, - BNX2X_VF_TLV_LIST_END, - sizeof(struct channel_list_end_tlv)); +- if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) ++ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { ++ if (deq_tmo_ns == 0) ++ deq_tmo_ns = dev->min_dequeue_timeout_ns; + dev->is_timeout_deq = 1; ++ } -- rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); -+ memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); - query->rss_key_size = T_ETH_RSS_KEY; + dev->deq_tmo_ns = deq_tmo_ns; -- rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); -+ memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); - query->ind_table_size 
= T_ETH_INDIRECTION_TABLE_SIZE; +@@ -553,6 +554,9 @@ parse_list(const char *value, void *opaque, param_parse_t fn) + char *end = NULL; + char *f = s; - query->rss_result_mask = params->rss_result_mask; -diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h -index 0e01b1d4ba..be2fd689bb 100644 ---- a/dpdk/drivers/net/bnxt/bnxt.h -+++ b/dpdk/drivers/net/bnxt/bnxt.h -@@ -449,8 +449,8 @@ struct bnxt_ring_mem_info { ++ if (s == NULL) ++ return; ++ + while (*s) { + if (*s == '[') + start = s; +@@ -663,7 +667,7 @@ cnxk_sso_init(struct rte_eventdev *event_dev) + } - struct bnxt_ctx_pg_info { - uint32_t entries; -- void *ctx_pg_arr[MAX_CTX_PAGES]; -- rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; -+ void **ctx_pg_arr; -+ rte_iova_t *ctx_dma_arr; - struct bnxt_ring_mem_info ring_mem; - }; + dev->is_timeout_deq = 0; +- dev->min_dequeue_timeout_ns = 0; ++ dev->min_dequeue_timeout_ns = USEC2NSEC(1); + dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF); + dev->max_num_events = -1; + dev->nb_event_queues = 0; +diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +index 92aea92389..fe905b5461 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c ++++ b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +@@ -212,7 +212,7 @@ static void + cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev, + struct cnxk_sso_evdev *dev) + { +- if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ++ if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || cnxk_eth_dev->ptp_en) + dev->tstamp[port_id] = &cnxk_eth_dev->tstamp; + } + +diff --git a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +index 6d59fdf909..bba70646fa 100644 +--- a/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c ++++ b/dpdk/drivers/event/cnxk/cnxk_tim_evdev.c +@@ -268,7 +268,7 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr) + sso_set_priv_mem_fn(dev->event_dev, NULL); + + plt_tim_dbg( +- "Total memory used %" PRIu64 "MB\n", ++ "Total memory used %" PRIu64 "MB", + (uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz) + + (tim_ring->nb_bkts * sizeof(struct cnxk_tim_bkt))) / + BIT_ULL(20))); +diff --git a/dpdk/drivers/event/dlb2/dlb2.c b/dpdk/drivers/event/dlb2/dlb2.c +index 050ace0904..9dc5edb3fb 100644 +--- a/dpdk/drivers/event/dlb2/dlb2.c ++++ b/dpdk/drivers/event/dlb2/dlb2.c +@@ -160,7 +160,6 @@ static int + dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + { + struct dlb2_hw_dev *handle = &dlb2->qm_instance; +- struct dlb2_hw_resource_info *dlb2_info = &handle->info; + int num_ldb_ports; + int ret; -@@ -550,7 +550,6 @@ struct bnxt_mark_info { +@@ -169,7 +168,7 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + ret = dlb2_iface_get_num_resources(handle, + &dlb2->hw_rsrc_query_results); + if (ret) { +- DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret); ++ DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d", ret); + return ret; + } - struct bnxt_rep_info { - struct rte_eth_dev *vfr_eth_dev; -- pthread_mutex_t vfr_lock; - pthread_mutex_t vfr_start_lock; - bool conduit_valid; - }; -@@ -896,6 +895,7 @@ struct bnxt { - struct rte_ether_addr *mcast_addr_list; - rte_iova_t mc_list_dma_addr; - uint32_t nb_mc_addr; -+#define BNXT_DFLT_MAX_MC_ADDR 16 /* for compatibility with older firmware */ - uint32_t max_mcast_addr; /* maximum number of mcast filters supported */ +@@ -222,8 +221,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) + handle->info.hw_rsrc_max.reorder_window_size = + 
dlb2->hw_rsrc_query_results.num_hist_list_entries; - struct rte_eth_rss_conf rss_conf; /* RSS configuration. */ -diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index acf7e6e46e..0fc561d258 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c -+++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -@@ -1673,10 +1673,8 @@ bnxt_uninit_locks(struct bnxt *bp) - pthread_mutex_destroy(&bp->def_cp_lock); - pthread_mutex_destroy(&bp->health_check_lock); - pthread_mutex_destroy(&bp->err_recovery_lock); -- if (bp->rep_info) { -- pthread_mutex_destroy(&bp->rep_info->vfr_lock); -+ if (bp->rep_info) - pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); -- } +- rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info)); +- + return 0; } - static void bnxt_drv_uninit(struct bnxt *bp) -@@ -4750,7 +4748,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - { - struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; - const struct rte_memzone *mz = NULL; -- char mz_name[RTE_MEMZONE_NAMESIZE]; -+ char name[RTE_MEMZONE_NAMESIZE]; - rte_iova_t mz_phys_addr; - uint64_t valid_bits = 0; - uint32_t sz; -@@ -4762,6 +4760,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / - BNXT_PAGE_SIZE; - rmem->page_size = BNXT_PAGE_SIZE; -+ -+ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", -+ suffix, idx, bp->eth_dev->data->port_id); -+ ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); -+ if (ctx_pg->ctx_pg_arr == NULL) -+ return -ENOMEM; -+ -+ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", -+ suffix, idx, bp->eth_dev->data->port_id); -+ ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); -+ if (ctx_pg->ctx_dma_arr == NULL) -+ return -ENOMEM; -+ - rmem->pg_arr = ctx_pg->ctx_pg_arr; - rmem->dma_arr = ctx_pg->ctx_dma_arr; - rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; -@@ -4769,13 +4780,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - valid_bits = PTU_PTE_VALID; +@@ -259,7 +256,7 @@ set_producer_coremask(const char *key __rte_unused, + const char **mask_str = opaque; - if (rmem->nr_pages > 1) { -- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, -+ snprintf(name, RTE_MEMZONE_NAMESIZE, - "bnxt_ctx_pg_tbl%s_%x_%d", - suffix, idx, bp->eth_dev->data->port_id); -- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; -- mz = rte_memzone_lookup(mz_name); -+ name[RTE_MEMZONE_NAMESIZE - 1] = 0; -+ mz = rte_memzone_lookup(name); - if (!mz) { -- mz = rte_memzone_reserve_aligned(mz_name, -+ mz = rte_memzone_reserve_aligned(name, - rmem->nr_pages * 8, - bp->eth_dev->device->numa_node, - RTE_MEMZONE_2MB | -@@ -4794,11 +4805,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - rmem->pg_tbl_mz = mz; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", -+ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", - suffix, idx, bp->eth_dev->data->port_id); -- mz = rte_memzone_lookup(mz_name); -+ mz = rte_memzone_lookup(name); - if (!mz) { -- mz = rte_memzone_reserve_aligned(mz_name, -+ mz = rte_memzone_reserve_aligned(name, - mem_size, - bp->eth_dev->device->numa_node, - RTE_MEMZONE_1GB | -@@ -4844,6 +4855,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) - return; +@@ -293,7 +290,7 @@ set_max_cq_depth(const char *key __rte_unused, + int ret; - bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; -+ 
rte_free(bp->ctx->qp_mem.ctx_pg_arr); -+ rte_free(bp->ctx->srq_mem.ctx_pg_arr); -+ rte_free(bp->ctx->cq_mem.ctx_pg_arr); -+ rte_free(bp->ctx->vnic_mem.ctx_pg_arr); -+ rte_free(bp->ctx->stat_mem.ctx_pg_arr); -+ rte_free(bp->ctx->qp_mem.ctx_dma_arr); -+ rte_free(bp->ctx->srq_mem.ctx_dma_arr); -+ rte_free(bp->ctx->cq_mem.ctx_dma_arr); -+ rte_free(bp->ctx->vnic_mem.ctx_dma_arr); -+ rte_free(bp->ctx->stat_mem.ctx_dma_arr); -+ - rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); - rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); - rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); -@@ -4856,6 +4878,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) - rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } - for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { -+ rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); -+ rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); - if (bp->ctx->tqm_mem[i]) - rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); +@@ -304,7 +301,7 @@ set_max_cq_depth(const char *key __rte_unused, + if (*max_cq_depth < DLB2_MIN_CQ_DEPTH_OVERRIDE || + *max_cq_depth > DLB2_MAX_CQ_DEPTH_OVERRIDE || + !rte_is_power_of_2(*max_cq_depth)) { +- DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2\n", ++ DLB2_LOG_ERR("dlb2: max_cq_depth %d and %d and a power of 2", + DLB2_MIN_CQ_DEPTH_OVERRIDE, + DLB2_MAX_CQ_DEPTH_OVERRIDE); + return -EINVAL; +@@ -322,7 +319,7 @@ set_max_enq_depth(const char *key __rte_unused, + int ret; + + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -@@ -6173,13 +6197,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) - for (i = 0; i < BNXT_MAX_CFA_CODE; i++) - bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; -- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); -- if (rc) { -- PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); -- bnxt_free_rep_info(bp); -- return rc; -- } -- - rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); - if (rc) { - PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); -diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c -index 06f196760f..94c3249ae4 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c -+++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c -@@ -863,6 +863,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) - bp->max_l2_ctx, bp->max_vnics); - bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); - bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters); -+ if (!bp->max_mcast_addr) -+ bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR; - memcpy(bp->dsn, resp->device_serial_number, sizeof(bp->dsn)); +@@ -333,7 +330,7 @@ set_max_enq_depth(const char *key __rte_unused, + if (*max_enq_depth < DLB2_MIN_ENQ_DEPTH_OVERRIDE || + *max_enq_depth > DLB2_MAX_ENQ_DEPTH_OVERRIDE || + !rte_is_power_of_2(*max_enq_depth)) { +- DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2\n", ++ DLB2_LOG_ERR("dlb2: max_enq_depth %d and %d and a power of 2", + DLB2_MIN_ENQ_DEPTH_OVERRIDE, + DLB2_MAX_ENQ_DEPTH_OVERRIDE); + return -EINVAL; +@@ -351,7 +348,7 @@ set_max_num_events(const char *key __rte_unused, + int ret; - if (BNXT_PF(bp)) -@@ -3039,6 +3041,8 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) - static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, - struct bnxt_link_info *link_info) - { -+ uint16_t support_pam4_speeds = link_info->support_pam4_speeds; -+ uint16_t 
support_speeds = link_info->support_speeds; - uint16_t eth_link_speed = 0; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } - if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) -@@ -3070,29 +3074,30 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, - case RTE_ETH_LINK_SPEED_25G: - eth_link_speed = - HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; -+ link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; - break; - case RTE_ETH_LINK_SPEED_40G: - eth_link_speed = - HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; - break; - case RTE_ETH_LINK_SPEED_50G: -- if (link_info->support_pam4_speeds & -- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { -- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; -- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; -- } else { -+ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) { - eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; - link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; -+ } else if (support_pam4_speeds & -+ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { -+ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; -+ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; - } - break; - case RTE_ETH_LINK_SPEED_100G: -- if (link_info->support_pam4_speeds & -- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { -- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; -- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; -- } else { -+ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) { - eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; - link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; -+ } else if (support_pam4_speeds & -+ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { -+ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; -+ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; - } - break; - case RTE_ETH_LINK_SPEED_200G: -diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c -index 78337431af..6d6b8252e2 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_reps.c -+++ b/dpdk/drivers/net/bnxt/bnxt_reps.c -@@ -32,6 +32,14 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = { - .flow_ops_get = bnxt_flow_ops_get_op - }; +@@ -361,7 +358,7 @@ set_max_num_events(const char *key __rte_unused, -+static bool bnxt_rep_check_parent(struct bnxt_representor *rep) -+{ -+ if (!rep->parent_dev->data->dev_private) -+ return false; -+ -+ return true; -+} -+ - uint16_t - bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) - { -@@ -124,8 +132,8 @@ bnxt_rep_tx_burst(void *tx_queue, - qid = vfr_txq->txq->queue_id; - vf_rep_bp = vfr_txq->bp; - parent = vf_rep_bp->parent_dev->data->dev_private; -- pthread_mutex_lock(&parent->rep_info->vfr_lock); - ptxq = parent->tx_queues[qid]; -+ pthread_mutex_lock(&ptxq->txq_lock); + if (*max_num_events < 0 || *max_num_events > + DLB2_MAX_NUM_LDB_CREDITS) { +- DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n", ++ DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d", + DLB2_MAX_NUM_LDB_CREDITS); + return -EINVAL; + } +@@ -378,7 +375,7 @@ set_num_dir_credits(const char *key __rte_unused, + int ret; - ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -@@ -134,9 +142,9 @@ bnxt_rep_tx_burst(void *tx_queue, - 
vf_rep_bp->tx_pkts[qid]++; +@@ -388,7 +385,7 @@ set_num_dir_credits(const char *key __rte_unused, + + if (*num_dir_credits < 0 || + *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) { +- DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n", ++ DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d", + DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)); + return -EINVAL; } +@@ -405,7 +402,7 @@ set_dev_id(const char *key __rte_unused, + int ret; -- rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); -+ rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); - ptxq->vfr_tx_cfa_action = 0; -- pthread_mutex_unlock(&parent->rep_info->vfr_lock); -+ pthread_mutex_unlock(&ptxq->txq_lock); + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } - return rc; - } -@@ -266,12 +274,12 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev) - PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id); - eth_dev->data->mac_addrs = NULL; +@@ -425,7 +422,7 @@ set_poll_interval(const char *key __rte_unused, + int ret; -- parent_bp = rep->parent_dev->data->dev_private; -- if (!parent_bp) { -+ if (!bnxt_rep_check_parent(rep)) { - PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n", - eth_dev->data->port_id); - return 0; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -+ parent_bp = rep->parent_dev->data->dev_private; - parent_bp->num_reps--; - vf_id = rep->vf_id; -@@ -539,11 +547,12 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, - int rc = 0; +@@ -445,7 +442,7 @@ set_port_cos(const char *key __rte_unused, + int first, last, cos_id, i; - /* MAC Specifics */ -- parent_bp = rep_bp->parent_dev->data->dev_private; -- if (!parent_bp) { -- PMD_DRV_LOG(ERR, "Rep parent NULL!\n"); -+ if (!bnxt_rep_check_parent(rep_bp)) { -+ /* Need not be an error scenario, if parent is closed first */ -+ PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n"); - return rc; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -+ parent_bp = rep_bp->parent_dev->data->dev_private; - PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n"); - dev_info->max_mac_addrs = parent_bp->max_l2_ctx; - dev_info->max_hash_mac_addrs = 0; -@@ -730,10 +739,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev, - struct bnxt_tx_queue *parent_txq, *txq; - struct bnxt_vf_rep_tx_queue *vfr_txq; -- if (queue_idx >= rep_bp->rx_nr_rings) { -+ if (queue_idx >= rep_bp->tx_nr_rings) { - PMD_DRV_LOG(ERR, - "Cannot create Tx rings %d. %d rings available\n", -- queue_idx, rep_bp->rx_nr_rings); -+ queue_idx, rep_bp->tx_nr_rings); +@@ -458,18 +455,18 @@ set_port_cos(const char *key __rte_unused, + } else if (sscanf(value, "%d:%d", &first, &cos_id) == 2) { + last = first; + } else { +- DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. Should be port-port:val, or port:val\n"); ++ DLB2_LOG_ERR("Error parsing ldb port port_cos devarg. 
Should be port-port:val, or port:val"); return -EINVAL; } -diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c -index 4df4604975..696603757b 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_txq.c -+++ b/dpdk/drivers/net/bnxt/bnxt_txq.c -@@ -112,6 +112,7 @@ void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx) - txq->mz = NULL; + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_LDB_PORTS) { +- DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value\n"); ++ DLB2_LOG_ERR("Error parsing ldb port cos_id arg, invalid port value"); + return -EINVAL; + } - rte_free(txq->free); -+ pthread_mutex_destroy(&txq->txq_lock); - rte_free(txq); - dev->data->tx_queues[queue_idx] = NULL; + if (cos_id < DLB2_COS_0 || cos_id > DLB2_COS_3) { +- DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4\n"); ++ DLB2_LOG_ERR("Error parsing ldb port cos_id devarg, must be between 0 and 4"); + return -EINVAL; } -@@ -195,6 +196,11 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, - goto err; + +@@ -487,7 +484,7 @@ set_cos_bw(const char *key __rte_unused, + struct dlb2_cos_bw *cos_bw = opaque; + + if (opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -+ rc = pthread_mutex_init(&txq->txq_lock, NULL); -+ if (rc != 0) { -+ PMD_DRV_LOG(ERR, "TxQ mutex init failed!"); -+ goto err; -+ } - return 0; - err: - bnxt_tx_queue_release_op(eth_dev, queue_idx); -diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h -index 3a483ad5c3..9e54985c4c 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_txq.h -+++ b/dpdk/drivers/net/bnxt/bnxt_txq.h -@@ -26,6 +26,7 @@ struct bnxt_tx_queue { - int index; - int tx_wake_thresh; - uint32_t vfr_tx_cfa_action; -+ pthread_mutex_t txq_lock; - struct bnxt_tx_ring_info *tx_ring; +@@ -495,11 +492,11 @@ set_cos_bw(const char *key __rte_unused, - unsigned int cp_nr_rings; -diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c -index 899986764f..cef14427a8 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_txr.c -+++ b/dpdk/drivers/net/bnxt/bnxt_txr.c -@@ -562,6 +562,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) + if (sscanf(value, "%d:%d:%d:%d", &cos_bw->val[0], &cos_bw->val[1], + &cos_bw->val[2], &cos_bw->val[3]) != 4) { +- DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n"); ++ DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100"); + return -EINVAL; + } + if (cos_bw->val[0] + cos_bw->val[1] + cos_bw->val[2] + cos_bw->val[3] > 100) { +- DLB2_LOG_ERR("Error parsing cos bandwidth devarg. Should be bw0:bw1:bw2:bw3 where all values combined are <= 100\n"); ++ DLB2_LOG_ERR("Error parsing cos bandwidth devarg. 
Should be bw0:bw1:bw2:bw3 where all values combined are <= 100"); + return -EINVAL; + } - uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) -+{ -+ struct bnxt_tx_queue *txq = tx_queue; -+ uint16_t rc; -+ -+ pthread_mutex_lock(&txq->txq_lock); -+ rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts); -+ pthread_mutex_unlock(&txq->txq_lock); -+ -+ return rc; -+} -+ -+uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts) - { - int rc; - uint16_t nb_tx_pkts = 0; -diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h -index e64ea2c7d1..09078d545d 100644 ---- a/dpdk/drivers/net/bnxt/bnxt_txr.h -+++ b/dpdk/drivers/net/bnxt/bnxt_txr.h -@@ -47,7 +47,9 @@ void bnxt_free_tx_rings(struct bnxt *bp); - int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); - int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); - uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -- uint16_t nb_pkts); -+ uint16_t nb_pkts); -+uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, -+ uint16_t nb_pkts); - #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) - uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); -diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c -index f3f5bda890..852deef3b4 100644 ---- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c -+++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c -@@ -253,6 +253,7 @@ ulp_ha_mgr_timer_cb(void *arg) +@@ -515,7 +512,7 @@ set_sw_credit_quanta(const char *key __rte_unused, + int ret; - myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx); - if (myclient_cnt == 0) { -+ bnxt_ulp_cntxt_entry_release(); - BNXT_TF_DBG(ERR, - "PANIC Client Count is zero kill timer\n."); - return; -diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c -index 79f1b3f1a0..06c21ebe6d 100644 ---- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c -+++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c -@@ -865,7 +865,6 @@ bond_mode_8023ad_periodic_cb(void *arg) - struct bond_dev_private *internals = bond_dev->data->dev_private; - struct port *port; - struct rte_eth_link link_info; -- struct rte_ether_addr member_addr; - struct rte_mbuf *lacp_pkt = NULL; - uint16_t member_id; - uint16_t i; -@@ -892,7 +891,6 @@ bond_mode_8023ad_periodic_cb(void *arg) - key = 0; - } + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -- rte_eth_macaddr_get(member_id, &member_addr); - port = &bond_mode_8023ad_ports[member_id]; +@@ -524,7 +521,7 @@ set_sw_credit_quanta(const char *key __rte_unused, + return ret; - key = rte_cpu_to_be_16(key); -@@ -904,8 +902,8 @@ bond_mode_8023ad_periodic_cb(void *arg) - SM_FLAG_SET(port, NTT); - } + if (*sw_credit_quanta <= 0) { +- DLB2_LOG_ERR("sw_credit_quanta must be > 0\n"); ++ DLB2_LOG_ERR("sw_credit_quanta must be > 0"); + return -EINVAL; + } -- if (!rte_is_same_ether_addr(&port->actor.system, &member_addr)) { -- rte_ether_addr_copy(&member_addr, &port->actor.system); -+ if (!rte_is_same_ether_addr(&internals->mode4.mac_addr, &port->actor.system)) { -+ rte_ether_addr_copy(&internals->mode4.mac_addr, &port->actor.system); - if (port->aggregator_port_id == member_id) - SM_FLAG_SET(port, NTT); - } -@@ -1173,21 +1171,20 @@ void - bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) - { - struct bond_dev_private 
*internals = bond_dev->data->dev_private; -- struct rte_ether_addr member_addr; - struct port *member, *agg_member; - uint16_t member_id, i, j; +@@ -540,7 +537,7 @@ set_hw_credit_quanta(const char *key __rte_unused, + int ret; - bond_mode_8023ad_stop(bond_dev); + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -+ rte_eth_macaddr_get(internals->port_id, &internals->mode4.mac_addr); - for (i = 0; i < internals->active_member_count; i++) { - member_id = internals->active_members[i]; - member = &bond_mode_8023ad_ports[member_id]; -- rte_eth_macaddr_get(member_id, &member_addr); +@@ -560,7 +557,7 @@ set_default_depth_thresh(const char *key __rte_unused, + int ret; -- if (rte_is_same_ether_addr(&member_addr, &member->actor.system)) -+ if (rte_is_same_ether_addr(&internals->mode4.mac_addr, &member->actor.system)) - continue; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -- rte_ether_addr_copy(&member_addr, &member->actor.system); -+ rte_ether_addr_copy(&internals->mode4.mac_addr, &member->actor.system); - /* Do nothing if this port is not an aggregator. In other case - * Set NTT flag on every port that use this aggregator. */ - if (member->aggregator_port_id != member_id) -diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c -index 71a91675f7..5d0be5caf5 100644 ---- a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c -+++ b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c -@@ -180,6 +180,8 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -579,7 +576,7 @@ set_vector_opts_enab(const char *key __rte_unused, + bool *dlb2_vector_opts_enabled = opaque; - count->bytes = 0; - count->hits = 0; -+ count->bytes_set = 0; -+ count->hits_set = 0; - rte_memcpy(&member_count, count, sizeof(member_count)); - for (i = 0; i < internals->member_count; i++) { - ret = rte_flow_query(internals->members[i].port_id, -@@ -192,8 +194,12 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, - } - count->bytes += member_count.bytes; - count->hits += member_count.hits; -+ count->bytes_set |= member_count.bytes_set; -+ count->hits_set |= member_count.hits_set; - member_count.bytes = 0; - member_count.hits = 0; -+ member_count.bytes_set = 0; -+ member_count.hits_set = 0; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } - return 0; - } -diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.c b/dpdk/drivers/net/cnxk/cn10k_ethdev.c -index 4a4e97287c..29b7f2ba5e 100644 ---- a/dpdk/drivers/net/cnxk/cn10k_ethdev.c -+++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.c -@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) - struct roc_nix_sq *sq = &dev->sqs[qidx]; - do { - handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F); -+ /* Check if SQ is empty */ - roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail); -+ if (head != tail) -+ continue; -+ -+ /* Check if completion CQ is empty */ -+ roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail); - } while (head != tail); + +@@ -599,7 +596,7 @@ set_default_ldb_port_allocation(const char *key __rte_unused, + bool *default_ldb_port_allocation = opaque; + + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; } -diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c 
b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c -index 575d0fabd5..4719f6b863 100644 ---- a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c -+++ b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c -@@ -1087,8 +1087,8 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, - { - struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); -- struct roc_ot_ipsec_inb_sa *inb_sa_dptr; - struct rte_security_ipsec_xform *ipsec; -+ struct cn10k_sec_sess_priv sess_priv; - struct rte_crypto_sym_xform *crypto; - struct cnxk_eth_sec_sess *eth_sec; - bool inbound; -@@ -1109,6 +1109,11 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, - eth_sec->spi = conf->ipsec.spi; +@@ -619,7 +616,7 @@ set_enable_cq_weight(const char *key __rte_unused, + bool *enable_cq_weight = opaque; - if (inbound) { -+ struct roc_ot_ipsec_inb_sa *inb_sa_dptr, *inb_sa; -+ struct cn10k_inb_priv_data *inb_priv; -+ -+ inb_sa = eth_sec->sa; -+ inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa); - inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr; - memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa)); + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -@@ -1116,26 +1121,74 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, - true); - if (rc) - return -EINVAL; -+ /* Use cookie for original data */ -+ inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie; -+ -+ if (ipsec->options.stats == 1) { -+ /* Enable mib counters */ -+ inb_sa_dptr->w0.s.count_mib_bytes = 1; -+ inb_sa_dptr->w0.s.count_mib_pkts = 1; -+ } -+ -+ /* Enable out-of-place processing */ -+ if (ipsec->options.ingress_oop) -+ inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL; +@@ -640,7 +637,7 @@ set_qid_depth_thresh(const char *key __rte_unused, + int first, last, thresh, i; - rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa, - eth_sec->inb, - sizeof(struct roc_ot_ipsec_inb_sa)); - if (rc) - return -EINVAL; -+ -+ /* Save userdata in inb private area */ -+ inb_priv->userdata = conf->userdata; + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } + +@@ -657,18 +654,18 @@ set_qid_depth_thresh(const char *key __rte_unused, + } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) { + last = first; } else { -- struct roc_ot_ipsec_outb_sa *outb_sa_dptr; -+ struct roc_ot_ipsec_outb_sa *outb_sa_dptr, *outb_sa; -+ struct cn10k_outb_priv_data *outb_priv; -+ struct cnxk_ipsec_outb_rlens *rlens; +- DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n"); ++ DLB2_LOG_ERR("Error parsing qid depth devarg. 
Should be all:val, qid-qid:val, or qid:val"); + return -EINVAL; + } -+ outb_sa = eth_sec->sa; -+ outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa); -+ rlens = &outb_priv->rlens; - outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr; - memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa)); + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) { +- DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n"); ++ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value"); + return -EINVAL; + } - rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto); - if (rc) - return -EINVAL; -+ -+ /* Save rlen info */ -+ cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto); -+ -+ if (ipsec->options.stats == 1) { -+ /* Enable mib counters */ -+ outb_sa_dptr->w0.s.count_mib_bytes = 1; -+ outb_sa_dptr->w0.s.count_mib_pkts = 1; -+ } -+ -+ sess_priv.u64 = 0; -+ sess_priv.sa_idx = outb_priv->sa_idx; -+ sess_priv.roundup_byte = rlens->roundup_byte; -+ sess_priv.roundup_len = rlens->roundup_len; -+ sess_priv.partial_len = rlens->partial_len; -+ sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode; -+ sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver; -+ /* Propagate inner checksum enable from SA to fast path */ -+ sess_priv.chksum = -+ (!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable); -+ sess_priv.dec_ttl = ipsec->options.dec_ttl; -+ if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35)) -+ sess_priv.nixtx_off = 1; -+ - rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa, - eth_sec->inb, - sizeof(struct roc_ot_ipsec_outb_sa)); - if (rc) - return -EINVAL; -+ -+ /* Save userdata */ -+ outb_priv->userdata = conf->userdata; -+ sess->fast_mdata = sess_priv.u64; + if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) { +- DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n", ++ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d", + DLB2_MAX_QUEUE_DEPTH_THRESHOLD); + return -EINVAL; } +@@ -688,7 +685,7 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused, + int first, last, thresh, i; - return 0; -diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h -index 7bb4c86d75..86e4233dc7 100644 ---- a/dpdk/drivers/net/cnxk/cn10k_rx.h -+++ b/dpdk/drivers/net/cnxk/cn10k_rx.h -@@ -705,7 +705,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, - if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) { - const uint64_t *wqe = (const uint64_t *)(mbuf + 1); + if (value == NULL || opaque == NULL) { +- DLB2_LOG_ERR("NULL pointer\n"); ++ DLB2_LOG_ERR("NULL pointer"); + return -EINVAL; + } -- if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) -+ if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) - rx = (const union nix_rx_parse_u *)(wqe + 1); +@@ -705,18 +702,18 @@ set_qid_depth_thresh_v2_5(const char *key __rte_unused, + } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) { + last = first; + } else { +- DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n"); ++ DLB2_LOG_ERR("Error parsing qid depth devarg. 
Should be all:val, qid-qid:val, or qid:val"); + return -EINVAL; } -diff --git a/dpdk/drivers/net/cnxk/cn10k_rxtx.h b/dpdk/drivers/net/cnxk/cn10k_rxtx.h -index aeffc4ac92..9f33d0192e 100644 ---- a/dpdk/drivers/net/cnxk/cn10k_rxtx.h -+++ b/dpdk/drivers/net/cnxk/cn10k_rxtx.h -@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe) - m = m_next; + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) { +- DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n"); ++ DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value"); + return -EINVAL; + } + + if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) { +- DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n", ++ DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d", + DLB2_MAX_QUEUE_DEPTH_THRESHOLD); + return -EINVAL; + } +@@ -738,7 +735,7 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev, + if (ret) { + const struct rte_eventdev_data *data = dev->data; + +- DLB2_LOG_ERR("get resources err=%d, devid=%d\n", ++ DLB2_LOG_ERR("get resources err=%d, devid=%d", + ret, data->dev_id); + /* fn is void, so fall through and return values set up in + * probe +@@ -781,7 +778,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + struct dlb2_create_sched_domain_args *cfg; + + if (resources_asked == NULL) { +- DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n"); ++ DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter"); + ret = EINVAL; + goto error_exit; + } +@@ -809,7 +806,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + + if (cos_ports > resources_asked->num_ldb_ports || + (cos_ports && dlb2->max_cos_port >= resources_asked->num_ldb_ports)) { +- DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports\n"); ++ DLB2_LOG_ERR("dlb2: num_ldb_ports < cos_ports"); + ret = EINVAL; + goto error_exit; + } +@@ -854,7 +851,7 @@ dlb2_hw_create_sched_domain(struct dlb2_eventdev *dlb2, + + ret = dlb2_iface_sched_domain_create(handle, cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n", ++ DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s", + ret, + dlb2_error_strings[cfg->response.status]); + +@@ -930,27 +927,27 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) + dlb2_hw_reset_sched_domain(dev, true); + ret = dlb2_hw_query_resources(dlb2); + if (ret) { +- DLB2_LOG_ERR("get resources err=%d, devid=%d\n", ++ DLB2_LOG_ERR("get resources err=%d, devid=%d", + ret, data->dev_id); + return ret; } - rte_pktmbuf_free_seg(m); -+ txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL; + } - head++; - head &= qmask; -diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h -index 467f0ccc65..c84154ee84 100644 ---- a/dpdk/drivers/net/cnxk/cn10k_tx.h -+++ b/dpdk/drivers/net/cnxk/cn10k_tx.h -@@ -784,19 +784,35 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr, - } - #endif + if (config->nb_event_queues > rsrcs->num_queues) { +- DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n", ++ DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).", + config->nb_event_queues, + rsrcs->num_queues); + return -EINVAL; + } + if (config->nb_event_ports > (rsrcs->num_ldb_ports + + rsrcs->num_dir_ports)) { +- DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n", ++ DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).", + 
config->nb_event_ports, + (rsrcs->num_ldb_ports + rsrcs->num_dir_ports)); + return -EINVAL; + } + if (config->nb_events_limit > rsrcs->nb_events_limit) { +- DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n", ++ DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).", + config->nb_events_limit, + rsrcs->nb_events_limit); + return -EINVAL; +@@ -1000,7 +997,7 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev) -+static inline void -+cn10k_nix_free_extmbuf(struct rte_mbuf *m) -+{ -+ struct rte_mbuf *m_next; -+ while (m != NULL) { -+ m_next = m->next; -+ rte_pktmbuf_free_seg(m); -+ m = m_next; -+ } -+} -+ - static __rte_always_inline uint64_t --cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq, -- struct nix_send_hdr_s *send_hdr) -+cn10k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn10k_eth_txq *txq, -+ struct nix_send_hdr_s *send_hdr, uint64_t *aura) - { -+ struct rte_mbuf *prev = NULL; - uint32_t sqe_id; + if (dlb2_hw_create_sched_domain(dlb2, handle, rsrcs, + dlb2->version) < 0) { +- DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n"); ++ DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed"); + return -ENODEV; + } - if (RTE_MBUF_HAS_EXTBUF(m)) { - if (unlikely(txq->tx_compl.ena == 0)) { -- rte_pktmbuf_free_seg(m); -+ m->next = *extm; -+ *extm = m; - return 1; - } - if (send_hdr->w0.pnc) { -- txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m; -+ sqe_id = send_hdr->w1.sqe_id; -+ prev = txq->tx_compl.ptr[sqe_id]; -+ m->next = prev; -+ txq->tx_compl.ptr[sqe_id] = m; - } else { - sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED); - send_hdr->w0.pnc = 1; -@@ -806,10 +822,160 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq, - } - return 1; - } else { -- return cnxk_nix_prefree_seg(m); -+ return cnxk_nix_prefree_seg(m, aura); +@@ -1068,7 +1065,7 @@ dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group) + + ret = dlb2_iface_get_sn_allocation(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; } - } +@@ -1088,7 +1085,7 @@ dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num) -+#if defined(RTE_ARCH_ARM64) -+/* Only called for first segments of single segmented mbufs */ -+static __rte_always_inline void -+cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm, -+ struct cn10k_eth_txq *txq, -+ uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0, -+ uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1) -+{ -+ struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr; -+ uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask; -+ bool tx_compl_ena = txq->tx_compl.ena; -+ struct rte_mbuf *m0, *m1, *m2, *m3; -+ struct rte_mbuf *cookie; -+ uint64_t w0, w1, aura; -+ uint64_t sqe_id; -+ -+ m0 = mbufs[0]; -+ m1 = mbufs[1]; -+ m2 = mbufs[2]; -+ m3 = mbufs[3]; -+ -+ /* mbuf 0 */ -+ w0 = vgetq_lane_u64(*senddesc01_w0, 0); -+ if (RTE_MBUF_HAS_EXTBUF(m0)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc01_w1, 0); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m0->next = *extm; -+ *extm = m0; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ 
tx_compl_ptr[sqe_id] = m0; -+ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0); -+ -+ /* mbuf1 */ -+ w0 = vgetq_lane_u64(*senddesc01_w0, 1); -+ if (RTE_MBUF_HAS_EXTBUF(m1)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc01_w1, 1); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m1->next = *extm; -+ *extm = m1; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m1; -+ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1); -+ -+ /* mbuf 2 */ -+ w0 = vgetq_lane_u64(*senddesc23_w0, 0); -+ if (RTE_MBUF_HAS_EXTBUF(m2)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc23_w1, 0); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m2->next = *extm; -+ *extm = m2; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m2; -+ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0); -+ -+ /* mbuf3 */ -+ w0 = vgetq_lane_u64(*senddesc23_w0, 1); -+ if (RTE_MBUF_HAS_EXTBUF(m3)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc23_w1, 1); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m3->next = *extm; -+ *extm = m3; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m3; -+ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m3) ? 
m3 : rte_mbuf_from_indirect(m3); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1); -+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG -+ RTE_SET_USED(cookie); -+#endif -+} -+#endif -+ - static __rte_always_inline void - cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) - { -@@ -864,9 +1030,9 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) + ret = dlb2_iface_set_sn_allocation(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; + } +@@ -1107,7 +1104,7 @@ dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group) - static __rte_always_inline void - cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags, -- const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag, -- uint64_t mark_fmt) -+ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, -+ const uint16_t flags, const uint64_t lso_tun_fmt, bool *sec, -+ uint8_t mark_flag, uint64_t mark_fmt) - { - uint8_t mark_off = 0, mark_vlan = 0, markptr = 0; - struct nix_send_ext_s *send_hdr_ext; -@@ -889,6 +1055,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, - sg = (union nix_send_sg_s *)(cmd + 2); + ret = dlb2_iface_get_sn_occupancy(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; + } +@@ -1161,7 +1158,7 @@ dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2, } -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) -+ send_hdr->w0.pnc = 0; -+ - if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) { - ol_flags = m->ol_flags; - w1.u = 0; -@@ -1049,19 +1218,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, - send_hdr->w1.u = w1.u; + if (i == DLB2_NUM_SN_GROUPS) { +- DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n", ++ DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots", + __func__, sequence_numbers); + return; + } +@@ -1236,7 +1233,7 @@ dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2, - if (!(flags & NIX_TX_MULTI_SEG_F)) { -+ struct rte_mbuf *cookie; -+ - sg->seg1_size = send_hdr->w0.total; - *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); + ret = dlb2_iface_ldb_queue_create(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return -EINVAL; + } +@@ -1272,7 +1269,7 @@ dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev, - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -+ uint64_t aura; -+ - /* DF bit = 1 if refcount of current mbuf or parent mbuf - * is greater than 1 - * DF bit = 0 otherwise - */ -- send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr); -+ aura = send_hdr->w0.aura; -+ send_hdr->w0.df = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); -+ send_hdr->w0.aura = aura; - } -+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG - /* Mark mempool object as "put" since it is freed by NIX */ - if (!send_hdr->w0.df) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+#else -+ RTE_SET_USED(cookie); -+#endif - } else { - sg->seg1_size = m->data_len; - *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); -@@ -1113,7 +1293,7 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr, - struct nix_send_mem_s *send_mem; + qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf); + if (qm_qid < 0) { +- DLB2_LOG_ERR("Failed to create the load-balanced queue\n"); ++ DLB2_LOG_ERR("Failed to create the load-balanced queue"); - send_mem = (struct nix_send_mem_s *)(lmt + off); -- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp -+ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp - * should not be recorded, hence changing the alg type to - * NIX_SENDMEMALG_SUB and also changing send mem addr field to - * next 8 bytes as it corrupts the actual Tx tstamp registered -@@ -1128,13 +1308,14 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr, - } + return qm_qid; + } +@@ -1380,7 +1377,7 @@ dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name) + RTE_CACHE_LINE_SIZE); - static __rte_always_inline uint16_t --cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) -+cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, -+ uint64_t *cmd, const uint16_t flags) - { - uint64_t prefree = 0, aura0, aura, nb_segs, segdw; - struct nix_send_hdr_s *send_hdr; - union nix_send_sg_s *sg, l_sg; - union nix_send_sg2_s l_sg2; -+ struct rte_mbuf *cookie; - struct rte_mbuf *m_next; - uint8_t off, is_sg2; - uint64_t len, dlen; -@@ -1163,21 +1344,27 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, - len -= dlen; - nb_segs = m->nb_segs - 1; - m_next = m->next; -+ m->next = NULL; -+ m->nb_segs = 1; - slist = &cmd[3 + off + 1]; + if (qe == NULL) { +- DLB2_LOG_ERR("dlb2: no memory for consume_qe\n"); ++ DLB2_LOG_ERR("dlb2: no memory for consume_qe"); + return -ENOMEM; + } + qm_port->consume_qe = qe; +@@ -1412,7 +1409,7 @@ dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name) + RTE_CACHE_LINE_SIZE); -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); - /* Set invert df if buffer is not to be freed by H/W */ - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -- prefree = cn10k_nix_prefree_seg(m, txq, send_hdr); -+ aura = send_hdr->w0.aura; -+ prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); -+ send_hdr->w0.aura = aura; - l_sg.i1 = prefree; + if (qe == NULL) { +- DLB2_LOG_ERR("dlb2: no memory for complete_qe\n"); ++ DLB2_LOG_ERR("dlb2: no memory for complete_qe"); + return -ENOMEM; } + qm_port->int_arm_qe = qe; +@@ -1440,20 +1437,20 @@ dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name) + qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE); - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - /* Mark mempool object as "put" since it is freed by NIX */ - if (!prefree) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); -+#else -+ RTE_SET_USED(cookie); - #endif -- m->next = NULL; + if (qm_port->qe4 == NULL) { +- DLB2_LOG_ERR("dlb2: no qe4 memory\n"); ++ DLB2_LOG_ERR("dlb2: no qe4 memory"); + ret = -ENOMEM; + goto error_exit; + } - /* Quickly handle single segmented packets. With this if-condition - * compiler will completely optimize out the below do-while loop -@@ -1207,9 +1394,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, - aura = aura0; - prefree = 0; + ret = dlb2_init_int_arm_qe(qm_port, mz_name); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret); ++ DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d", ret); + goto error_exit; + } -+ m->next = NULL; -+ -+ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { - aura = roc_npa_aura_handle_to_aura(m->pool->pool_id); -- prefree = cn10k_nix_prefree_seg(m, txq, send_hdr); -+ prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); - is_sg2 = aura != aura0 && !prefree; - } + ret = dlb2_init_consume_qe(qm_port, mz_name); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret); ++ DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d", ret); + goto error_exit; + } -@@ -1259,13 +1449,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, - l_sg.subdc = NIX_SUBDC_SG; - slist++; - } -- m->next = NULL; +@@ -1536,14 +1533,14 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, + return -EINVAL; - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - /* Mark mempool object as "put" since it is freed by NIX - */ - if (!prefree) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+#else -+ RTE_SET_USED(cookie); - #endif - m = m_next; - } while (nb_segs); -@@ -1302,6 +1493,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts, - uint8_t lnum, c_lnum, c_shft, c_loff; - uintptr_t pa, lbase = txq->lmt_base; - uint16_t lmt_id, burst, left, i; -+ struct rte_mbuf *extm = NULL; - uintptr_t c_lbase = lbase; - uint64_t lso_tun_fmt = 0; - uint64_t mark_fmt = 0; -@@ -1356,7 +1548,7 @@ again: - if (flags & NIX_TX_OFFLOAD_TSO_F) - cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags); + if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { +- DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d\n", ++ DLB2_LOG_ERR("dlb2: invalid cq depth, must be at least %d", + DLB2_MIN_CQ_DEPTH); + return -EINVAL; + } -- cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, -+ cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, - &sec, mark_flag, mark_fmt); + if (dlb2->version == DLB2_HW_V2 && 
ev_port->cq_weight != 0 && + ev_port->cq_weight > dequeue_depth) { +- DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d\n", ++ DLB2_LOG_ERR("dlb2: invalid cq dequeue depth %d, must be >= cq weight %d", + dequeue_depth, ev_port->cq_weight); + return -EINVAL; + } +@@ -1579,7 +1576,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, - laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0); -@@ -1431,6 +1623,11 @@ again: + ret = dlb2_iface_ldb_port_create(handle, &cfg, dlb2->poll_mode); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + goto error_exit; } +@@ -1602,7 +1599,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, - rte_io_wmb(); -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { -+ cn10k_nix_free_extmbuf(extm); -+ extm = NULL; -+ } -+ - if (left) - goto again; + ret = dlb2_init_qe_mem(qm_port, mz_name); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret); ++ DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret); + goto error_exit; + } -@@ -1446,6 +1643,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws, - uintptr_t pa0, pa1, lbase = txq->lmt_base; - const rte_iova_t io_addr = txq->io_addr; - uint16_t segdw, lmt_id, burst, left, i; -+ struct rte_mbuf *extm = NULL; - uint8_t lnum, c_lnum, c_loff; - uintptr_t c_lbase = lbase; - uint64_t lso_tun_fmt = 0; -@@ -1507,7 +1705,7 @@ again: - if (flags & NIX_TX_OFFLOAD_TSO_F) - cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags); +@@ -1615,7 +1612,7 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2, + ret = dlb2_iface_enable_cq_weight(handle, &cq_weight_args); -- cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, -+ cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, - &sec, mark_flag, mark_fmt); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)", + ret, + dlb2_error_strings[cfg.response. 
status]); + goto error_exit; +@@ -1717,7 +1714,7 @@ error_exit: - laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0); -@@ -1521,7 +1719,7 @@ again: - /* Move NIX desc to LMT/NIXTX area */ - cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags); - /* Store sg list directly on lmt line */ -- segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], (uint64_t *)laddr, -+ segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], &extm, (uint64_t *)laddr, - flags); - cn10k_nix_xmit_prepare_tstamp(txq, laddr, tx_pkts[i]->ol_flags, - segdw, flags); -@@ -1594,6 +1792,11 @@ again: + rte_spinlock_unlock(&handle->resource_lock); + +- DLB2_LOG_ERR("dlb2: create ldb port failed!\n"); ++ DLB2_LOG_ERR("dlb2: create ldb port failed!"); + + return ret; + } +@@ -1761,13 +1758,13 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2, + return -EINVAL; + + if (dequeue_depth < DLB2_MIN_CQ_DEPTH) { +- DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n", ++ DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d", + DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH); + return -EINVAL; } - rte_io_wmb(); -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { -+ cn10k_nix_free_extmbuf(extm); -+ extm = NULL; -+ } -+ - if (left) - goto again; + if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) { +- DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n", ++ DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d", + DLB2_MIN_ENQUEUE_DEPTH); + return -EINVAL; + } +@@ -1802,7 +1799,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2, -@@ -1644,7 +1847,7 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, + ret = dlb2_iface_dir_port_create(handle, &cfg, dlb2->poll_mode); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + goto error_exit; + } +@@ -1827,7 +1824,7 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2, + ret = dlb2_init_qe_mem(qm_port, mz_name); - static __rte_always_inline uint16_t - cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, -+ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, - uint64x2_t *cmd0, uint64x2_t *cmd1, - uint64x2_t *cmd2, uint64x2_t *cmd3, - const uint32_t flags) -@@ -1659,7 +1862,7 @@ cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq, - vst1q_u64(cmd + 2, *cmd1); /* sg */ + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret); ++ DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d", ret); + goto error_exit; } -- segdw = cn10k_nix_prepare_mseg(txq, m, cmd, flags); -+ segdw = cn10k_nix_prepare_mseg(txq, m, extm, cmd, flags); +@@ -1916,7 +1913,7 @@ error_exit: - if (flags & NIX_TX_OFFLOAD_TSTAMP_F) - vst1q_u64(cmd + segdw * 2 - 2, *cmd3); -@@ -1694,9 +1897,13 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, - len -= dlen; - sg_u = sg_u | ((uint64_t)dlen); + rte_spinlock_unlock(&handle->resource_lock); -+ /* Mark mempool object as "put" since it is freed by NIX */ -+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ - nb_segs = m->nb_segs - 1; - m_next = m->next; - m->next = NULL; -+ m->nb_segs = 1; - m = m_next; - /* Fill mbuf segments */ - do { -@@ -1719,6 +1926,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, - slist++; - } - m->next = NULL; -+ /* Mark mempool object as "put" since it is freed by NIX */ -+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ 
- m = m_next; - } while (nb_segs); +- DLB2_LOG_ERR("dlb2: create dir port failed!\n"); ++ DLB2_LOG_ERR("dlb2: create dir port failed!"); -@@ -1742,8 +1952,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, - union nix_send_hdr_w0_u sh; - union nix_send_sg_s sg; + return ret; + } +@@ -1932,7 +1929,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, + int ret; -- if (m->nb_segs == 1) -+ if (m->nb_segs == 1) { -+ /* Mark mempool object as "put" since it is freed by NIX */ -+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); - return; -+ } + if (dev == NULL || port_conf == NULL) { +- DLB2_LOG_ERR("Null parameter\n"); ++ DLB2_LOG_ERR("Null parameter"); + return -EINVAL; + } - sh.u = vgetq_lane_u64(cmd0[0], 0); - sg.u = vgetq_lane_u64(cmd1[0], 0); -@@ -1759,7 +1972,7 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, +@@ -1950,7 +1947,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, + ev_port = &dlb2->ev_ports[ev_port_id]; + /* configured? */ + if (ev_port->setup_done) { +- DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id); ++ DLB2_LOG_ERR("evport %d is already configured", ev_port_id); + return -EINVAL; + } - static __rte_always_inline uint8_t - cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, -- struct rte_mbuf **mbufs, uint64x2_t *cmd0, -+ struct rte_mbuf **mbufs, struct rte_mbuf **extm, uint64x2_t *cmd0, - uint64x2_t *cmd1, uint64x2_t *cmd2, - uint64x2_t *cmd3, uint8_t *segdw, - uint64_t *lmt_addr, __uint128_t *data128, -@@ -1777,7 +1990,7 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, - lmt_addr += 16; - off = 0; - } -- off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j], -+ off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j], extm, - lmt_addr + off * 2, &cmd0[j], &cmd1[j], - &cmd2[j], &cmd3[j], flags); - } -@@ -1803,6 +2016,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, - *data128 |= ((__uint128_t)7) << *shift; - *shift += 3; +@@ -1982,7 +1979,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, -+ /* Mark mempool object as "put" since it is freed by NIX */ -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0); - return 1; + if (port_conf->enqueue_depth > sw_credit_quanta || + port_conf->enqueue_depth > hw_credit_quanta) { +- DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d\n", ++ DLB2_LOG_ERR("Invalid port config. 
Enqueue depth %d must be <= credit quanta %d and batch size %d", + port_conf->enqueue_depth, + sw_credit_quanta, + hw_credit_quanta); +@@ -2004,7 +2001,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, + port_conf->dequeue_depth, + port_conf->enqueue_depth); + if (ret < 0) { +- DLB2_LOG_ERR("Failed to create the lB port ve portId=%d\n", ++ DLB2_LOG_ERR("Failed to create the lB port ve portId=%d", + ev_port_id); + + return ret; +@@ -2015,7 +2012,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, + port_conf->dequeue_depth, + port_conf->enqueue_depth); + if (ret < 0) { +- DLB2_LOG_ERR("Failed to create the DIR port\n"); ++ DLB2_LOG_ERR("Failed to create the DIR port"); + return ret; } } -@@ -1821,6 +2039,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, - vst1q_u64(lmt_addr + 10, cmd2[j + 1]); - vst1q_u64(lmt_addr + 12, cmd1[j + 1]); - vst1q_u64(lmt_addr + 14, cmd3[j + 1]); -+ -+ /* Mark mempool object as "put" since it is freed by NIX */ -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool, -+ (void **)&mbufs[j + 1], 1, 0); - } else if (flags & NIX_TX_NEED_EXT_HDR) { - /* EXT header take 3 each, space for 2 segs.*/ - cn10k_nix_prepare_mseg_vec(mbufs[j], -@@ -1920,14 +2143,14 @@ cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff, +@@ -2082,9 +2079,9 @@ dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle, - static __rte_always_inline void - cn10k_nix_xmit_store(struct cn10k_eth_txq *txq, -- struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, -+ struct rte_mbuf *mbuf, struct rte_mbuf **extm, uint8_t segdw, uintptr_t laddr, - uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2, - uint64x2_t cmd3, const uint16_t flags) - { - uint8_t off; + ret = dlb2_iface_map_qid(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); +- DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n", ++ DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d", + handle->domain_id, cfg.port_id, + cfg.qid, + cfg.priority); +@@ -2117,7 +2114,7 @@ dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2, + first_avail = i; + } + if (first_avail == -1) { +- DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n", ++ DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.", + ev_port->qm_port.id); + return -EINVAL; + } +@@ -2154,7 +2151,7 @@ dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2, - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -- cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, LMT_OFF(laddr, 0, 0), -+ cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, extm, LMT_OFF(laddr, 0, 0), - &cmd0, &cmd1, &cmd2, &cmd3, - flags); - return; -@@ -1997,13 +2220,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, - uint64x2_t sgdesc01_w0, sgdesc23_w0; - uint64x2_t sgdesc01_w1, sgdesc23_w1; - struct cn10k_eth_txq *txq = tx_queue; -- uint64x2_t xmask01_w0, xmask23_w0; -- uint64x2_t xmask01_w1, xmask23_w1; - rte_iova_t io_addr = txq->io_addr; - uint8_t lnum, shift = 0, loff = 0; - uintptr_t laddr = txq->lmt_base; - uint8_t c_lnum, c_shft, c_loff; -- struct nix_send_hdr_s send_hdr; - uint64x2_t ltypes01, ltypes23; - uint64x2_t xtmp128, ytmp128; - uint64x2_t xmask01, xmask23; -@@ -2014,6 +2234,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, - __uint128_t data128; - uint64_t data[2]; - } wd; -+ struct rte_mbuf *extm = NULL; + ret = 
dlb2_iface_dir_queue_create(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return -EINVAL; + } +@@ -2172,7 +2169,7 @@ dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2, + qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id); - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena) - handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F); -@@ -2098,7 +2319,8 @@ again: + if (qm_qid < 0) { +- DLB2_LOG_ERR("Failed to create the DIR queue\n"); ++ DLB2_LOG_ERR("Failed to create the DIR queue"); + return qm_qid; } - for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) { -- if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) { -+ if (flags & NIX_TX_OFFLOAD_SECURITY_F && -+ (((int)((16 - c_lnum) << 1) - c_loff) < 4)) { - burst = i; - break; - } -@@ -2153,7 +2375,7 @@ again: - } - /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */ - senddesc01_w0 = -- vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF)); -+ vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF)); - sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF)); +@@ -2202,7 +2199,7 @@ dlb2_do_port_link(struct rte_eventdev *dev, + err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio); - senddesc23_w0 = senddesc01_w0; -@@ -2859,73 +3081,8 @@ again: - !(flags & NIX_TX_MULTI_SEG_F) && - !(flags & NIX_TX_OFFLOAD_SECURITY_F)) { - /* Set don't free bit if reference count > 1 */ -- xmask01_w0 = vdupq_n_u64(0); -- xmask01_w1 = vdupq_n_u64(0); -- xmask23_w0 = xmask01_w0; -- xmask23_w1 = xmask01_w1; -- -- /* Move mbufs to iova */ -- mbuf0 = (uint64_t *)tx_pkts[0]; -- mbuf1 = (uint64_t *)tx_pkts[1]; -- mbuf2 = (uint64_t *)tx_pkts[2]; -- mbuf3 = (uint64_t *)tx_pkts[3]; -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0); -- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf0)->pool, -- (void **)&mbuf0, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1); -- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf1)->pool, -- (void **)&mbuf1, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0); -- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf2)->pool, -- (void **)&mbuf2, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1); -- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf3)->pool, -- (void **)&mbuf3, 1, 0); -- } -- -- senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0); -- senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0); -- senddesc01_w1 = 
vorrq_u64(senddesc01_w1, xmask01_w1); -- senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1); -+ cn10k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0, -+ &senddesc23_w0, &senddesc01_w1, &senddesc23_w1); - } else if (!(flags & NIX_TX_MULTI_SEG_F) && - !(flags & NIX_TX_OFFLOAD_SECURITY_F)) { - /* Move mbufs to iova */ -@@ -2997,7 +3154,7 @@ again: - &shift, &wd.data128, &next); - - /* Store mbuf0 to LMTLINE/CPT NIXTX area */ -- cn10k_nix_xmit_store(txq, tx_pkts[0], segdw[0], next, -+ cn10k_nix_xmit_store(txq, tx_pkts[0], &extm, segdw[0], next, - cmd0[0], cmd1[0], cmd2[0], cmd3[0], - flags); + if (err) { +- DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n", ++ DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d", + ev_queue->qm_queue.is_directed ? "DIR" : "LDB", + ev_queue->id, ev_port->id); + +@@ -2240,7 +2237,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, + queue_is_dir = ev_queue->qm_queue.is_directed; + + if (port_is_dir != queue_is_dir) { +- DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n", ++ DLB2_LOG_ERR("%s queue %u can't link to %s port %u", + queue_is_dir ? "DIR" : "LDB", ev_queue->id, + port_is_dir ? "DIR" : "LDB", ev_port->id); + +@@ -2250,7 +2247,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, + + /* Check if there is space for the requested link */ + if (!link_exists && index == -1) { +- DLB2_LOG_ERR("no space for new link\n"); ++ DLB2_LOG_ERR("no space for new link"); + rte_errno = -ENOSPC; + return -1; + } +@@ -2258,7 +2255,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, + /* Check if the directed port is already linked */ + if (ev_port->qm_port.is_directed && ev_port->num_links > 0 && + !link_exists) { +- DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n", ++ DLB2_LOG_ERR("Can't link DIR port %d to >1 queues", + ev_port->id); + rte_errno = -EINVAL; + return -1; +@@ -2267,7 +2264,7 @@ dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port, + /* Check if the directed queue is already linked */ + if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 && + !link_exists) { +- DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n", ++ DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports", + ev_queue->id); + rte_errno = -EINVAL; + return -1; +@@ -2289,14 +2286,14 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port, + RTE_SET_USED(dev); -@@ -3013,7 +3170,7 @@ again: - &shift, &wd.data128, &next); + if (ev_port == NULL) { +- DLB2_LOG_ERR("dlb2: evport not setup\n"); ++ DLB2_LOG_ERR("dlb2: evport not setup"); + rte_errno = -EINVAL; + return 0; + } - /* Store mbuf1 to LMTLINE/CPT NIXTX area */ -- cn10k_nix_xmit_store(txq, tx_pkts[1], segdw[1], next, -+ cn10k_nix_xmit_store(txq, tx_pkts[1], &extm, segdw[1], next, - cmd0[1], cmd1[1], cmd2[1], cmd3[1], - flags); + if (!ev_port->setup_done && + ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) { +- DLB2_LOG_ERR("dlb2: evport not setup\n"); ++ DLB2_LOG_ERR("dlb2: evport not setup"); + rte_errno = -EINVAL; + return 0; + } +@@ -2381,7 +2378,7 @@ dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle, -@@ -3029,7 +3186,7 @@ again: - &shift, &wd.data128, &next); + ret = dlb2_iface_unmap_qid(handle, &cfg); + if (ret < 0) +- DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); - /* Store mbuf2 to LMTLINE/CPT NIXTX area */ -- cn10k_nix_xmit_store(txq, tx_pkts[2], segdw[2], next, -+ 
cn10k_nix_xmit_store(txq, tx_pkts[2], &extm, segdw[2], next, - cmd0[2], cmd1[2], cmd2[2], cmd3[2], - flags); + return ret; +@@ -2434,7 +2431,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port, + RTE_SET_USED(dev); -@@ -3045,7 +3202,7 @@ again: - &shift, &wd.data128, &next); + if (!ev_port->setup_done) { +- DLB2_LOG_ERR("dlb2: evport %d is not configured\n", ++ DLB2_LOG_ERR("dlb2: evport %d is not configured", + ev_port->id); + rte_errno = -EINVAL; + return 0; +@@ -2459,7 +2456,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port, + int ret, j; + + if (queues[i] >= dlb2->num_queues) { +- DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]); ++ DLB2_LOG_ERR("dlb2: invalid queue id %d", queues[i]); + rte_errno = -EINVAL; + return i; /* return index of offending queue */ + } +@@ -2477,7 +2474,7 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port, - /* Store mbuf3 to LMTLINE/CPT NIXTX area */ -- cn10k_nix_xmit_store(txq, tx_pkts[3], segdw[3], next, -+ cn10k_nix_xmit_store(txq, tx_pkts[3], &extm, segdw[3], next, - cmd0[3], cmd1[3], cmd2[3], cmd3[3], - flags); + ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue); + if (ret) { +- DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n", ++ DLB2_LOG_ERR("unlink err=%d for port %d queue %d", + ret, ev_port->id, queues[i]); + rte_errno = -ENOENT; + return i; /* return index of offending queue */ +@@ -2504,7 +2501,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev, + RTE_SET_USED(dev); -@@ -3053,7 +3210,7 @@ again: - uint8_t j; + if (!ev_port->setup_done) { +- DLB2_LOG_ERR("dlb2: evport %d is not configured\n", ++ DLB2_LOG_ERR("dlb2: evport %d is not configured", + ev_port->id); + rte_errno = -EINVAL; + return 0; +@@ -2516,7 +2513,7 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev, + ret = dlb2_iface_pending_port_unmaps(handle, &cfg); - segdw[4] = 8; -- j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, cmd0, cmd1, -+ j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, &extm, cmd0, cmd1, - cmd2, cmd3, segdw, - (uint64_t *) - LMT_OFF(laddr, lnum, -@@ -3203,6 +3360,11 @@ again: + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; } +@@ -2609,7 +2606,7 @@ dlb2_eventdev_start(struct rte_eventdev *dev) - rte_io_wmb(); -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { -+ cn10k_nix_free_extmbuf(extm); -+ extm = NULL; -+ } -+ - if (left) - goto again; + rte_spinlock_lock(&dlb2->qm_instance.resource_lock); + if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) { +- DLB2_LOG_ERR("bad state %d for dev_start\n", ++ DLB2_LOG_ERR("bad state %d for dev_start", + (int)dlb2->run_state); + rte_spinlock_unlock(&dlb2->qm_instance.resource_lock); + return -EINVAL; +@@ -2645,7 +2642,7 @@ dlb2_eventdev_start(struct rte_eventdev *dev) -diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.c b/dpdk/drivers/net/cnxk/cn9k_ethdev.c -index bae4dda5e2..b92b978a27 100644 ---- a/dpdk/drivers/net/cnxk/cn9k_ethdev.c -+++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.c -@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) - struct roc_nix_sq *sq = &dev->sqs[qidx]; - do { - handle_tx_completion_pkts(txq, 0); -+ /* Check if SQ is empty */ - roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail); -+ if (head != tail) -+ continue; -+ -+ /* Check if completion CQ is empty */ -+ 
roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail); - } while (head != tail); + ret = dlb2_iface_sched_domain_start(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; } +@@ -2890,7 +2887,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port, + case RTE_SCHED_TYPE_ORDERED: + DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n"); + if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) { +- DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n", ++ DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d", + *queue_id); + rte_errno = -EINVAL; + return 1; +@@ -2909,7 +2906,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port, + *sched_type = DLB2_SCHED_UNORDERED; + break; + default: +- DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n"); ++ DLB2_LOG_ERR("Unsupported LDB sched type in put_qe"); + DLB2_INC_STAT(ev_port->stats.tx_invalid, 1); + rte_errno = -EINVAL; + return 1; +@@ -3156,7 +3153,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2, + int i; -diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.h b/dpdk/drivers/net/cnxk/cn9k_ethdev.h -index 9e0a3c5bb2..6ae0db62ca 100644 ---- a/dpdk/drivers/net/cnxk/cn9k_ethdev.h -+++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.h -@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe) - m = m_next; - } - rte_pktmbuf_free_seg(m); -+ txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL; + if (port_id > dlb2->num_ports) { +- DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n", ++ DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release", + port_id); + rte_errno = -EINVAL; + return; +@@ -3213,7 +3210,7 @@ dlb2_event_release(struct dlb2_eventdev *dlb2, + sw_credit_update: + /* each release returns one credit */ + if (unlikely(!ev_port->outstanding_releases)) { +- DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n", ++ DLB2_LOG_ERR("%s: Outstanding releases underflowed.", + __func__); + return; + } +@@ -3367,7 +3364,7 @@ dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port, + * buffer is a mbuf. 
+ */ + if (unlikely(qe->error)) { +- DLB2_LOG_ERR("QE error bit ON\n"); ++ DLB2_LOG_ERR("QE error bit ON"); + DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1); + dlb2_consume_qe_immediate(qm_port, 1); + continue; /* Ignore */ +@@ -4281,7 +4278,7 @@ dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2, + + ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; + } +@@ -4301,7 +4298,7 @@ dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2, - head++; - head &= qmask; -diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h -index fba4bb4215..4715bf8a65 100644 ---- a/dpdk/drivers/net/cnxk/cn9k_tx.h -+++ b/dpdk/drivers/net/cnxk/cn9k_tx.h -@@ -82,32 +82,198 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd, + ret = dlb2_iface_get_dir_queue_depth(handle, &cfg); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n", ++ DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)", + ret, dlb2_error_strings[cfg.response.status]); + return ret; + } +@@ -4392,7 +4389,7 @@ dlb2_drain(struct rte_eventdev *dev) } - } -+static __rte_always_inline void -+cn9k_nix_free_extmbuf(struct rte_mbuf *m) -+{ -+ struct rte_mbuf *m_next; -+ while (m != NULL) { -+ m_next = m->next; -+ rte_pktmbuf_free_seg(m); -+ m = m_next; -+ } -+} -+ - static __rte_always_inline uint64_t --cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq, -- struct nix_send_hdr_s *send_hdr) -+cn9k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn9k_eth_txq *txq, -+ struct nix_send_hdr_s *send_hdr, uint64_t *aura) - { -+ struct rte_mbuf *prev; - uint32_t sqe_id; + if (i == dlb2->num_ports) { +- DLB2_LOG_ERR("internal error: no LDB ev_ports\n"); ++ DLB2_LOG_ERR("internal error: no LDB ev_ports"); + return; + } - if (RTE_MBUF_HAS_EXTBUF(m)) { - if (unlikely(txq->tx_compl.ena == 0)) { -- rte_pktmbuf_free_seg(m); -+ m->next = *extm; -+ *extm = m; - return 1; +@@ -4400,7 +4397,7 @@ dlb2_drain(struct rte_eventdev *dev) + rte_event_port_unlink(dev_id, ev_port->id, NULL, 0); + + if (rte_errno) { +- DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n", ++ DLB2_LOG_ERR("internal error: failed to unlink ev_port %d", + ev_port->id); + return; + } +@@ -4418,7 +4415,7 @@ dlb2_drain(struct rte_eventdev *dev) + /* Link the ev_port to the queue */ + ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1); + if (ret != 1) { +- DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n", ++ DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d", + ev_port->id, qid); + return; } - if (send_hdr->w0.pnc) { -- txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m; -+ sqe_id = send_hdr->w1.sqe_id; -+ prev = txq->tx_compl.ptr[sqe_id]; -+ m->next = prev; -+ txq->tx_compl.ptr[sqe_id] = m; - } else { - sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED); - send_hdr->w0.pnc = 1; - send_hdr->w1.sqe_id = sqe_id & - txq->tx_compl.nb_desc_mask; - txq->tx_compl.ptr[send_hdr->w1.sqe_id] = m; -+ m->next = NULL; +@@ -4433,7 +4430,7 @@ dlb2_drain(struct rte_eventdev *dev) + /* Unlink the ev_port from the queue */ + ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1); + if (ret != 1) { +- DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n", ++ DLB2_LOG_ERR("internal error: 
failed to unlink ev_port %d to queue %d", + ev_port->id, qid); + return; } - return 1; - } else { -- return cnxk_nix_prefree_seg(m); -+ return cnxk_nix_prefree_seg(m, aura); +@@ -4452,7 +4449,7 @@ dlb2_eventdev_stop(struct rte_eventdev *dev) + rte_spinlock_unlock(&dlb2->qm_instance.resource_lock); + return; + } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) { +- DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n", ++ DLB2_LOG_ERR("Internal error: bad state %d for dev_stop", + (int)dlb2->run_state); + rte_spinlock_unlock(&dlb2->qm_instance.resource_lock); + return; +@@ -4608,7 +4605,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, + + err = dlb2_iface_open(&dlb2->qm_instance, name); + if (err < 0) { +- DLB2_LOG_ERR("could not open event hardware device, err=%d\n", ++ DLB2_LOG_ERR("could not open event hardware device, err=%d", + err); + return err; + } +@@ -4616,14 +4613,14 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, + err = dlb2_iface_get_device_version(&dlb2->qm_instance, + &dlb2->revision); + if (err < 0) { +- DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n", ++ DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d", + err); + return err; } - } -+#if defined(RTE_ARCH_ARM64) -+/* Only called for first segments of single segmented mbufs */ -+static __rte_always_inline void -+cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm, struct cn9k_eth_txq *txq, -+ uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0, -+ uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1) -+{ -+ struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr; -+ uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask; -+ bool tx_compl_ena = txq->tx_compl.ena; -+ struct rte_mbuf *m0, *m1, *m2, *m3; -+ struct rte_mbuf *cookie; -+ uint64_t w0, w1, aura; -+ uint64_t sqe_id; -+ -+ m0 = mbufs[0]; -+ m1 = mbufs[1]; -+ m2 = mbufs[2]; -+ m3 = mbufs[3]; -+ -+ /* mbuf 0 */ -+ w0 = vgetq_lane_u64(*senddesc01_w0, 0); -+ if (RTE_MBUF_HAS_EXTBUF(m0)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc01_w1, 0); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m0->next = *extm; -+ *extm = m0; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m0; -+ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0); -+ -+ /* mbuf1 */ -+ w0 = vgetq_lane_u64(*senddesc01_w0, 1); -+ if (RTE_MBUF_HAS_EXTBUF(m1)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc01_w1, 1); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m1->next = *extm; -+ *extm = m1; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m1; -+ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m1) ? 
m1 : rte_mbuf_from_indirect(m1); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1); -+ -+ /* mbuf 2 */ -+ w0 = vgetq_lane_u64(*senddesc23_w0, 0); -+ if (RTE_MBUF_HAS_EXTBUF(m2)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc23_w1, 0); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m2->next = *extm; -+ *extm = m2; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m2; -+ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0); -+ -+ /* mbuf3 */ -+ w0 = vgetq_lane_u64(*senddesc23_w0, 1); -+ if (RTE_MBUF_HAS_EXTBUF(m3)) { -+ w0 |= BIT_ULL(19); -+ w1 = vgetq_lane_u64(*senddesc23_w1, 1); -+ w1 &= ~0xFFFF000000000000UL; -+ if (unlikely(!tx_compl_ena)) { -+ m3->next = *extm; -+ *extm = m3; -+ } else { -+ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, -+ rte_memory_order_relaxed); -+ sqe_id = sqe_id & nb_desc_mask; -+ /* Set PNC */ -+ w0 |= BIT_ULL(43); -+ w1 |= sqe_id << 48; -+ tx_compl_ptr[sqe_id] = m3; -+ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1); -+ } -+ } else { -+ cookie = RTE_MBUF_DIRECT(m3) ? 
m3 : rte_mbuf_from_indirect(m3); -+ aura = (w0 >> 20) & 0xFFFFF; -+ w0 &= ~0xFFFFF00000UL; -+ w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19; -+ w0 |= aura << 20; -+ -+ if ((w0 & BIT_ULL(19)) == 0) -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+ } -+ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1); -+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG -+ RTE_SET_USED(cookie); -+#endif -+} -+#endif -+ - static __rte_always_inline void - cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) - { -@@ -161,10 +327,9 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) - } + err = dlb2_hw_query_resources(dlb2); + if (err) { +- DLB2_LOG_ERR("get resources err=%d for %s\n", ++ DLB2_LOG_ERR("get resources err=%d for %s", + err, name); + return err; + } +@@ -4646,7 +4643,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, + break; + } + if (ret) { +- DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d\n", ++ DLB2_LOG_ERR("dlb2: failed to configure class of service, err=%d", + err); + return err; + } +@@ -4654,7 +4651,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, - static __rte_always_inline void --cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags, -- const uint64_t lso_tun_fmt, uint8_t mark_flag, -- uint64_t mark_fmt) -+cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, -+ uint64_t *cmd, const uint16_t flags, const uint64_t lso_tun_fmt, -+ uint8_t mark_flag, uint64_t mark_fmt) - { - uint8_t mark_off = 0, mark_vlan = 0, markptr = 0; - struct nix_send_ext_s *send_hdr_ext; -@@ -191,6 +356,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, - ol_flags = m->ol_flags; - w1.u = 0; + err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode); + if (err < 0) { +- DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n", ++ DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d", + err); + return err; + } +@@ -4662,7 +4659,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, + /* Complete xtstats runtime initialization */ + err = dlb2_xstats_init(dlb2); + if (err) { +- DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err); ++ DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d", err); + return err; } -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) -+ send_hdr->w0.pnc = 0; - if (!(flags & NIX_TX_MULTI_SEG_F)) - send_hdr->w0.total = m->data_len; -@@ -345,23 +512,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, - send_hdr->w1.u = w1.u; +@@ -4692,14 +4689,14 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev, - if (!(flags & NIX_TX_MULTI_SEG_F)) { -+ struct rte_mbuf *cookie; -+ - sg->seg1_size = m->data_len; - *(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m); -+ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); + err = dlb2_iface_open(&dlb2->qm_instance, name); + if (err < 0) { +- DLB2_LOG_ERR("could not open event hardware device, err=%d\n", ++ DLB2_LOG_ERR("could not open event hardware device, err=%d", + err); + return err; + } - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -+ uint64_t aura; - /* DF bit = 1 if refcount of current mbuf or parent mbuf - * is greater than 1 - * DF bit = 0 otherwise - */ -- send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr); -+ aura = send_hdr->w0.aura; -+ send_hdr->w0.df = cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); -+ send_hdr->w0.aura = aura; - /* Ensuring mbuf fields which got updated in - * cnxk_nix_prefree_seg are written before LMTST. 
- */ - rte_io_wmb(); + err = dlb2_hw_query_resources(dlb2); + if (err) { +- DLB2_LOG_ERR("get resources err=%d for %s\n", ++ DLB2_LOG_ERR("get resources err=%d for %s", + err, name); + return err; + } +diff --git a/dpdk/drivers/event/dlb2/dlb2_xstats.c b/dpdk/drivers/event/dlb2/dlb2_xstats.c +index ff15271dda..28de48e24e 100644 +--- a/dpdk/drivers/event/dlb2/dlb2_xstats.c ++++ b/dpdk/drivers/event/dlb2/dlb2_xstats.c +@@ -766,7 +766,7 @@ dlb2_xstats_update(struct dlb2_eventdev *dlb2, + fn = get_queue_stat; + break; + default: +- DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id); ++ DLB2_LOG_ERR("Unexpected xstat fn_id %d", xs->fn_id); + goto invalid_value; } -+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG - /* Mark mempool object as "put" since it is freed by NIX */ - if (!send_hdr->w0.df) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); -+#else -+ RTE_SET_USED(cookie); -+#endif - } else { - sg->seg1_size = m->data_len; - *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); -@@ -383,7 +560,7 @@ cn9k_nix_xmit_prepare_tstamp(struct cn9k_eth_txq *txq, uint64_t *cmd, - - send_mem = (struct nix_send_mem_s *)(cmd + off); -- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp -+ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp - * should not be recorded, hence changing the alg type to - * NIX_SENDMEMALG_SUB and also changing send mem addr field to - * next 8 bytes as it corrupts the actual Tx tstamp registered -@@ -439,10 +616,12 @@ cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr) - } +@@ -827,7 +827,7 @@ dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev, + fn = get_queue_stat; + break; + default: +- DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", ++ DLB2_LOG_ERR("Unexpected xstat fn_id %d", + xs->fn_id); + return (uint64_t)-1; + } +@@ -865,7 +865,7 @@ dlb2_xstats_reset_range(struct dlb2_eventdev *dlb2, uint32_t start, + fn = get_queue_stat; + break; + default: +- DLB2_LOG_ERR("Unexpected xstat fn_id %d\n", xs->fn_id); ++ DLB2_LOG_ERR("Unexpected xstat fn_id %d", xs->fn_id); + return; + } - static __rte_always_inline uint16_t --cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) -+cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, -+ uint64_t *cmd, const uint16_t flags) - { - struct nix_send_hdr_s *send_hdr; -+ uint64_t prefree = 0, aura; -+ struct rte_mbuf *cookie; - union nix_send_sg_s *sg; - struct rte_mbuf *m_next; - uint64_t *slist, sg_u; -@@ -467,17 +646,27 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, - m_next = m->next; - slist = &cmd[3 + off + 1]; +diff --git a/dpdk/drivers/event/dlb2/pf/dlb2_main.c b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +index a95d3227a4..89eabc2a93 100644 +--- a/dpdk/drivers/event/dlb2/pf/dlb2_main.c ++++ b/dpdk/drivers/event/dlb2/pf/dlb2_main.c +@@ -72,7 +72,7 @@ static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev, + }; -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); - /* Set invert df if buffer is not to be freed by H/W */ - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55); -+ aura = send_hdr->w0.aura; -+ prefree = (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55); -+ send_hdr->w0.aura = aura; -+ sg_u |= prefree; - rte_io_wmb(); + if (retries == DLB2_READY_RETRY_LIMIT) { +- DLB2_LOG_ERR("[%s()] wait for device ready timed out\n", ++ DLB2_LOG_ERR("[%s()] wait for device ready timed out", + __func__); + return -1; + } +@@ -214,7 +214,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + pcie_cap_offset = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_EXP); + + if (pcie_cap_offset < 0) { +- DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n", ++ DLB2_LOG_ERR("[%s()] failed to find the pcie capability", + __func__); + return pcie_cap_offset; + } +@@ -261,7 +261,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = RTE_PCI_COMMAND; + cmd = 0; + if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pci command\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pci command", + __func__); + return ret; + } +@@ -273,7 +273,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_DEVSTA; + ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to read the pci device status\n", ++ DLB2_LOG_ERR("[%s()] failed to read the pci device status", + __func__); + return ret; + } +@@ -286,7 +286,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) } - /* Mark mempool object as "put" since it is freed by NIX */ - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - if (!(sg_u & (1ULL << 55))) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); -+#else -+ RTE_SET_USED(cookie); -+#endif -+#ifdef RTE_ENABLE_ASSERT -+ m->next = NULL; -+ m->nb_segs = 1; - #endif - m = m_next; - if (!m) -@@ -488,16 +677,17 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, - m_next = m->next; - sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); - *slist = rte_mbuf_data_iova(m); -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); - /* Set invert df if buffer is not to be freed by H/W */ - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55)); -+ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, NULL) << (i + 55)); - /* Commit changes to mbuf */ - rte_io_wmb(); + if (wait_count == 4) { +- DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n", ++ DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out", + __func__); + return -1; + } +@@ -294,7 +294,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL; + ret = rte_pci_read_config(pdev, &devctl_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n", ++ DLB2_LOG_ERR("[%s()] failed to read the pcie device control", + __func__); + return ret; + } +@@ -303,7 +303,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + + ret = rte_pci_write_config(pdev, &devctl_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie device control", + __func__); + return ret; + } +@@ -316,7 +316,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL; + ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d", + __func__, (int)off); + return ret; } - /* Mark mempool object as "put" since it is freed by NIX */ - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - if (!(sg_u & (1ULL << (i + 55)))) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); - #endif - slist++; -@@ -513,6 +703,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, - sg_u = sg->u; - slist++; +@@ -324,7 +324,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL; + ret = rte_pci_write_config(pdev, &lnk_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; } -+#ifdef RTE_ENABLE_ASSERT -+ m->next = NULL; -+#endif - m = m_next; - } while (nb_segs); +@@ -332,7 +332,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL; + ret = rte_pci_write_config(pdev, &slt_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -340,7 +340,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_RTCTL; + ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -348,7 +348,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL2; + ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } 
+@@ -356,7 +356,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_LNKCTL2; + ret = rte_pci_write_config(pdev, &lnk_word2, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -364,7 +364,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pcie_cap_offset + RTE_PCI_EXP_SLTCTL2; + ret = rte_pci_write_config(pdev, &slt_word2, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -376,7 +376,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pri_cap_offset + RTE_PCI_PRI_ALLOC_REQ; + ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off); + if (ret != 4) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -384,7 +384,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = pri_cap_offset + RTE_PCI_PRI_CTRL; + ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -402,7 +402,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) -@@ -526,6 +719,9 @@ done: - segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); - send_hdr->w0.sizem1 = segdw - 1; + ret = rte_pci_write_config(pdev, &tmp, 4, off); + if (ret != 4) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -413,7 +413,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) -+#ifdef RTE_ENABLE_ASSERT -+ rte_io_wmb(); -+#endif - return segdw; - } + ret = rte_pci_write_config(pdev, &tmp, 4, off); + if (ret != 4) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -424,7 +424,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) -@@ -568,6 +764,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, - const rte_iova_t io_addr = txq->io_addr; - uint64_t lso_tun_fmt = 0, mark_fmt = 0; - void *lmt_addr = txq->lmt_addr; -+ struct rte_mbuf *extm = NULL; - uint8_t mark_flag = 0; - uint16_t i; + ret = rte_pci_write_config(pdev, &tmp, 4, off); + if (ret != 4) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -434,7 +434,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = (i - 1) * 4; + ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off); + if (ret != 4) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -444,7 +444,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) { + cmd &= ~RTE_PCI_COMMAND_INTX_DISABLE; + if (rte_pci_write_config(pdev, &cmd, 2, 
off) != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pci command\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pci command", + __func__); + return ret; + } +@@ -457,7 +457,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + cmd |= RTE_PCI_MSIX_FLAGS_ENABLE; + cmd |= RTE_PCI_MSIX_FLAGS_MASKALL; + if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { +- DLB2_LOG_ERR("[%s()] failed to write msix flags\n", ++ DLB2_LOG_ERR("[%s()] failed to write msix flags", + __func__); + return ret; + } +@@ -467,7 +467,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) { + cmd &= ~RTE_PCI_MSIX_FLAGS_MASKALL; + if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) { +- DLB2_LOG_ERR("[%s()] failed to write msix flags\n", ++ DLB2_LOG_ERR("[%s()] failed to write msix flags", + __func__); + return ret; + } +@@ -493,7 +493,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) -@@ -598,13 +795,16 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, - rte_io_wmb(); + ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -509,7 +509,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + off = acs_cap_offset + RTE_PCI_ACS_CTRL; + ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off); + if (ret != 2) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return ret; + } +@@ -520,7 +520,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev) + */ + off = DLB2_PCI_PASID_CAP_OFFSET; + if (rte_pci_pasid_set_state(pdev, off, false) < 0) { +- DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n", ++ DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d", + __func__, (int)off); + return -1; + } +diff --git a/dpdk/drivers/event/dlb2/pf/dlb2_pf.c b/dpdk/drivers/event/dlb2/pf/dlb2_pf.c +index 3d15250e11..019e90f7e7 100644 +--- a/dpdk/drivers/event/dlb2/pf/dlb2_pf.c ++++ b/dpdk/drivers/event/dlb2/pf/dlb2_pf.c +@@ -336,7 +336,7 @@ dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle, + /* Lock the page in memory */ + ret = rte_mem_lock_page(port_base); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n"); ++ DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o"); + goto create_port_err; + } - for (i = 0; i < pkts; i++) { -- cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, -+ cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, - mark_flag, mark_fmt); - cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4, - flags); - cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags); +@@ -411,7 +411,7 @@ dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle, + /* Lock the page in memory */ + ret = rte_mem_lock_page(port_base); + if (ret < 0) { +- DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n"); ++ DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o"); + goto create_port_err; } -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) -+ cn9k_nix_free_extmbuf(extm); -+ - /* Reduce the cached count */ - txq->fc_cache_pkts -= pkts; +@@ -737,7 +737,7 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) + &dlb2_args, + dlb2->version); + if (ret) { +- DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n", ++ 
DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d", + ret, rte_errno); + goto dlb2_probe_failed; + } +@@ -748,7 +748,7 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) + dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev, probe_args); + + if (dlb2->qm_instance.pf_dev == NULL) { +- DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n", ++ DLB2_LOG_ERR("DLB2 PF Probe failed with error %d", + rte_errno); + ret = -rte_errno; + goto dlb2_probe_failed; +@@ -766,13 +766,13 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) + if (ret) + goto dlb2_probe_failed; -@@ -619,6 +819,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, - const rte_iova_t io_addr = txq->io_addr; - uint64_t lso_tun_fmt = 0, mark_fmt = 0; - void *lmt_addr = txq->lmt_addr; -+ struct rte_mbuf *extm = NULL; - uint8_t mark_flag = 0; - uint16_t segdw; - uint64_t i; -@@ -650,14 +851,17 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, - rte_io_wmb(); +- DLB2_LOG_INFO("DLB2 PF Probe success\n"); ++ DLB2_LOG_INFO("DLB2 PF Probe success"); - for (i = 0; i < pkts; i++) { -- cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, -+ cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, - mark_flag, mark_fmt); -- segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], cmd, flags); -+ segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], &extm, cmd, flags); - cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, - segdw, flags); - cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw); - } + return 0; -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) -+ cn9k_nix_free_extmbuf(extm); -+ - /* Reduce the cached count */ - txq->fc_cache_pkts -= pkts; + dlb2_probe_failed: -@@ -705,12 +909,12 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, +- DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret); ++ DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d", ret); - static __rte_always_inline uint8_t - cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, -+ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, - struct nix_send_hdr_s *send_hdr, - union nix_send_sg_s *sg, const uint32_t flags) - { -- struct rte_mbuf *m_next; -- uint64_t *slist, sg_u; -+ struct rte_mbuf *m_next, *cookie; -+ uint64_t *slist, sg_u, aura; - uint16_t nb_segs; - uint64_t segdw; - int i = 1; -@@ -727,29 +931,40 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, - m_next = m->next; + return ret; + } +@@ -811,7 +811,7 @@ event_dlb2_pci_probe(struct rte_pci_driver *pci_drv, + event_dlb2_pf_name); + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, " +- "ret=%d\n", ret); ++ "ret=%d", ret); + } - /* Set invert df if buffer is not to be freed by H/W */ -- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) -- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55); -- /* Mark mempool object as "put" since it is freed by NIX */ -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { -+ aura = send_hdr->w0.aura; -+ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55); -+ send_hdr->w0.aura = aura; -+ } -+ /* Mark mempool object as "put" since it is freed by NIX */ - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - if (!(sg_u & (1ULL << 55))) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); -+#else -+ RTE_SET_USED(cookie); - #endif + return ret; +@@ -826,7 +826,7 @@ event_dlb2_pci_remove(struct rte_pci_device *pci_dev) -+#ifdef RTE_ENABLE_ASSERT -+ m->next = NULL; -+ m->nb_segs = 1; -+#endif - m = m_next; - /* Fill mbuf segments */ - do { - m_next = m->next; - sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); - *slist = rte_mbuf_data_iova(m); -+ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); - /* Set invert df if buffer is not to be freed by H/W */ - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) -- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55)); -+ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << (i + 55)); - /* Mark mempool object as "put" since it is freed by NIX - */ - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - if (!(sg_u & (1ULL << (i + 55)))) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); - #endif - slist++; -@@ -765,6 +980,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, - sg_u = sg->u; - slist++; - } -+#ifdef RTE_ENABLE_ASSERT -+ m->next = NULL; -+#endif - m = m_next; - } while (nb_segs); + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, " +- "ret=%d\n", ret); ++ "ret=%d", ret); + } -@@ -780,24 +998,31 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, - !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); - send_hdr->w0.sizem1 = segdw - 1; + return ret; +@@ -845,7 +845,7 @@ event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv, + event_dlb2_pf_name); + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, " +- "ret=%d\n", ret); ++ "ret=%d", ret); + } -+#ifdef RTE_ENABLE_ASSERT -+ rte_io_wmb(); -+#endif - return segdw; - } + return ret; +@@ -860,7 +860,7 @@ event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev) - static __rte_always_inline uint8_t --cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, -- struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, -- uint64x2_t *cmd1, const uint32_t flags) -+cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, -+ uint64_t *cmd, uint64x2_t *cmd0, uint64x2_t *cmd1, const uint32_t flags) - { - struct nix_send_hdr_s send_hdr; -+ struct rte_mbuf *cookie; - union nix_send_sg_s sg; -+ uint64_t aura; - uint8_t ret; - - if (m->nb_segs == 1) { -+ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { - send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0); - send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1); - sg.u = vgetq_lane_u64(cmd1[0], 0); -- sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55); -+ aura = send_hdr.w0.aura; -+ sg.u |= (cn9k_nix_prefree_seg(m, extm, txq, &send_hdr, &aura) << 55); -+ send_hdr.w0.aura = aura; - cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0); - cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0); - cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1); -@@ -806,8 +1031,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, - #ifdef RTE_LIBRTE_MEMPOOL_DEBUG - sg.u = vgetq_lane_u64(cmd1[0], 0); - if (!(sg.u & (1ULL << 55))) -- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); -+ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); - rte_io_wmb(); -+#else -+ RTE_SET_USED(cookie); - #endif - return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) + - !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); -@@ -817,7 +1044,7 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, - send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1); - sg.u = vgetq_lane_u64(cmd1[0], 0); + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, " +- "ret=%d\n", ret); ++ "ret=%d", ret); + } -- ret = cn9k_nix_prepare_mseg_vec_list(txq, m, cmd, &send_hdr, &sg, flags); -+ ret = cn9k_nix_prepare_mseg_vec_list(txq, m, extm, cmd, &send_hdr, &sg, flags); + return ret; +diff --git a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +index dd4e64395f..4658eaf3a2 100644 +--- a/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c ++++ b/dpdk/drivers/event/dpaa2/dpaa2_eventdev.c +@@ -74,7 +74,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[], + ret = dpaa2_affine_qbman_swp(); + if (ret < 0) { + DPAA2_EVENTDEV_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -276,7 +276,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[], + ret = dpaa2_affine_qbman_swp(); + if (ret < 0) { + DPAA2_EVENTDEV_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -849,7 +849,7 @@ dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev, + for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) { + ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev); + if (ret) { +- DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n", ++ DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d", + ret); + goto fail; + } +@@ -883,7 +883,7 @@ dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev, + dpcon, &conf->ev); + if (ret) { + DPAA2_EVENTDEV_ERR( +- "dpaa2_sec_eventq_attach failed: ret: %d\n", ret); ++ "dpaa2_sec_eventq_attach failed: ret: %d", ret); + return ret; + } + return 0; +@@ -903,7 +903,7 @@ dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev, + ret = dpaa2_sec_eventq_detach(cdev, i); + if (ret) { + DPAA2_EVENTDEV_ERR( +- "dpaa2_sec_eventq_detach failed:ret %d\n", ret); ++ "dpaa2_sec_eventq_detach failed:ret %d", ret); + return ret; + } + } +@@ -926,7 +926,7 @@ dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev, + ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id); + if (ret) { + DPAA2_EVENTDEV_ERR( +- "dpaa2_sec_eventq_detach failed: ret: %d\n", ret); ++ "dpaa2_sec_eventq_detach failed: ret: %d", ret); + return ret; + } - cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0); - cmd0[0] = 
vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1); -@@ -962,11 +1189,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, - uint64x2_t sgdesc01_w1, sgdesc23_w1; - struct cn9k_eth_txq *txq = tx_queue; - uint64_t *lmt_addr = txq->lmt_addr; -- uint64x2_t xmask01_w0, xmask23_w0; -- uint64x2_t xmask01_w1, xmask23_w1; - rte_iova_t io_addr = txq->io_addr; -- struct nix_send_hdr_s send_hdr; - uint64x2_t ltypes01, ltypes23; -+ struct rte_mbuf *extm = NULL; - uint64x2_t xtmp128, ytmp128; - uint64x2_t xmask01, xmask23; - uint64_t lmt_status, i; -@@ -1028,7 +1253,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, - for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) { - /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */ - senddesc01_w0 = -- vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF)); -+ vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF)); - sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF)); +@@ -1159,7 +1159,7 @@ dpaa2_eventdev_destroy(const char *name) - senddesc23_w0 = senddesc01_w0; -@@ -1732,74 +1957,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, - if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && - !(flags & NIX_TX_MULTI_SEG_F)) { - /* Set don't free bit if reference count > 1 */ -- xmask01_w0 = vdupq_n_u64(0); -- xmask01_w1 = vdupq_n_u64(0); -- xmask23_w0 = xmask01_w0; -- xmask23_w1 = xmask01_w1; -- -- /* Move mbufs to iova */ -- mbuf0 = (uint64_t *)tx_pkts[0]; -- mbuf1 = (uint64_t *)tx_pkts[1]; -- mbuf2 = (uint64_t *)tx_pkts[2]; -- mbuf3 = (uint64_t *)tx_pkts[3]; -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0); -- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf0)->pool, -- (void **)&mbuf0, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1); -- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf1)->pool, -- (void **)&mbuf1, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0); -- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf2)->pool, -- (void **)&mbuf2, 1, 0); -- } -- -- send_hdr.w0.u = 0; -- send_hdr.w1.u = 0; -- -- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) { -- send_hdr.w0.df = 1; -- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1); -- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1); -- } else { -- RTE_MEMPOOL_CHECK_COOKIES( -- ((struct rte_mbuf *)mbuf3)->pool, -- (void **)&mbuf3, 1, 0); -- } -- -- senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0); -- senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0); -- senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1); -- senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1); -- -+ cn9k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0, -+ &senddesc23_w0, &senddesc01_w1, &senddesc23_w1); - /* Ensuring mbuf fields which got updated in - * cnxk_nix_prefree_seg are written before LMTST. 
- */ -@@ -1860,7 +2019,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, - /* Build mseg list for each packet individually. */ - for (j = 0; j < NIX_DESCS_PER_LOOP; j++) - segdw[j] = cn9k_nix_prepare_mseg_vec(txq, -- tx_pkts[j], -+ tx_pkts[j], &extm, - seg_list[j], &cmd0[j], - &cmd1[j], flags); - segdw[4] = 8; -@@ -1935,6 +2094,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, - tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP; + eventdev = rte_event_pmd_get_named_dev(name); + if (eventdev == NULL) { +- RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name); ++ DPAA2_EVENTDEV_ERR("eventdev with name %s not allocated", name); + return -1; } -+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) -+ cn9k_nix_free_extmbuf(extm); -+ - if (unlikely(pkts_left)) { - if (flags & NIX_TX_MULTI_SEG_F) - pkts += cn9k_nix_xmit_pkts_mseg(tx_queue, tx_pkts, -diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -index 5e11bbb017..c841b31051 100644 ---- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c -+++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -@@ -1384,6 +1384,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) - goto free_nix_lf; - } +diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.c b/dpdk/drivers/event/octeontx/ssovf_evdev.c +index a16f24e088..c0129328ef 100644 +--- a/dpdk/drivers/event/octeontx/ssovf_evdev.c ++++ b/dpdk/drivers/event/octeontx/ssovf_evdev.c +@@ -714,10 +714,20 @@ ssovf_close(struct rte_eventdev *dev) + } -+ /* Overwrite default RSS setup if requested by user */ -+ rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf); -+ if (rc) { -+ plt_err("Failed to configure rss rc=%d", rc); -+ goto free_nix_lf; + static int +-ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) ++ssovf_parsekv(const char *key, const char *value, void *opaque) + { +- int *flag = opaque; +- *flag = !!atoi(value); ++ uint8_t *flag = opaque; ++ uint64_t v; ++ char *end; ++ ++ errno = 0; ++ v = strtoul(value, &end, 0); ++ if ((errno != 0) || (value == end) || *end != '\0' || v > 1) { ++ ssovf_log_err("invalid %s value %s", key, value); ++ return -EINVAL; + } + - /* Init the default TM scheduler hierarchy */ - rc = roc_nix_tm_init(nix); - if (rc) { -diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c -index 8e862be933..a0e9300cff 100644 ---- a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c -+++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c -@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args) - if (errno) - val = 0; - -- *(uint16_t *)extra_args = val; -+ *(uint32_t *)extra_args = val; - ++ *flag = !!v; return 0; } -diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h -index c1f99a2616..67f40b8e25 100644 ---- a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h -+++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h -@@ -84,7 +84,7 @@ struct cnxk_timesync_info { - /* Inlines */ - static __rte_always_inline uint64_t --cnxk_pktmbuf_detach(struct rte_mbuf *m) -+cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura) - { - struct rte_mempool *mp = m->pool; - uint32_t mbuf_size, buf_len; -@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m) +diff --git a/dpdk/drivers/event/octeontx/timvf_evdev.c b/dpdk/drivers/event/octeontx/timvf_evdev.c +index 090b3ed183..82f17144a6 100644 +--- a/dpdk/drivers/event/octeontx/timvf_evdev.c ++++ b/dpdk/drivers/event/octeontx/timvf_evdev.c +@@ -196,7 +196,7 @@ 
timvf_ring_start(const struct rte_event_timer_adapter *adptr) + timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz()); + timr->fast_div = rte_reciprocal_value_u64(timr->tck_int); + timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64"" +- " maxtmo %"PRIu64"\n", ++ " maxtmo %"PRIu64, + timr->nb_bkts, timr->tck_nsec, interval, + timr->max_tout); + +diff --git a/dpdk/drivers/event/opdl/opdl_evdev.c b/dpdk/drivers/event/opdl/opdl_evdev.c +index 0cccaf7e97..fe0c0ede6f 100644 +--- a/dpdk/drivers/event/opdl/opdl_evdev.c ++++ b/dpdk/drivers/event/opdl/opdl_evdev.c +@@ -99,7 +99,7 @@ opdl_port_link(struct rte_eventdev *dev, + + if (unlikely(dev->data->dev_started)) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Attempt to link queue (%u) to port %d while device started\n", ++ "Attempt to link queue (%u) to port %d while device started", + dev->data->dev_id, + queues[0], + p->id); +@@ -110,7 +110,7 @@ opdl_port_link(struct rte_eventdev *dev, + /* Max of 1 queue per port */ + if (num > 1) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Attempt to link more than one queue (%u) to port %d requested\n", ++ "Attempt to link more than one queue (%u) to port %d requested", + dev->data->dev_id, + num, + p->id); +@@ -120,7 +120,7 @@ opdl_port_link(struct rte_eventdev *dev, + + if (!p->configured) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "port %d not configured, cannot link to %u\n", ++ "port %d not configured, cannot link to %u", + dev->data->dev_id, + p->id, + queues[0]); +@@ -130,7 +130,7 @@ opdl_port_link(struct rte_eventdev *dev, + + if (p->external_qid != OPDL_INVALID_QID) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "port %d already linked to queue %u, cannot link to %u\n", ++ "port %d already linked to queue %u, cannot link to %u", + dev->data->dev_id, + p->id, + p->external_qid, +@@ -157,7 +157,7 @@ opdl_port_unlink(struct rte_eventdev *dev, + + if (unlikely(dev->data->dev_started)) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Attempt to unlink queue (%u) to port %d while device started\n", ++ "Attempt to unlink queue (%u) to port %d while device started", + dev->data->dev_id, + queues[0], + p->id); +@@ -188,7 +188,7 @@ opdl_port_setup(struct rte_eventdev *dev, + /* Check if port already configured */ + if (p->configured) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Attempt to setup port %d which is already setup\n", ++ "Attempt to setup port %d which is already setup", + dev->data->dev_id, + p->id); + return -EDQUOT; +@@ -244,7 +244,7 @@ opdl_queue_setup(struct rte_eventdev *dev, + /* Extra sanity check, probably not needed */ + if (queue_id == OPDL_INVALID_QID) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Invalid queue id %u requested\n", ++ "Invalid queue id %u requested", + dev->data->dev_id, + queue_id); + return -EINVAL; +@@ -252,7 +252,7 @@ opdl_queue_setup(struct rte_eventdev *dev, + + if (device->nb_q_md > device->max_queue_nb) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "Max number of queues %u exceeded by request %u\n", ++ "Max number of queues %u exceeded by request %u", + dev->data->dev_id, + device->max_queue_nb, + device->nb_q_md); +@@ -262,7 +262,7 @@ opdl_queue_setup(struct rte_eventdev *dev, + if (RTE_EVENT_QUEUE_CFG_ALL_TYPES + & conf->event_queue_cfg) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "QUEUE_CFG_ALL_TYPES not supported\n", ++ "QUEUE_CFG_ALL_TYPES not supported", + dev->data->dev_id); + return -ENOTSUP; + } else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK +@@ -281,7 +281,7 @@ opdl_queue_setup(struct rte_eventdev *dev, + break; + default: + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " 
+- "Unknown queue type %d requested\n", ++ "Unknown queue type %d requested", + dev->data->dev_id, + conf->event_queue_cfg); + return -EINVAL; +@@ -292,7 +292,7 @@ opdl_queue_setup(struct rte_eventdev *dev, + for (i = 0; i < device->nb_q_md; i++) { + if (device->q_md[i].ext_id == queue_id) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "queue id %u already setup\n", ++ "queue id %u already setup", + dev->data->dev_id, + queue_id); + return -EINVAL; +@@ -352,7 +352,7 @@ opdl_dev_configure(const struct rte_eventdev *dev) - /* Update refcount of direct mbuf */ - md = rte_mbuf_from_indirect(m); -+ if (aura) -+ *aura = roc_npa_aura_handle_to_aura(md->pool->pool_id); - refcount = rte_mbuf_refcnt_update(md, -1); + if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { + PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : " +- "DEQUEUE_TIMEOUT not supported\n", ++ "DEQUEUE_TIMEOUT not supported", + dev->data->dev_id); + return -ENOTSUP; + } +@@ -659,7 +659,7 @@ opdl_probe(struct rte_vdev_device *vdev) - priv_size = rte_pktmbuf_priv_size(mp); -@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m) + if (!kvlist) { + PMD_DRV_LOG(INFO, +- "Ignoring unsupported parameters when creating device '%s'\n", ++ "Ignoring unsupported parameters when creating device '%s'", + name); + } else { + int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG, +@@ -706,7 +706,7 @@ opdl_probe(struct rte_vdev_device *vdev) + + PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : " + "Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]" +- " , self_test:[%s]\n", ++ " , self_test:[%s]", + dev->data->dev_id, + name, + socket_id, +@@ -750,7 +750,7 @@ opdl_remove(struct rte_vdev_device *vdev) + if (name == NULL) + return -EINVAL; + +- PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name); ++ PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s", name); + + return rte_event_pmd_vdev_uninit(name); } +diff --git a/dpdk/drivers/event/opdl/opdl_test.c b/dpdk/drivers/event/opdl/opdl_test.c +index b69c4769dc..9b0c4db5ce 100644 +--- a/dpdk/drivers/event/opdl/opdl_test.c ++++ b/dpdk/drivers/event/opdl/opdl_test.c +@@ -101,7 +101,7 @@ init(struct test *t, int nb_queues, int nb_ports) - static __rte_always_inline uint64_t --cnxk_nix_prefree_seg(struct rte_mbuf *m) -+cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura) - { - if (likely(rte_mbuf_refcnt_read(m) == 1)) { - if (!RTE_MBUF_DIRECT(m)) -- return cnxk_pktmbuf_detach(m); -+ return cnxk_pktmbuf_detach(m, aura); + ret = rte_event_dev_configure(evdev, &config); + if (ret < 0) +- PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error configuring device", __LINE__); + return ret; + }; - m->next = NULL; - m->nb_segs = 1; - return 0; - } else if (rte_mbuf_refcnt_update(m, -1) == 0) { - if (!RTE_MBUF_DIRECT(m)) -- return cnxk_pktmbuf_detach(m); -+ return cnxk_pktmbuf_detach(m, aura); +@@ -119,7 +119,7 @@ create_ports(struct test *t, int num_ports) - rte_mbuf_refcnt_set(m, 1); - m->next = NULL; -diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -index 5de2919047..c8f4848f92 100644 ---- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -+++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c -@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) - devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; - devinfo->max_mac_addrs = dev->max_mac_entries; - devinfo->max_vfs = pci_dev->max_vfs; -- devinfo->max_mtu = devinfo->max_rx_pktlen - -- (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); -+ 
devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD; - devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + for (i = 0; i < num_ports; i++) { + if (rte_event_port_setup(evdev, i, &conf) < 0) { +- PMD_DRV_LOG(ERR, "Error setting up port %d\n", i); ++ PMD_DRV_LOG(ERR, "Error setting up port %d", i); + return -1; + } + t->port[i] = i; +@@ -158,7 +158,7 @@ create_queues_type(struct test *t, int num_qids, enum queue_type flags) + + for (i = t->nb_qids ; i < t->nb_qids + num_qids; i++) { + if (rte_event_queue_setup(evdev, i, &conf) < 0) { +- PMD_DRV_LOG(ERR, "%d: error creating qid %d\n ", ++ PMD_DRV_LOG(ERR, "%d: error creating qid %d ", + __LINE__, i); + return -1; + } +@@ -180,7 +180,7 @@ cleanup(struct test *t __rte_unused) + { + rte_event_dev_stop(evdev); + rte_event_dev_close(evdev); +- PMD_DRV_LOG(ERR, "clean up for test done\n"); ++ PMD_DRV_LOG(ERR, "clean up for test done"); + return 0; + }; - devinfo->rx_offload_capa = dev->rx_offload_capa; -@@ -448,6 +447,13 @@ cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) - roc_nix_npc_mac_addr_set(nix, dev->mac_addr); - goto exit; +@@ -202,7 +202,7 @@ ordered_basic(struct test *t) + if (init(t, 2, tx_port+1) < 0 || + create_ports(t, tx_port+1) < 0 || + create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) { +- PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__); + return -1; + } + +@@ -226,7 +226,7 @@ ordered_basic(struct test *t) + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", ++ PMD_DRV_LOG(ERR, "%d: error mapping lb qid", + __LINE__); + cleanup(t); + return -1; +@@ -236,13 +236,13 @@ ordered_basic(struct test *t) + err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: error mapping TX qid", __LINE__); + cleanup(t); + return -1; + } + + if (rte_event_dev_start(evdev) < 0) { +- PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__); + return -1; + } + /* Enqueue 3 packets to the rx port */ +@@ -250,7 +250,7 @@ ordered_basic(struct test *t) + struct rte_event ev; + mbufs[i] = rte_gen_arp(0, t->mbuf_pool); + if (!mbufs[i]) { +- PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__); + return -1; + } + +@@ -262,7 +262,7 @@ ordered_basic(struct test *t) + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n", ++ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u", + __LINE__, i, err); + return -1; + } +@@ -278,7 +278,7 @@ ordered_basic(struct test *t) + deq_pkts = rte_event_dequeue_burst(evdev, t->port[i], + &deq_ev[i], 1, 0); + if (deq_pkts != 1) { +- PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Failed to deq", __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; + } +@@ -286,7 +286,7 @@ ordered_basic(struct test *t) + + if (seq != (i-1)) { + PMD_DRV_LOG(ERR, " seq test failed ! 
eq is %d , " +- "port number is %u\n", seq, i); ++ "port number is %u", seq, i); + return -1; } -+ -+ if (eth_dev->data->promiscuous) { -+ rc = roc_nix_mac_promisc_mode_enable(nix, true); -+ if (rc) -+ plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc, -+ roc_error_msg_get(rc)); -+ } } +@@ -298,7 +298,7 @@ ordered_basic(struct test *t) + deq_ev[i].queue_id = t->qid[1]; + err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Failed to enqueue", __LINE__); + return -1; + } + } +@@ -309,7 +309,7 @@ ordered_basic(struct test *t) - /* Update mac address to cnxk ethernet device */ -@@ -544,8 +550,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) - struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); - struct rte_eth_dev_data *data = eth_dev->data; - struct roc_nix *nix = &dev->nix; -+ struct cnxk_eth_rxq_sp *rxq_sp; -+ uint32_t buffsz = 0; - int rc = -EINVAL; -- uint32_t buffsz; + /* Check to see if we've got all 3 packets */ + if (deq_pkts != 3) { +- PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n", ++ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d", + __LINE__, deq_pkts, tx_port); + rte_event_dev_dump(evdev, stdout); + return 1; +@@ -339,7 +339,7 @@ atomic_basic(struct test *t) + if (init(t, 2, tx_port+1) < 0 || + create_ports(t, tx_port+1) < 0 || + create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) { +- PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__); + return -1; + } - frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en; +@@ -364,7 +364,7 @@ atomic_basic(struct test *t) + err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", ++ PMD_DRV_LOG(ERR, "%d: error mapping lb qid", + __LINE__); + cleanup(t); + return -1; +@@ -374,13 +374,13 @@ atomic_basic(struct test *t) + err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: error mapping TX qid", __LINE__); + cleanup(t); + return -1; + } -@@ -561,8 +568,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) - goto exit; + if (rte_event_dev_start(evdev) < 0) { +- PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__); + return -1; } -- buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; -- old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; -+ if (!eth_dev->data->nb_rx_queues) -+ goto skip_buffsz_check; -+ -+ /* Perform buff size check */ -+ if (data->min_rx_buf_size) { -+ buffsz = data->min_rx_buf_size; -+ } else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) { -+ rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]); -+ -+ if (rxq_sp->qconf.mp) -+ buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp); -+ } -+ -+ /* Skip validation if RQ's are not yet setup */ -+ if (!buffsz) -+ goto skip_buffsz_check; -+ -+ buffsz -= RTE_PKTMBUF_HEADROOM; +@@ -389,7 +389,7 @@ atomic_basic(struct test *t) + struct rte_event ev; + mbufs[i] = rte_gen_arp(0, t->mbuf_pool); + if (!mbufs[i]) { +- PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__); + return -1; + } - /* Refuse MTU that requires the support of scattered packets - * when this feature has not been enabled 
before. -@@ -580,6 +603,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) - goto exit; +@@ -402,7 +402,7 @@ atomic_basic(struct test *t) + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n", ++ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u", + __LINE__, i, err); + return -1; + } +@@ -419,7 +419,7 @@ atomic_basic(struct test *t) + + if (t->port[i] != 2) { + if (deq_pkts != 0) { +- PMD_DRV_LOG(ERR, "%d: deq none zero !\n", ++ PMD_DRV_LOG(ERR, "%d: deq none zero !", + __LINE__); + rte_event_dev_dump(evdev, stdout); + return -1; +@@ -427,7 +427,7 @@ atomic_basic(struct test *t) + } else { + + if (deq_pkts != 3) { +- PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !\n", ++ PMD_DRV_LOG(ERR, "%d: deq not eqal to 3 %u !", + __LINE__, deq_pkts); + rte_event_dev_dump(evdev, stdout); + return -1; +@@ -444,7 +444,7 @@ atomic_basic(struct test *t) + + if (err != 3) { + PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, " +- "retval = %u\n", ++ "retval = %u", + t->port[i], 3, err); + return -1; + } +@@ -460,7 +460,7 @@ atomic_basic(struct test *t) + + /* Check to see if we've got all 3 packets */ + if (deq_pkts != 3) { +- PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d\n", ++ PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d", + __LINE__, deq_pkts, tx_port); + rte_event_dev_dump(evdev, stdout); + return 1; +@@ -568,7 +568,7 @@ single_link_w_stats(struct test *t) + create_ports(t, 3) < 0 || /* 0,1,2 */ + create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 || + create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) { +- PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__); + return -1; } -+skip_buffsz_check: -+ old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; - /* if new MTU was smaller than old one, then flush all SQs before MTU change */ - if (old_frame_size > frame_size) { - if (data->dev_started) { -@@ -591,19 +616,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) +@@ -587,7 +587,7 @@ single_link_w_stats(struct test *t) + err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n", ++ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]", + __LINE__, + t->port[1], + t->qid[0]); +@@ -598,7 +598,7 @@ single_link_w_stats(struct test *t) + err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n", ++ PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]", + __LINE__, + t->port[2], + t->qid[1]); +@@ -607,7 +607,7 @@ single_link_w_stats(struct test *t) + } + + if (rte_event_dev_start(evdev) != 0) { +- PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: failed to start device", __LINE__); + cleanup(t); + return -1; + } +@@ -619,7 +619,7 @@ single_link_w_stats(struct test *t) + struct rte_event ev; + mbufs[i] = rte_gen_arp(0, t->mbuf_pool); + if (!mbufs[i]) { +- PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__); + return -1; + } - frame_size -= RTE_ETHER_CRC_LEN; +@@ -631,7 +631,7 @@ single_link_w_stats(struct test *t) + /* generate pkt and enqueue */ + err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); + if (err != 1) { +- 
PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n", ++ PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u", + __LINE__, + t->port[rx_port], + err); +@@ -647,7 +647,7 @@ single_link_w_stats(struct test *t) + deq_ev, 3, 0); + + if (deq_pkts != 3) { +- PMD_DRV_LOG(ERR, "%d: deq not 3 !\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: deq not 3 !", __LINE__); + cleanup(t); + return -1; + } +@@ -662,7 +662,7 @@ single_link_w_stats(struct test *t) + NEW_NUM_PACKETS); -- /* Update mtu on Tx */ -- rc = roc_nix_mac_mtu_set(nix, frame_size); -- if (rc) { -- plt_err("Failed to set MTU, rc=%d", rc); -- goto exit; -- } -- -- /* Sync same frame size on Rx */ -+ /* Set frame size on Rx */ - rc = roc_nix_mac_max_rx_len_set(nix, frame_size); - if (rc) { -- /* Rollback to older mtu */ -- roc_nix_mac_mtu_set(nix, -- old_frame_size - RTE_ETHER_CRC_LEN); - plt_err("Failed to max Rx frame length, rc=%d", rc); - goto exit; + if (deq_pkts != 2) { +- PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts); ++ PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!", __LINE__, deq_pkts); + cleanup(t); + return -1; } -diff --git a/dpdk/drivers/net/cnxk/cnxk_flow.c b/dpdk/drivers/net/cnxk/cnxk_flow.c -index 08ab75e2bb..be0330fa04 100644 ---- a/dpdk/drivers/net/cnxk/cnxk_flow.c -+++ b/dpdk/drivers/net/cnxk/cnxk_flow.c -@@ -102,15 +102,19 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev, - } +@@ -676,7 +676,7 @@ single_link_w_stats(struct test *t) - static void --npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, -- const struct roc_npc_action *rss_action, -- uint32_t *flowkey_cfg) -+npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action, -+ uint32_t *flowkey_cfg, uint64_t default_rss_types) - { - const struct roc_npc_action_rss *rss; -+ uint64_t rss_types; + /* Check to see if we've got all 2 packets */ + if (deq_pkts != 2) { +- PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d\n", ++ PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d", + __LINE__, deq_pkts, tx_port); + cleanup(t); + return -1; +@@ -706,7 +706,7 @@ single_link(struct test *t) + create_ports(t, 3) < 0 || /* 0,1,2 */ + create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 || + create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) { +- PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__); + return -1; + } - rss = (const struct roc_npc_action_rss *)rss_action->conf; -+ rss_types = rss->types; -+ /* If no RSS types are specified, use default one */ -+ if (rss_types == 0) -+ rss_types = default_rss_types; +@@ -725,7 +725,7 @@ single_link(struct test *t) + err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__); + cleanup(t); + return -1; + } +@@ -733,14 +733,14 @@ single_link(struct test *t) + err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, + 1); + if (err != 1) { +- PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__); + cleanup(t); + return -1; + } -- *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level); -+ *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level); - } + if (rte_event_dev_start(evdev) == 0) { + PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 " +- "SINGLE_LINK PORT\n", __LINE__); ++ "SINGLE_LINK PORT", __LINE__); + cleanup(t); + 
return -1; + } +@@ -789,7 +789,7 @@ qid_basic(struct test *t) + if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 || + create_ports(t, NUM_QUEUES+1) < 0 || + create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) { +- PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__); ++ PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__); + return -1; + } - static int -@@ -204,7 +208,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, - goto err_exit; - in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; - in_actions[i].conf = actions->conf; -- npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg); -+ npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, -+ eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); - break; +@@ -805,7 +805,7 @@ qid_basic(struct test *t) - case RTE_FLOW_ACTION_TYPE_SECURITY: -@@ -503,6 +508,9 @@ cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, + if (nb_linked != 1) { - flow_age = &roc_npc->flow_age; +- PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n", ++ PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u", + __FILE__, + __LINE__, + i + 1, +@@ -826,7 +826,7 @@ qid_basic(struct test *t) + &t_qid, + NULL, + 1) > 0) { +- PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail", + __FILE__, + __LINE__); + err = -1; +@@ -841,7 +841,7 @@ qid_basic(struct test *t) + BATCH_SIZE, + 0); + if (test_num_events != 0) { +- PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device", + __FILE__, + __LINE__, + p_id); +@@ -855,7 +855,7 @@ qid_basic(struct test *t) + ev, + BATCH_SIZE); + if (test_num_events != 0) { +- PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device", + __FILE__, + __LINE__, + p_id); +@@ -868,7 +868,7 @@ qid_basic(struct test *t) + /* Start the device */ + if (!err) { + if (rte_event_dev_start(evdev) < 0) { +- PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error with start call", + __FILE__, + __LINE__); + err = -1; +@@ -884,7 +884,7 @@ qid_basic(struct test *t) + &t_qid, + NULL, + 1) > 0) { +- PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail", + __FILE__, + __LINE__); + err = -1; +@@ -904,7 +904,7 @@ qid_basic(struct test *t) + ev, + BATCH_SIZE); + if (num_events != BATCH_SIZE) { +- PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets", + __FILE__, + __LINE__); + err = -1; +@@ -921,7 +921,7 @@ qid_basic(struct test *t) + 0); + + if (num_events != BATCH_SIZE) { +- PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u", + __FILE__, + __LINE__, + p_id); +@@ -930,7 +930,7 @@ qid_basic(struct test *t) + } -+ if (!flow_age->age_flow_refcnt) -+ return 0; -+ - do { - sn = plt_seqcount_read_begin(&flow_age->seq_cnt); + if (ev[0].queue_id != q_id) { +- PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]", + __FILE__, + __LINE__, + p_id, +@@ -949,7 +949,7 @@ qid_basic(struct test *t) + 
ev, + BATCH_SIZE); + if (num_events != BATCH_SIZE) { +- PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u", + __FILE__, + __LINE__, + p_id, +@@ -967,7 +967,7 @@ qid_basic(struct test *t) + BATCH_SIZE, + 0); + if (num_events != BATCH_SIZE) { +- PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n", ++ PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u", + __FILE__, + __LINE__, + p_id); +@@ -993,17 +993,17 @@ opdl_selftest(void) + evdev = rte_event_dev_get_dev_id(eventdev_name); + + if (evdev < 0) { +- PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n", ++ PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.", + __LINE__, eventdev_name); + /* turn on stats by default */ + if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) { +- PMD_DRV_LOG(ERR, "Error creating eventdev\n"); ++ PMD_DRV_LOG(ERR, "Error creating eventdev"); + free(t); + return -1; + } + evdev = rte_event_dev_get_dev_id(eventdev_name); + if (evdev < 0) { +- PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n"); ++ PMD_DRV_LOG(ERR, "Error finding newly created eventdev"); + free(t); + return -1; + } +@@ -1019,27 +1019,27 @@ opdl_selftest(void) + 512, /* use very small mbufs */ + rte_socket_id()); + if (!eventdev_func_mempool) { +- PMD_DRV_LOG(ERR, "ERROR creating mempool\n"); ++ PMD_DRV_LOG(ERR, "ERROR creating mempool"); + free(t); + return -1; + } + } + t->mbuf_pool = eventdev_func_mempool; -diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c -index 8a4e1419b4..f6bd1f7599 100644 ---- a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c -+++ b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c -@@ -95,7 +95,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev, +- PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n"); ++ PMD_DRV_LOG(ERR, "*** Running Ordered Basic test..."); + ret = ordered_basic(t); - ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], - rim->rules, rim->rule_num, true); -- if (ret < 0) { -+ if (ret != 0) { - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "cpfl filter create flow fail"); - rte_free(rim); -diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c -index a8f0488f21..40569ddc6f 100644 ---- a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c -+++ b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c -@@ -1696,7 +1696,7 @@ cpfl_parse_check_prog_action(struct cpfl_flow_js_mr_key_action *key_act, - bool check_name; +- PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n"); ++ PMD_DRV_LOG(ERR, "*** Running Atomic Basic test..."); + ret = atomic_basic(t); - check_name = key_act->prog.has_name ? 
strcmp(prog->name, key_act->prog.name) == 0 -- : atol(prog->name) == key_act->prog.id; -+ : (uint32_t)atol(prog->name) == key_act->prog.id; - if (!check_name) { - PMD_DRV_LOG(ERR, "Not support this prog type: %s.", prog->name); - return -EINVAL; -diff --git a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c -index 0e710a007b..be34da9fa2 100644 ---- a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c -+++ b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c -@@ -92,6 +92,14 @@ cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_m - /* TODO - process rx controlq message */ - for (i = 0; i < num_q_msg; i++) { -+ ret = q_msg[i].status; -+ if (ret != CPFL_CFG_PKT_ERR_OK && -+ q_msg[i].opcode != cpfl_ctlq_sem_query_del_rule_hash_addr) { -+ PMD_INIT_LOG(ERR, "Failed to process rx_ctrlq msg: %s", -+ cpfl_cfg_pkt_errormsg[ret]); -+ return ret; -+ } -+ - if (q_msg[i].data_len > 0) - dma = q_msg[i].ctx.indirect.payload; - else -diff --git a/dpdk/drivers/net/cpfl/cpfl_rules.h b/dpdk/drivers/net/cpfl/cpfl_rules.h -index d23eae8e91..10569b1fdc 100644 ---- a/dpdk/drivers/net/cpfl/cpfl_rules.h -+++ b/dpdk/drivers/net/cpfl/cpfl_rules.h -@@ -62,6 +62,17 @@ enum cpfl_cfg_pkt_error_code { - CPFL_CFG_PKT_ERR_EMAXCOL = 9 /* Max Hash Collision */ - }; +- PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n"); ++ PMD_DRV_LOG(ERR, "*** Running QID Basic test..."); + ret = qid_basic(t); -+static const char * const cpfl_cfg_pkt_errormsg[] = { -+ [CPFL_CFG_PKT_ERR_ESRCH] = "Bad opcode", -+ [CPFL_CFG_PKT_ERR_EEXIST] = "The rule conflicts with already existed one", -+ [CPFL_CFG_PKT_ERR_ENOSPC] = "No space left in the table", -+ [CPFL_CFG_PKT_ERR_ERANGE] = "Parameter out of range", -+ [CPFL_CFG_PKT_ERR_ESBCOMP] = "Completion error", -+ [CPFL_CFG_PKT_ERR_ENOPIN] = "Entry cannot be pinned in cache", -+ [CPFL_CFG_PKT_ERR_ENOTFND] = "Entry does not exist", -+ [CPFL_CFG_PKT_ERR_EMAXCOL] = "Maximum Hash Collisions reached", -+}; -+ - /* macros for creating context for rule descriptor */ - #define MEV_RULE_VSI_ID_S 0 - #define MEV_RULE_VSI_ID_M \ -diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c -index 8cc3d9f257..781f48cfac 100644 ---- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c -+++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c -@@ -211,9 +211,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, - unsigned int i, work_done, budget = 32; - struct link_config *lc = &pi->link_cfg; - struct adapter *adapter = pi->adapter; -- struct rte_eth_link new_link = { 0 }; - u8 old_link = pi->link_cfg.link_ok; - struct sge *s = &adapter->sge; -+ struct rte_eth_link new_link; - - for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) { - if (!s->fw_evtq.desc) -@@ -232,6 +232,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, - rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS); - } - -+ memset(&new_link, 0, sizeof(new_link)); - new_link.link_status = cxgbe_force_linkup(adapter) ? - RTE_ETH_LINK_UP : pi->link_cfg.link_ok; - new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 
1 : 0; -diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -index ef4c06db6a..bcb28f33ee 100644 ---- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c -+++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -165,9 +166,15 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) - uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN - + VLAN_TAG_SIZE; - uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; -+ struct fman_if *fif = dev->process_private; +- PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n"); ++ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test..."); + ret = single_link(t); - PMD_INIT_FUNC_TRACE(); +- PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n"); ++ PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test..."); + ret = single_link_w_stats(t); -+ if (fif->is_shared_mac) { -+ DPAA_PMD_ERR("Cannot configure mtu from DPDK in VSP mode."); -+ return -ENOTSUP; -+ } -+ /* - * Refuse mtu that requires the support of scattered packets - * when this feature has not been enabled before. -@@ -206,7 +213,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) - struct rte_intr_handle *intr_handle; - uint32_t max_rx_pktlen; - int speed, duplex; -- int ret, rx_status; -+ int ret, rx_status, socket_fd; -+ struct ifreq ifr; - - PMD_INIT_FUNC_TRACE(); +diff --git a/dpdk/drivers/event/sw/iq_chunk.h b/dpdk/drivers/event/sw/iq_chunk.h +index 31d013eab7..7820815c38 100644 +--- a/dpdk/drivers/event/sw/iq_chunk.h ++++ b/dpdk/drivers/event/sw/iq_chunk.h +@@ -9,8 +9,6 @@ + #include + #include -@@ -222,6 +230,26 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) - dpaa_intf->name); - return -EHOSTDOWN; - } -+ -+ socket_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); -+ if (socket_fd == -1) { -+ DPAA_PMD_ERR("Cannot open IF socket"); -+ return -errno; -+ } -+ -+ strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1); -+ -+ if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) { -+ DPAA_PMD_ERR("Cannot get interface mtu"); -+ close(socket_fd); -+ return -errno; -+ } -+ -+ close(socket_fd); -+ DPAA_PMD_INFO("Using kernel configured mtu size(%u)", -+ ifr.ifr_mtu); -+ -+ eth_conf->rxmode.mtu = ifr.ifr_mtu; +-#define IQ_ROB_NAMESIZE 12 +- + struct sw_queue_chunk { + struct rte_event events[SW_EVS_PER_Q_CHUNK]; + struct sw_queue_chunk *next; +diff --git a/dpdk/drivers/event/sw/sw_evdev.c b/dpdk/drivers/event/sw/sw_evdev.c +index 55e7735cb0..babe77a20f 100644 +--- a/dpdk/drivers/event/sw/sw_evdev.c ++++ b/dpdk/drivers/event/sw/sw_evdev.c +@@ -173,7 +173,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id, + dev->data->socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ); + if (p->rx_worker_ring == NULL) { +- SW_LOG_ERR("Error creating RX worker ring for port %d\n", ++ SW_LOG_ERR("Error creating RX worker ring for port %d", + port_id); + return -1; } - - /* Rx offloads which are enabled by default */ -@@ -249,7 +277,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) - max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; +@@ -193,7 +193,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id, + RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ); + if (p->cq_worker_ring == NULL) { + rte_event_ring_free(p->rx_worker_ring); +- SW_LOG_ERR("Error creating CQ worker ring for port %d\n", ++ SW_LOG_ERR("Error creating CQ worker ring for port %d", + port_id); + return -1; } +@@ -228,9 +228,7 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, + const struct 
rte_event_queue_conf *queue_conf) + { + unsigned int i; +- int dev_id = sw->data->dev_id; + int socket_id = sw->data->socket_id; +- char buf[IQ_ROB_NAMESIZE]; + struct sw_qid *qid = &sw->qids[idx]; -- fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); -+ if (!fif->is_shared_mac) -+ fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); - - if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { - DPAA_PMD_DEBUG("enabling scatter mode"); -@@ -363,7 +392,8 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev) - RTE_PTYPE_L4_TCP, - RTE_PTYPE_L4_UDP, - RTE_PTYPE_L4_SCTP, -- RTE_PTYPE_TUNNEL_ESP -+ RTE_PTYPE_TUNNEL_ESP, -+ RTE_PTYPE_UNKNOWN - }; - - PMD_INIT_FUNC_TRACE(); -diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c -index ab73e1e59e..3ec32e7240 100644 ---- a/dpdk/drivers/net/e1000/base/e1000_base.c -+++ b/dpdk/drivers/net/e1000/base/e1000_base.c -@@ -107,7 +107,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw) - return; + /* Initialize the FID structures to no pinning (-1), and zero packets */ +@@ -255,17 +253,16 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, - /* If the management interface is not enabled, then power down */ -- if (phy->ops.check_reset_block(hw)) -+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) - e1000_power_down_phy_copper(hw); - } + if (!window_size) { + SW_LOG_DBG( +- "invalid reorder_window_size for ordered queue\n" ++ "invalid reorder_window_size for ordered queue" + ); + goto cleanup; + } -diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c -index 6953a1fa33..2f438597e6 100644 ---- a/dpdk/drivers/net/ena/base/ena_com.c -+++ b/dpdk/drivers/net/ena/base/ena_com.c -@@ -34,6 +34,8 @@ +- snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i); +- qid->reorder_buffer = rte_zmalloc_socket(buf, ++ qid->reorder_buffer = rte_zmalloc_socket(NULL, + window_size * sizeof(qid->reorder_buffer[0]), + 0, socket_id); + if (!qid->reorder_buffer) { +- SW_LOG_DBG("reorder_buffer malloc failed\n"); ++ SW_LOG_DBG("reorder_buffer malloc failed"); + goto cleanup; + } - #define ENA_REGS_ADMIN_INTR_MASK 1 +@@ -337,7 +334,7 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id, + type = SW_SCHED_TYPE_DIRECT; + } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES + & conf->event_queue_cfg) { +- SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n"); ++ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported"); + return -ENOTSUP; + } -+#define ENA_MAX_BACKOFF_DELAY_EXP 16U -+ - #define ENA_MIN_ADMIN_POLL_US 100 +@@ -772,7 +769,7 @@ sw_start(struct rte_eventdev *dev) - #define ENA_MAX_ADMIN_POLL_US 5000 -@@ -177,6 +179,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, - static void comp_ctxt_release(struct ena_com_admin_queue *queue, - struct ena_comp_ctx *comp_ctx) - { -+ comp_ctx->user_cqe = NULL; - comp_ctx->occupied = false; - ATOMIC32_DEC(&queue->outstanding_cmds); - } -@@ -470,6 +473,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a - return; + /* check a service core is mapped to this service */ + if (!rte_service_runstate_get(sw->service_id)) { +- SW_LOG_ERR("Warning: No Service core enabled on service %s\n", ++ SW_LOG_ERR("Warning: No Service core enabled on service %s", + sw->service_name); + return -ENOENT; } +@@ -780,7 +777,7 @@ sw_start(struct rte_eventdev *dev) + /* check all ports are set up */ + for (i = 0; i < sw->port_count; i++) + if (sw->ports[i].rx_worker_ring == NULL) { +- SW_LOG_ERR("Port %d not 
configured\n", i); ++ SW_LOG_ERR("Port %d not configured", i); + return -ESTALE; + } -+ if (!comp_ctx->occupied) -+ return; -+ - comp_ctx->status = ENA_CMD_COMPLETED; - comp_ctx->comp_status = cqe->acq_common_descriptor.status; - -@@ -545,8 +551,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, +@@ -788,7 +785,7 @@ sw_start(struct rte_eventdev *dev) + for (i = 0; i < sw->qid_count; i++) + if (!sw->qids[i].initialized || + sw->qids[i].cq_num_mapped_cqs == 0) { +- SW_LOG_ERR("Queue %d not configured\n", i); ++ SW_LOG_ERR("Queue %d not configured", i); + return -ENOLINK; + } - static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) - { -+ exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp); - delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); -- delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); -+ delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp)); - ENA_USLEEP(delay_us); - } +@@ -1000,7 +997,7 @@ sw_probe(struct rte_vdev_device *vdev) -@@ -3134,16 +3141,18 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, - int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev) - { - struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; -+ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; -+ customer_metrics->buffer_virt_addr = NULL; + if (!kvlist) { + SW_LOG_INFO( +- "Ignoring unsupported parameters when creating device '%s'\n", ++ "Ignoring unsupported parameters when creating device '%s'", + name); + } else { + int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG, +@@ -1070,7 +1067,7 @@ sw_probe(struct rte_vdev_device *vdev) + SW_LOG_INFO( + "Creating eventdev sw device %s, numa_node=%d, " + "sched_quanta=%d, credit_quanta=%d " +- "min_burst=%d, deq_burst=%d, refill_once=%d\n", ++ "min_burst=%d, deq_burst=%d, refill_once=%d", + name, socket_id, sched_quanta, credit_quanta, + min_burst_size, deq_burst_size, refill_once); + +@@ -1134,7 +1131,7 @@ sw_remove(struct rte_vdev_device *vdev) + if (name == NULL) + return -EINVAL; - ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, - customer_metrics->buffer_len, - customer_metrics->buffer_virt_addr, - customer_metrics->buffer_dma_addr, - customer_metrics->buffer_dma_handle); -- if (unlikely(customer_metrics->buffer_virt_addr == NULL)) -+ if (unlikely(customer_metrics->buffer_virt_addr == NULL)) { -+ customer_metrics->buffer_len = 0; - return ENA_COM_NO_MEM; -- -- customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; -+ } +- SW_LOG_INFO("Closing eventdev sw device %s\n", name); ++ SW_LOG_INFO("Closing eventdev sw device %s", name); - return 0; + return rte_event_pmd_vdev_uninit(name); } -diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c -index dc846d2e84..f3962aa76e 100644 ---- a/dpdk/drivers/net/ena/ena_ethdev.c -+++ b/dpdk/drivers/net/ena/ena_ethdev.c -@@ -37,10 +37,10 @@ - #define ENA_MIN_RING_DESC 128 +diff --git a/dpdk/drivers/event/sw/sw_evdev_xstats.c b/dpdk/drivers/event/sw/sw_evdev_xstats.c +index fbac8f3ab5..076b982ab8 100644 +--- a/dpdk/drivers/event/sw/sw_evdev_xstats.c ++++ b/dpdk/drivers/event/sw/sw_evdev_xstats.c +@@ -419,7 +419,7 @@ sw_xstats_get_names(const struct rte_eventdev *dev, + start_offset = sw->xstats_offset_for_qid[queue_port_id]; + break; + default: +- SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n"); ++ SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()"); + return -EINVAL; + }; - /* -- * We should try to keep ENA_CLEANUP_BUF_SIZE 
lower than -+ * We should try to keep ENA_CLEANUP_BUF_THRESH lower than - * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache. - */ --#define ENA_CLEANUP_BUF_SIZE 256 -+#define ENA_CLEANUP_BUF_THRESH 256 +@@ -470,7 +470,7 @@ sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode, + xstats_mode_count = sw->xstats_count_per_qid[queue_port_id]; + break; + default: +- SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n"); ++ SW_LOG_ERR("Invalid mode received in sw_xstats_get()"); + goto invalid_value; + }; - #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) +diff --git a/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c +index 84371d5d1a..b0c6d153e4 100644 +--- a/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c ++++ b/dpdk/drivers/mempool/dpaa2/dpaa2_hw_mempool.c +@@ -67,7 +67,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_MEMPOOL_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + goto err1; + } +@@ -198,7 +198,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused, + ret = dpaa2_affine_qbman_swp(); + if (ret != 0) { + DPAA2_MEMPOOL_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return; + } +@@ -342,7 +342,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool, + ret = dpaa2_affine_qbman_swp(); + if (ret != 0) { + DPAA2_MEMPOOL_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return ret; + } +@@ -457,7 +457,7 @@ dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs, + msl = rte_mem_virt2memseg_list(vaddr); + + if (!msl) { +- DPAA2_MEMPOOL_DEBUG("Memsegment is External.\n"); ++ DPAA2_MEMPOOL_DEBUG("Memsegment is External."); + rte_fslmc_vfio_mem_dmamap((size_t)vaddr, + (size_t)paddr, (size_t)len); + } +diff --git a/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c b/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c +index 1513c632c6..966fee8bfe 100644 +--- a/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c ++++ b/dpdk/drivers/mempool/octeontx/octeontx_fpavf.c +@@ -134,7 +134,7 @@ octeontx_fpa_gpool_alloc(unsigned int object_size) + + if (res->sz128 == 0) { + res->sz128 = sz128; +- fpavf_log_dbg("gpool %d blk_sz %d\n", res->vf_id, ++ fpavf_log_dbg("gpool %d blk_sz %d", res->vf_id, + sz128); + + return res->vf_id; +@@ -273,7 +273,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size, + goto err; + } -@@ -648,18 +648,13 @@ static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, - packet_type |= RTE_PTYPE_L3_IPV6; +- fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n", ++ fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64, + fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg, + cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg); + +@@ -351,8 +351,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index) + sizeof(struct octeontx_mbox_fpa_cfg), + &resp, sizeof(resp)); + if (ret < 0) { +- fpavf_log_err("Could not attach fpa "); +- fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n", ++ fpavf_log_err("Could not attach fpa aura %d to pool %d. Err=%d. 
FuncErr=%d", + FPA_AURA_IDX(gpool_index), gpool_index, ret, + hdr.res_code); + ret = -EACCES; +@@ -380,7 +379,7 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index) + hdr.vfid = gpool_index; + ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0); + if (ret < 0) { +- fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n", ++ fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d", + FPA_AURA_IDX(gpool_index), ret, + hdr.res_code); + ret = -EINVAL; +@@ -428,8 +427,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index) + hdr.vfid = gpool_index; + ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0); + if (ret < 0) { +- fpavf_log_err("Could not start buffer counting for "); +- fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n", ++ fpavf_log_err("Could not start buffer counting for FPA pool %d. Err=%d. FuncErr=%d", + gpool_index, ret, hdr.res_code); + ret = -EINVAL; + goto err; +@@ -636,7 +634,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) + cnt = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT(gaura))); + if (cnt) { +- fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt); ++ fpavf_log_dbg("buffer exist in pool cnt %" PRId64, cnt); + return -EBUSY; + } + +@@ -664,7 +662,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) + (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura))); + + if (node == NULL) { +- fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n", ++ fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf", + gaura, avail); + break; + } +@@ -684,7 +682,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) + curr = curr[0]) { + if (curr == curr[0] || + ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) { +- fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n", ++ fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)", + gpool, curr, curr[0]); + } } +@@ -705,7 +703,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) -- if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { -+ if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag || -+ !(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) { - ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; + ret = octeontx_fpapf_aura_detach(gpool); + if (ret) { +- fpavf_log_err("Failed to detach gaura %u. error code=%d\n", ++ fpavf_log_err("Failed to detach gaura %u. error code=%d", + gpool, ret); + } + +@@ -757,7 +755,7 @@ octeontx_fpavf_identify(void *bar0) + stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 + + FPA_VF_VHPOOL_THRESHOLD(0))); + if (vf_idx >= FPA_VF_MAX) { +- fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id); ++ fpavf_log_err("vf_id(%d) greater than max vf (32)", vf_id); + return -E2BIG; + } + +diff --git a/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c b/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c +index f4de1c8412..631e521b58 100644 +--- a/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c ++++ b/dpdk/drivers/mempool/octeontx/rte_mempool_octeontx.c +@@ -27,11 +27,11 @@ octeontx_fpavf_alloc(struct rte_mempool *mp) + goto _end; + + if ((uint32_t)rc != object_size) +- fpavf_log_err("buffer size mismatch: %d instead of %u\n", ++ fpavf_log_err("buffer size mismatch: %d instead of %u", + rc, object_size); + +- fpavf_log_info("Pool created %p with .. ", (void *)pool); +- fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count); ++ fpavf_log_info("Pool created %p with .. 
obj_sz %d, cnt %d", ++ (void *)pool, object_size, memseg_count); + + /* assign pool handle to mempool */ + mp->pool_id = (uint64_t)pool; +diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build +index 5ba534049a..f2be71bc05 100644 +--- a/dpdk/drivers/meson.build ++++ b/dpdk/drivers/meson.build +@@ -93,7 +93,7 @@ foreach subpath:subdirs + if skip_class + drv_path = join_paths(class, '*') + dpdk_drvs_disabled += drv_path +- set_variable(drv_path.underscorify() + '_disable_reason', reason) ++ set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) + continue + endif + endif +@@ -199,7 +199,7 @@ foreach subpath:subdirs + # component disable printout in those cases + if reason != '' + dpdk_drvs_disabled += drv_path +- set_variable(drv_path.underscorify() + '_disable_reason', reason) ++ set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) + endif + continue + endif +diff --git a/dpdk/drivers/ml/cnxk/cn10k_ml_dev.c b/dpdk/drivers/ml/cnxk/cn10k_ml_dev.c +index 41f3b7a95d..3c328d9d0e 100644 +--- a/dpdk/drivers/ml/cnxk/cn10k_ml_dev.c ++++ b/dpdk/drivers/ml/cnxk/cn10k_ml_dev.c +@@ -108,14 +108,14 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + + kvlist = rte_kvargs_parse(devargs->args, valid_args); + if (kvlist == NULL) { +- plt_err("Error parsing devargs\n"); ++ plt_err("Error parsing devargs"); + return -EINVAL; + } + + if (rte_kvargs_count(kvlist, CN10K_ML_FW_PATH) == 1) { + ret = rte_kvargs_process(kvlist, CN10K_ML_FW_PATH, &parse_string_arg, &fw_path); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", CN10K_ML_FW_PATH); ++ plt_err("Error processing arguments, key = %s", CN10K_ML_FW_PATH); + ret = -EINVAL; + goto exit; + } +@@ -126,7 +126,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, CN10K_ML_FW_ENABLE_DPE_WARNINGS, + &parse_integer_arg, &cn10k_mldev->fw.enable_dpe_warnings); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", ++ plt_err("Error processing arguments, key = %s", + CN10K_ML_FW_ENABLE_DPE_WARNINGS); + ret = -EINVAL; + goto exit; +@@ -138,7 +138,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, CN10K_ML_FW_REPORT_DPE_WARNINGS, + &parse_integer_arg, &cn10k_mldev->fw.report_dpe_warnings); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", ++ plt_err("Error processing arguments, key = %s", + CN10K_ML_FW_REPORT_DPE_WARNINGS); + ret = -EINVAL; + goto exit; +@@ -150,7 +150,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, CN10K_ML_DEV_CACHE_MODEL_DATA, &parse_integer_arg, + &cn10k_mldev->cache_model_data); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", ++ plt_err("Error processing arguments, key = %s", + CN10K_ML_DEV_CACHE_MODEL_DATA); + ret = -EINVAL; + goto exit; +@@ -162,7 +162,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, CN10K_ML_OCM_ALLOC_MODE, &parse_string_arg, + &ocm_alloc_mode); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", CN10K_ML_OCM_ALLOC_MODE); ++ plt_err("Error processing arguments, key = %s", CN10K_ML_OCM_ALLOC_MODE); + ret = -EINVAL; + goto exit; + } +@@ -173,7 +173,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, 
CN10K_ML_DEV_HW_QUEUE_LOCK, &parse_integer_arg, + &cn10k_mldev->hw_queue_lock); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", ++ plt_err("Error processing arguments, key = %s", + CN10K_ML_DEV_HW_QUEUE_LOCK); + ret = -EINVAL; + goto exit; +@@ -185,7 +185,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10 + ret = rte_kvargs_process(kvlist, CN10K_ML_OCM_PAGE_SIZE, &parse_integer_arg, + &cn10k_mldev->ocm_page_size); + if (ret < 0) { +- plt_err("Error processing arguments, key = %s\n", CN10K_ML_OCM_PAGE_SIZE); ++ plt_err("Error processing arguments, key = %s", CN10K_ML_OCM_PAGE_SIZE); + ret = -EINVAL; + goto exit; + } +@@ -204,7 +204,7 @@ check_args: } else { - if (unlikely(ena_rx_ctx->l4_csum_err)) { - ++rx_stats->l4_csum_bad; -- /* -- * For the L4 Rx checksum offload the HW may indicate -- * bad checksum although it's valid. Because of that, -- * we're setting the UNKNOWN flag to let the app -- * re-verify the checksum. -- */ -- ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; -+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; - } else { - ++rx_stats->l4_csum_good; - ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; -@@ -797,7 +792,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) + if ((cn10k_mldev->fw.enable_dpe_warnings < 0) || + (cn10k_mldev->fw.enable_dpe_warnings > 1)) { +- plt_err("Invalid argument, %s = %d\n", CN10K_ML_FW_ENABLE_DPE_WARNINGS, ++ plt_err("Invalid argument, %s = %d", CN10K_ML_FW_ENABLE_DPE_WARNINGS, + cn10k_mldev->fw.enable_dpe_warnings); + ret = -EINVAL; + goto exit; +@@ -218,7 +218,7 @@ check_args: + } else { + if ((cn10k_mldev->fw.report_dpe_warnings < 0) || + (cn10k_mldev->fw.report_dpe_warnings > 1)) { +- plt_err("Invalid argument, %s = %d\n", CN10K_ML_FW_REPORT_DPE_WARNINGS, ++ plt_err("Invalid argument, %s = %d", CN10K_ML_FW_REPORT_DPE_WARNINGS, + cn10k_mldev->fw.report_dpe_warnings); + ret = -EINVAL; + goto exit; +@@ -231,7 +231,7 @@ check_args: + cn10k_mldev->cache_model_data = CN10K_ML_DEV_CACHE_MODEL_DATA_DEFAULT; + } else { + if ((cn10k_mldev->cache_model_data < 0) || (cn10k_mldev->cache_model_data > 1)) { +- plt_err("Invalid argument, %s = %d\n", CN10K_ML_DEV_CACHE_MODEL_DATA, ++ plt_err("Invalid argument, %s = %d", CN10K_ML_DEV_CACHE_MODEL_DATA, + cn10k_mldev->cache_model_data); + ret = -EINVAL; + goto exit; +@@ -244,7 +244,7 @@ check_args: + } else { + if (!((strcmp(ocm_alloc_mode, "lowest") == 0) || + (strcmp(ocm_alloc_mode, "largest") == 0))) { +- plt_err("Invalid argument, %s = %s\n", CN10K_ML_OCM_ALLOC_MODE, ++ plt_err("Invalid argument, %s = %s", CN10K_ML_OCM_ALLOC_MODE, + ocm_alloc_mode); + ret = -EINVAL; + goto exit; +@@ -257,7 +257,7 @@ check_args: + cn10k_mldev->hw_queue_lock = CN10K_ML_DEV_HW_QUEUE_LOCK_DEFAULT; + } else { + if ((cn10k_mldev->hw_queue_lock < 0) || (cn10k_mldev->hw_queue_lock > 1)) { +- plt_err("Invalid argument, %s = %d\n", CN10K_ML_DEV_HW_QUEUE_LOCK, ++ plt_err("Invalid argument, %s = %d", CN10K_ML_DEV_HW_QUEUE_LOCK, + cn10k_mldev->hw_queue_lock); + ret = -EINVAL; + goto exit; +@@ -269,7 +269,7 @@ check_args: + cn10k_mldev->ocm_page_size = CN10K_ML_OCM_PAGE_SIZE_DEFAULT; + } else { + if (cn10k_mldev->ocm_page_size < 0) { +- plt_err("Invalid argument, %s = %d\n", CN10K_ML_OCM_PAGE_SIZE, ++ plt_err("Invalid argument, %s = %d", CN10K_ML_OCM_PAGE_SIZE, + cn10k_mldev->ocm_page_size); + ret = -EINVAL; + goto exit; +@@ -284,7 +284,7 @@ check_args: + } - rc = ena_com_set_host_attributes(ena_dev); - if (rc) { -- if (rc == -ENA_COM_UNSUPPORTED) -+ if (rc == ENA_COM_UNSUPPORTED) - 
PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); - else - PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); -@@ -841,7 +836,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) + if (!found) { +- plt_err("Unsupported ocm_page_size = %d\n", cn10k_mldev->ocm_page_size); ++ plt_err("Unsupported ocm_page_size = %d", cn10k_mldev->ocm_page_size); + ret = -EINVAL; + goto exit; + } +@@ -773,7 +773,7 @@ cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev) + /* Read firmware image to a buffer */ + ret = rte_firmware_read(fw->path, &fw_buffer, &fw_size); + if ((ret < 0) || (fw_buffer == NULL)) { +- plt_err("Unable to read firmware data: %s\n", fw->path); ++ plt_err("Unable to read firmware data: %s", fw->path); + return ret; + } - rc = ena_com_set_host_attributes(&adapter->ena_dev); - if (rc) { -- if (rc == -ENA_COM_UNSUPPORTED) -+ if (rc == ENA_COM_UNSUPPORTED) - PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); - else - PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); -@@ -3105,33 +3100,12 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) - return 0; +diff --git a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c +index 7f7e5efcea..5370038733 100644 +--- a/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c ++++ b/dpdk/drivers/ml/cnxk/cn10k_ml_ops.c +@@ -288,6 +288,7 @@ cn10k_ml_model_xstat_get(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *l + static int + cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer *layer) + { ++ struct cn10k_ml_layer_xstats *xstats; + char str[RTE_MEMZONE_NAMESIZE]; + const struct plt_memzone *mz; + uint64_t isize = 0; +@@ -309,6 +310,16 @@ cn10k_ml_cache_model_data(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_layer * + PLT_PTR_ADD(mz->addr, isize), 1); + plt_memzone_free(mz); + ++ /* Reset sync xstats. 
*/ ++ xstats = layer->glow.sync_xstats; ++ xstats->hw_latency_tot = 0; ++ xstats->hw_latency_min = UINT64_MAX; ++ xstats->hw_latency_max = 0; ++ xstats->fw_latency_tot = 0; ++ xstats->fw_latency_min = UINT64_MAX; ++ xstats->fw_latency_max = 0; ++ xstats->dequeued_count = 0; ++ + return ret; } --static __rte_always_inline size_t --ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, -- struct rte_mbuf *mbuf, -- size_t mbuf_cnt, -- size_t buf_size) --{ -- struct rte_mbuf *m_next; -- -- while (mbuf != NULL) { -- m_next = mbuf->next; -- mbufs_to_clean[mbuf_cnt++] = mbuf; -- if (mbuf_cnt == buf_size) { -- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean, -- (unsigned int)mbuf_cnt); -- mbuf_cnt = 0; -- } -- mbuf = m_next; -- } -- -- return mbuf_cnt; --} -- - static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) - { -- struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; -+ struct rte_mbuf *pkts_to_clean[ENA_CLEANUP_BUF_THRESH]; - struct ena_ring *tx_ring = (struct ena_ring *)txp; - size_t mbuf_cnt = 0; -+ size_t pkt_cnt = 0; - unsigned int total_tx_descs = 0; - unsigned int total_tx_pkts = 0; - uint16_t cleanup_budget; -@@ -3162,8 +3136,13 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) +diff --git a/dpdk/drivers/ml/cnxk/cnxk_ml_ops.c b/dpdk/drivers/ml/cnxk/cnxk_ml_ops.c +index 971362b242..8863633155 100644 +--- a/dpdk/drivers/ml/cnxk/cnxk_ml_ops.c ++++ b/dpdk/drivers/ml/cnxk/cnxk_ml_ops.c +@@ -437,7 +437,7 @@ cnxk_ml_model_xstats_reset(struct cnxk_ml_dev *cnxk_mldev, int32_t model_id, - mbuf = tx_info->mbuf; - if (fast_free) { -- mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, -- ENA_CLEANUP_BUF_SIZE); -+ pkts_to_clean[pkt_cnt++] = mbuf; -+ mbuf_cnt += mbuf->nb_segs; -+ if (mbuf_cnt >= ENA_CLEANUP_BUF_THRESH) { -+ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); -+ mbuf_cnt = 0; -+ pkt_cnt = 0; -+ } - } else { - rte_pktmbuf_free(mbuf); + model = cnxk_mldev->mldev->data->models[model_id]; + if (model == NULL) { +- plt_err("Invalid model_id = %d\n", model_id); ++ plt_err("Invalid model_id = %d", model_id); + return -EINVAL; + } } -@@ -3186,8 +3165,7 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) - } - - if (mbuf_cnt != 0) -- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, -- (void **)mbufs_to_clean, mbuf_cnt); -+ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); +@@ -454,7 +454,7 @@ cnxk_ml_model_xstats_reset(struct cnxk_ml_dev *cnxk_mldev, int32_t model_id, + } else { + for (j = 0; j < nb_ids; j++) { + if (stat_ids[j] < start_id || stat_ids[j] > end_id) { +- plt_err("Invalid stat_ids[%d] = %d for model_id = %d\n", j, ++ plt_err("Invalid stat_ids[%d] = %d for model_id = %d", j, + stat_ids[j], lcl_model_id); + return -EINVAL; + } +@@ -510,12 +510,12 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co - /* Notify completion handler that full cleanup was performed */ - if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) -diff --git a/dpdk/drivers/net/failsafe/failsafe_args.c b/dpdk/drivers/net/failsafe/failsafe_args.c -index b203e02d9a..3b867437d7 100644 ---- a/dpdk/drivers/net/failsafe/failsafe_args.c -+++ b/dpdk/drivers/net/failsafe/failsafe_args.c -@@ -248,7 +248,7 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param, - goto free_args; - } else { - ERROR("Unrecognized device type: %.*s", (int)b, param); -- return -EINVAL; -+ ret = -EINVAL; + cnxk_ml_dev_info_get(dev, &dev_info); + if (conf->nb_models > dev_info.max_models) { +- plt_err("Invalid device config, 
nb_models > %u\n", dev_info.max_models); ++ plt_err("Invalid device config, nb_models > %u", dev_info.max_models); + return -EINVAL; } - free_args: - free(args); -diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c -index 4d3c4c10cf..cc2012786d 100644 ---- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c -+++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c -@@ -3057,7 +3057,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) - struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); - struct rte_intr_handle *intr_handle = pdev->intr_handle; -- int diag, i; -+ int diag, i, ret; - struct fm10k_macvlan_filter_info *macvlan; - PMD_INIT_FUNC_TRACE(); -@@ -3146,21 +3146,24 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) - diag = fm10k_stats_reset(dev); - if (diag != 0) { - PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag); -- return diag; -+ ret = diag; -+ goto err_stat; + if (conf->nb_queue_pairs > dev_info.max_queue_pairs) { +- plt_err("Invalid device config, nb_queue_pairs > %u\n", dev_info.max_queue_pairs); ++ plt_err("Invalid device config, nb_queue_pairs > %u", dev_info.max_queue_pairs); + return -EINVAL; } - /* Reset the hw */ - diag = fm10k_reset_hw(hw); - if (diag != FM10K_SUCCESS) { - PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); -- return -EIO; -+ ret = -EIO; -+ goto err_reset_hw; +@@ -533,10 +533,10 @@ cnxk_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *co + plt_ml_dbg("Re-configuring ML device, nb_queue_pairs = %u, nb_models = %u", + conf->nb_queue_pairs, conf->nb_models); + } else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_STARTED) { +- plt_err("Device can't be reconfigured in started state\n"); ++ plt_err("Device can't be reconfigured in started state"); + return -ENOTSUP; + } else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_CLOSED) { +- plt_err("Device can't be reconfigured after close\n"); ++ plt_err("Device can't be reconfigured after close"); + return -ENOTSUP; } - /* Setup mailbox service */ - diag = fm10k_setup_mbx_service(hw); - if (diag != FM10K_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); -- return -EIO; -+ ret = -EIO; -+ goto err_mbx; +@@ -853,7 +853,7 @@ cnxk_ml_dev_queue_pair_setup(struct rte_ml_dev *dev, uint16_t queue_pair_id, + uint32_t nb_desc; + + if (queue_pair_id >= dev->data->nb_queue_pairs) { +- plt_err("Queue-pair id = %u (>= max queue pairs supported, %u)\n", queue_pair_id, ++ plt_err("Queue-pair id = %u (>= max queue pairs supported, %u)", queue_pair_id, + dev->data->nb_queue_pairs); + return -EINVAL; } +@@ -1249,11 +1249,11 @@ cnxk_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params, u + } + + if ((total_wb_pages + max_scratch_pages) > ocm->num_pages) { +- plt_err("model_id = %u: total_wb_pages (%u) + scratch_pages (%u) > %u\n", ++ plt_err("model_id = %u: total_wb_pages (%u) + scratch_pages (%u) > %u", + lcl_model_id, total_wb_pages, max_scratch_pages, ocm->num_pages); + + if (model->type == ML_CNXK_MODEL_TYPE_GLOW) { +- plt_ml_dbg("layer_id = %u: wb_pages = %u, scratch_pages = %u\n", layer_id, ++ plt_ml_dbg("layer_id = %u: wb_pages = %u, scratch_pages = %u", layer_id, + model->layer[layer_id].glow.ocm_map.wb_pages, + model->layer[layer_id].glow.ocm_map.scratch_pages); + #ifdef RTE_MLDEV_CNXK_ENABLE_MVTVM +@@ -1262,7 +1262,7 @@ cnxk_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params, u + layer_id++) { + if (model->layer[layer_id].type == ML_CNXK_LAYER_TYPE_MRVL) { + 
plt_ml_dbg( +- "layer_id = %u: wb_pages = %u, scratch_pages = %u\n", ++ "layer_id = %u: wb_pages = %u, scratch_pages = %u", + layer_id, + model->layer[layer_id].glow.ocm_map.wb_pages, + model->layer[layer_id].glow.ocm_map.scratch_pages); +@@ -1462,7 +1462,8 @@ cnxk_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buf + d_offset = 0; + q_offset = 0; + for (i = 0; i < info->nb_inputs; i++) { +- if (model->type == ML_CNXK_MODEL_TYPE_TVM) { ++ if (model->type == ML_CNXK_MODEL_TYPE_TVM && ++ model->subtype != ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) { + lcl_dbuffer = dbuffer[i]->addr; + lcl_qbuffer = qbuffer[i]->addr; + } else { +@@ -1474,7 +1475,8 @@ cnxk_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_buf + if (ret < 0) + return ret; - /*PF/VF has different interrupt handling mechanism */ -@@ -3199,7 +3202,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) +- if (model->type == ML_CNXK_MODEL_TYPE_GLOW) { ++ if ((model->type == ML_CNXK_MODEL_TYPE_GLOW) || ++ (model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL)) { + d_offset += info->input[i].sz_d; + q_offset += info->input[i].sz_q; + } +@@ -1516,7 +1518,8 @@ cnxk_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_b + q_offset = 0; + d_offset = 0; + for (i = 0; i < info->nb_outputs; i++) { +- if (model->type == ML_CNXK_MODEL_TYPE_TVM) { ++ if (model->type == ML_CNXK_MODEL_TYPE_TVM && ++ model->subtype != ML_CNXK_MODEL_SUBTYPE_TVM_MRVL) { + lcl_qbuffer = qbuffer[i]->addr; + lcl_dbuffer = dbuffer[i]->addr; + } else { +@@ -1528,7 +1531,8 @@ cnxk_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, struct rte_ml_b + if (ret < 0) + return ret; - if (switch_ready == false) { - PMD_INIT_LOG(ERR, "switch is not ready"); -- return -1; -+ ret = -1; -+ goto err_switch_ready; +- if (model->type == ML_CNXK_MODEL_TYPE_GLOW) { ++ if ((model->type == ML_CNXK_MODEL_TYPE_GLOW) || ++ (model->subtype == ML_CNXK_MODEL_SUBTYPE_TVM_MRVL)) { + q_offset += info->output[i].sz_q; + d_offset += info->output[i].sz_d; } - } +diff --git a/dpdk/drivers/ml/cnxk/mvtvm_ml_model.c b/dpdk/drivers/ml/cnxk/mvtvm_ml_model.c +index 0dbe08e988..bbda907714 100644 +--- a/dpdk/drivers/ml/cnxk/mvtvm_ml_model.c ++++ b/dpdk/drivers/ml/cnxk/mvtvm_ml_model.c +@@ -352,7 +352,7 @@ tvm_mrvl_model: + metadata = &model->mvtvm.metadata; + strlcpy(info->name, metadata->model.name, TVMDP_NAME_STRLEN); -@@ -3234,7 +3238,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) +- info->io_layout = RTE_ML_IO_LAYOUT_SPLIT; ++ info->io_layout = RTE_ML_IO_LAYOUT_PACKED; + } - if (!hw->mac.default_vid) { - PMD_INIT_LOG(ERR, "default VID is not ready"); -- return -1; -+ ret = -1; -+ goto err_vid; - } - } + void +diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +index 397a32db58..6b7b16f348 100644 +--- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c ++++ b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c +@@ -6,6 +6,7 @@ + * All rights reserved. 
+ */ -@@ -3243,6 +3248,28 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) - MAIN_VSI_POOL_NUMBER); ++#include + #include + #include + #include +@@ -39,7 +40,7 @@ + #define DFLT_FRAME_SIZE (1 << 11) + #define DFLT_FRAME_COUNT (1 << 9) - return 0; -+ -+err_vid: -+err_switch_ready: -+ rte_intr_disable(intr_handle); -+ -+ if (hw->mac.type == fm10k_mac_pf) { -+ fm10k_dev_disable_intr_pf(dev); -+ rte_intr_callback_unregister(intr_handle, -+ fm10k_dev_interrupt_handler_pf, (void *)dev); -+ } else { -+ fm10k_dev_disable_intr_vf(dev); -+ rte_intr_callback_unregister(intr_handle, -+ fm10k_dev_interrupt_handler_vf, (void *)dev); -+ } -+ -+err_mbx: -+err_reset_hw: -+err_stat: -+ rte_free(dev->data->mac_addrs); -+ dev->data->mac_addrs = NULL; -+ -+ return ret; +-struct pkt_rx_queue { ++struct __rte_cache_aligned pkt_rx_queue { + int sockfd; + + struct iovec *rd; +@@ -55,7 +56,7 @@ struct pkt_rx_queue { + volatile unsigned long rx_bytes; + }; + +-struct pkt_tx_queue { ++struct __rte_cache_aligned pkt_tx_queue { + int sockfd; + unsigned int frame_data_size; + +diff --git a/dpdk/drivers/net/af_xdp/compat.h b/dpdk/drivers/net/af_xdp/compat.h +index 28ea64aeaa..3b5a5c1ed5 100644 +--- a/dpdk/drivers/net/af_xdp/compat.h ++++ b/dpdk/drivers/net/af_xdp/compat.h +@@ -46,6 +46,21 @@ create_shared_socket(struct xsk_socket **xsk_ptr __rte_unused, } + #endif ++#ifdef ETH_AF_XDP_UPDATE_XSKMAP ++static __rte_always_inline int ++update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx __rte_unused) ++{ ++ return xsk_socket__update_xskmap(xsk, map_fd); ++} ++#else ++static __rte_always_inline int ++update_xskmap(struct xsk_socket *xsk, int map_fd, int xsk_queue_idx) ++{ ++ int fd = xsk_socket__fd(xsk); ++ return bpf_map_update_elem(map_fd, &xsk_queue_idx, &fd, 0); ++} ++#endif ++ + #ifdef XDP_USE_NEED_WAKEUP static int -diff --git a/dpdk/drivers/net/gve/gve_rx_dqo.c b/dpdk/drivers/net/gve/gve_rx_dqo.c -index 7c7a8c48d0..a56cdbf11b 100644 ---- a/dpdk/drivers/net/gve/gve_rx_dqo.c -+++ b/dpdk/drivers/net/gve/gve_rx_dqo.c -@@ -127,7 +127,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) - rxm->ol_flags = 0; + tx_syscall_needed(struct xsk_ring_prod *q) +diff --git a/dpdk/drivers/net/af_xdp/meson.build b/dpdk/drivers/net/af_xdp/meson.build +index 9f33e57fa2..69d109ff46 100644 +--- a/dpdk/drivers/net/af_xdp/meson.build ++++ b/dpdk/drivers/net/af_xdp/meson.build +@@ -7,6 +7,12 @@ if is_windows + subdir_done() + endif - rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; -- rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash); -+ rxm->hash.rss = rte_le_to_cpu_32(rx_desc->hash); ++if arch_subdir == 'x86' and dpdk_conf.get('RTE_ARCH_32') ++ build = false ++ reason = 'not supported on 32-bit x86' ++ subdir_done() ++endif ++ + sources = files('rte_eth_af_xdp.c') - rx_pkts[nb_rx++] = rxm; - bytes += pkt_len; -diff --git a/dpdk/drivers/net/gve/gve_tx.c b/dpdk/drivers/net/gve/gve_tx.c -index 2e0d001109..bb21b90635 100644 ---- a/dpdk/drivers/net/gve/gve_tx.c -+++ b/dpdk/drivers/net/gve/gve_tx.c -@@ -681,7 +681,7 @@ gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + libxdp_ver = '>=1.2.2' +@@ -77,6 +83,10 @@ if build + dependencies : bpf_dep, args: cflags) + cflags += ['-DRTE_NET_AF_XDP_LIBBPF_XDP_ATTACH'] + endif ++ if cc.has_function('xsk_socket__update_xskmap', prefix : xsk_check_prefix, ++ dependencies : ext_deps, args: cflags) ++ cflags += ['-DETH_AF_XDP_UPDATE_XSKMAP'] ++ endif + endif - rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr); + require_iova_in_mbuf = false +diff 
--git a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +index 353c8688ec..74f750dbb3 100644 +--- a/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c ++++ b/dpdk/drivers/net/af_xdp/rte_eth_af_xdp.c +@@ -83,12 +83,13 @@ RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE); -- dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; -+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + #define ETH_AF_XDP_MP_KEY "afxdp_mp_send_fds" - return 0; - } -diff --git a/dpdk/drivers/net/gve/gve_tx_dqo.c b/dpdk/drivers/net/gve/gve_tx_dqo.c -index 16101de84f..97d9c6549b 100644 ---- a/dpdk/drivers/net/gve/gve_tx_dqo.c -+++ b/dpdk/drivers/net/gve/gve_tx_dqo.c -@@ -13,7 +13,7 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq) - struct gve_tx_compl_desc *compl_desc; - struct gve_tx_queue *aim_txq; - uint16_t nb_desc_clean; -- struct rte_mbuf *txe; -+ struct rte_mbuf *txe, *txe_next; - uint16_t compl_tag; - uint16_t next; ++#define DP_BASE_PATH "/tmp/afxdp_dp" ++#define DP_UDS_SOCK "afxdp.sock" + #define MAX_LONG_OPT_SZ 64 + #define UDS_MAX_FD_NUM 2 + #define UDS_MAX_CMD_LEN 64 + #define UDS_MAX_CMD_RESP 128 + #define UDS_XSK_MAP_FD_MSG "/xsk_map_fd" +-#define UDS_SOCK "/tmp/afxdp.sock" + #define UDS_CONNECT_MSG "/connect" + #define UDS_HOST_OK_MSG "/host_ok" + #define UDS_HOST_NAK_MSG "/host_nak" +@@ -123,7 +124,7 @@ struct xsk_umem_info { + struct rx_stats { + uint64_t rx_pkts; + uint64_t rx_bytes; +- uint64_t rx_dropped; ++ uint64_t imissed_offset; + }; -@@ -43,10 +43,15 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq) - PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!"); - /* FALLTHROUGH */ - case GVE_COMPL_TYPE_DQO_PKT: -+ /* free all segments. */ - txe = aim_txq->sw_ring[compl_tag]; -- if (txe != NULL) { -+ while (txe != NULL) { -+ txe_next = txe->next; - rte_pktmbuf_free_seg(txe); -- txe = NULL; -+ if (aim_txq->sw_ring[compl_tag] == txe) -+ aim_txq->sw_ring[compl_tag] = NULL; -+ txe = txe_next; -+ compl_tag = (compl_tag + 1) & (aim_txq->sw_size - 1); - } - break; - case GVE_COMPL_TYPE_DQO_MISS: -@@ -83,6 +88,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) - uint16_t tx_id; - uint16_t sw_id; - uint64_t bytes; -+ uint16_t first_sw_id; + struct pkt_rx_queue { +@@ -131,6 +132,7 @@ struct pkt_rx_queue { + struct xsk_umem_info *umem; + struct xsk_socket *xsk; + struct rte_mempool *mb_pool; ++ uint16_t port; - sw_ring = txq->sw_ring; - txr = txq->tx_ring; -@@ -107,23 +113,25 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + struct rx_stats stats; - ol_flags = tx_pkt->ol_flags; - nb_used = tx_pkt->nb_segs; -- -+ first_sw_id = sw_id; - do { -- txd = &txr[tx_id]; -+ if (sw_ring[sw_id] != NULL) -+ PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring"); +@@ -171,6 +173,7 @@ struct pmd_internals { + bool custom_prog_configured; + bool force_copy; + bool use_cni; ++ char dp_path[PATH_MAX]; + struct bpf_map *map; -+ txd = &txr[tx_id]; - sw_ring[sw_id] = tx_pkt; + struct rte_ether_addr eth_addr; +@@ -191,6 +194,7 @@ struct pmd_process_private { + #define ETH_AF_XDP_BUDGET_ARG "busy_budget" + #define ETH_AF_XDP_FORCE_COPY_ARG "force_copy" + #define ETH_AF_XDP_USE_CNI_ARG "use_cni" ++#define ETH_AF_XDP_DP_PATH_ARG "dp_path" - /* fill Tx descriptor */ - txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt)); - txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO; -- txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id); -+ txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id); - txd->pkt.buf_size = 
RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO); + static const char * const valid_arguments[] = { + ETH_AF_XDP_IFACE_ARG, +@@ -201,6 +205,7 @@ static const char * const valid_arguments[] = { + ETH_AF_XDP_BUDGET_ARG, + ETH_AF_XDP_FORCE_COPY_ARG, + ETH_AF_XDP_USE_CNI_ARG, ++ ETH_AF_XDP_DP_PATH_ARG, + NULL + }; - /* size of desc_ring and sw_ring could be different */ - tx_id = (tx_id + 1) & mask; - sw_id = (sw_id + 1) & sw_mask; +@@ -311,6 +316,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + unsigned long rx_bytes = 0; + int i; + struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; -- bytes += tx_pkt->pkt_len; -+ bytes += tx_pkt->data_len; - tx_pkt = tx_pkt->next; - } while (tx_pkt); + nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx); -@@ -384,7 +392,7 @@ gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -338,6 +344,8 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ dev->data->rx_mbuf_alloc_failed += nb_pkts; ++ + return 0; + } - rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr); +@@ -360,6 +368,7 @@ af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + bufs[i]->data_off = offset - sizeof(struct rte_mbuf) - + rte_pktmbuf_priv_size(umem->mb_pool) - + umem->mb_pool->header_size; ++ bufs[i]->port = rxq->port; -- dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; -+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + rte_pktmbuf_pkt_len(bufs[i]) = len; + rte_pktmbuf_data_len(bufs[i]) = len; +@@ -388,6 +397,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + int i; + uint32_t free_thresh = fq->size >> 1; + struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE]; ++ struct rte_eth_dev *dev = &rte_eth_devices[rxq->port]; - return 0; - } -diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c -index 2c1664485b..001ff49b36 100644 ---- a/dpdk/drivers/net/hns3/hns3_cmd.c -+++ b/dpdk/drivers/net/hns3/hns3_cmd.c -@@ -545,7 +545,9 @@ hns3_set_dcb_capability(struct hns3_hw *hw) - if (device_id == HNS3_DEV_ID_25GE_RDMA || - device_id == HNS3_DEV_ID_50GE_RDMA || - device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || -- device_id == HNS3_DEV_ID_200G_RDMA) -+ device_id == HNS3_DEV_ID_200G_RDMA || -+ device_id == HNS3_DEV_ID_100G_ROH || -+ device_id == HNS3_DEV_ID_200G_ROH) - hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); - } + if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh) + (void)reserve_fill_queue(umem, nb_pkts, NULL, fq); +@@ -406,6 +416,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + * xsk_ring_cons__peek + */ + rx->cached_cons -= nb_pkts; ++ dev->data->rx_mbuf_alloc_failed += nb_pkts; + return 0; + } -diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c -index 8f224aa00c..5e6cdfdaa0 100644 ---- a/dpdk/drivers/net/hns3/hns3_common.c -+++ b/dpdk/drivers/net/hns3/hns3_common.c -@@ -85,7 +85,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) - RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | - RTE_ETH_TX_OFFLOAD_VLAN_INSERT); +@@ -426,6 +437,7 @@ af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + rte_pktmbuf_data_len(mbufs[i]) = len; + rx_bytes += len; + bufs[i] = mbufs[i]; ++ bufs[i]->port = rxq->port; + } -- if (!hw->port_base_vlan_cfg.state) -+ if (!hns->is_vf && !hw->port_base_vlan_cfg.state) - 
info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; + xsk_ring_cons__release(rx, nb_pkts); +@@ -867,7 +879,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) - if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) -@@ -224,7 +224,7 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) - static int - hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + stats->ipackets += stats->q_ipackets[i]; + stats->ibytes += stats->q_ibytes[i]; +- stats->imissed += rxq->stats.rx_dropped; + stats->oerrors += txq->stats.tx_dropped; + fd = process_private->rxq_xsk_fds[i]; + ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS, +@@ -876,7 +887,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); + return -1; + } +- stats->imissed += xdp_stats.rx_dropped; ++ stats->imissed += xdp_stats.rx_dropped - rxq->stats.imissed_offset; + + stats->opackets += stats->q_opackets[i]; + stats->obytes += stats->q_obytes[i]; +@@ -889,13 +900,25 @@ static int + eth_stats_reset(struct rte_eth_dev *dev) { -- uint32_t val; -+ uint64_t val; + struct pmd_internals *internals = dev->data->dev_private; +- int i; ++ struct pmd_process_private *process_private = dev->process_private; ++ struct xdp_statistics xdp_stats; ++ socklen_t optlen; ++ int i, ret, fd; - RTE_SET_USED(key); + for (i = 0; i < internals->queue_cnt; i++) { + memset(&internals->rx_queues[i].stats, 0, + sizeof(struct rx_stats)); + memset(&internals->tx_queues[i].stats, 0, + sizeof(struct tx_stats)); ++ fd = process_private->rxq_xsk_fds[i]; ++ optlen = sizeof(struct xdp_statistics); ++ ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS, ++ &xdp_stats, &optlen) : -1; ++ if (ret != 0) { ++ AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n"); ++ return -1; ++ } ++ internals->rx_queues[i].stats.imissed_offset = xdp_stats.rx_dropped; + } -diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c -index 2831d3dc62..915e4eb768 100644 ---- a/dpdk/drivers/net/hns3/hns3_dcb.c -+++ b/dpdk/drivers/net/hns3/hns3_dcb.c -@@ -1499,7 +1499,6 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) - static int - hns3_dcb_hw_configure(struct hns3_adapter *hns) + return 0; +@@ -960,6 +983,9 @@ remove_xdp_program(struct pmd_internals *internals) + static void + xdp_umem_destroy(struct xsk_umem_info *umem) { -- struct rte_eth_dcb_rx_conf *dcb_rx_conf; - struct hns3_pf *pf = &hns->pf; - struct hns3_hw *hw = &hns->hw; - enum hns3_fc_status fc_status = hw->current_fc_status; -@@ -1519,12 +1518,8 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) - } ++ (void)xsk_umem__delete(umem->umem); ++ umem->umem = NULL; ++ + #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) + umem->mb_pool = NULL; + #else +@@ -992,11 +1018,8 @@ eth_dev_close(struct rte_eth_dev *dev) + break; + xsk_socket__delete(rxq->xsk); - if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { -- dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; -- if (dcb_rx_conf->nb_tcs == 0) -- hw->dcb_info.pfc_en = 1; /* tc0 only */ -- else -- hw->dcb_info.pfc_en = -- RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); -+ hw->dcb_info.pfc_en = -+ RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t); +- if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 +- == 0) { +- (void)xsk_umem__delete(rxq->umem->umem); ++ if (__atomic_fetch_sub(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) + xdp_umem_destroy(rxq->umem); 
+- } - hw->dcb_info.hw_pfc_map = - hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); -diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index ae81368f68..6e72730d75 100644 ---- a/dpdk/drivers/net/hns3/hns3_ethdev.c -+++ b/dpdk/drivers/net/hns3/hns3_ethdev.c -@@ -380,7 +380,7 @@ hns3_interrupt_handler(void *param) - hns3_warn(hw, "received reset interrupt"); - hns3_schedule_reset(hns); - } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { -- hns3_dev_handle_mbx_msg(hw); -+ hns3pf_handle_mbx_msg(hw); - } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { - hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " - "ras_int_stat:0x%x cmdq_int_stat:0x%x", -@@ -2738,6 +2738,7 @@ hns3_get_capability(struct hns3_hw *hw) - hw->rss_info.ipv6_sctp_offload_supported = false; - hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; - pf->support_multi_tc_pause = false; -+ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; - return 0; + /* free pkt_tx_queue */ + rte_free(rxq->pair); +@@ -1234,6 +1257,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); + goto err; } ++ umem->mz = mz; -@@ -2758,6 +2759,7 @@ hns3_get_capability(struct hns3_hw *hw) - hw->rss_info.ipv6_sctp_offload_supported = true; - hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; - pf->support_multi_tc_pause = true; -+ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + ret = xsk_umem__create(&umem->umem, mz->addr, + ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, +@@ -1244,7 +1268,6 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals, + AF_XDP_LOG(ERR, "Failed to create umem\n"); + goto err; + } +- umem->mz = mz; - return 0; + return umem; + +@@ -1351,7 +1374,7 @@ err_prefer: } -@@ -5545,28 +5547,14 @@ is_pf_reset_done(struct hns3_hw *hw) - static enum hns3_reset_level - hns3_detect_reset_event(struct hns3_hw *hw) + + static int +-init_uds_sock(struct sockaddr_un *server) ++init_uds_sock(struct sockaddr_un *server, const char *dp_path) { -- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); - enum hns3_reset_level new_req = HNS3_NONE_RESET; -- enum hns3_reset_level last_req; - uint32_t vector0_intr_state; + int sock; -- last_req = hns3_get_reset_level(hns, &hw->reset.pending); - vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); -- if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { -- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); -+ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) - new_req = HNS3_IMP_RESET; -- } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { -- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); -+ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) - new_req = HNS3_GLOBAL_RESET; -- } -- -- if (new_req == HNS3_NONE_RESET) -- return HNS3_NONE_RESET; -- -- if (last_req == HNS3_NONE_RESET || last_req < new_req) { -- hns3_schedule_delayed_reset(hns); -- hns3_warn(hw, "High level reset detected, delay do reset"); -- } +@@ -1362,7 +1385,7 @@ init_uds_sock(struct sockaddr_un *server) + } - return new_req; - } -@@ -5586,10 +5574,14 @@ hns3_is_reset_pending(struct hns3_adapter *hns) - return false; + server->sun_family = AF_UNIX; +- strlcpy(server->sun_path, UDS_SOCK, sizeof(server->sun_path)); ++ strlcpy(server->sun_path, dp_path, sizeof(server->sun_path)); - new_req = hns3_detect_reset_event(hw); -+ if (new_req == HNS3_NONE_RESET) -+ return false; -+ - last_req = 
hns3_get_reset_level(hns, &hw->reset.pending); -- if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && -- new_req < last_req) { -- hns3_warn(hw, "High level reset %d is pending", last_req); -+ if (last_req == HNS3_NONE_RESET || last_req < new_req) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); -+ hns3_schedule_delayed_reset(hns); -+ hns3_warn(hw, "High level reset detected, delay do reset"); - return true; - } - last_req = hns3_get_reset_level(hns, &hw->reset.request); -@@ -6054,7 +6046,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) + if (connect(sock, (struct sockaddr *)server, sizeof(struct sockaddr_un)) < 0) { + close(sock); +@@ -1382,7 +1405,7 @@ struct msg_internal { + }; + + static int +-send_msg(int sock, char *request, int *fd) ++send_msg(int sock, char *request, int *fd, const char *dp_path) { - struct hns3_sfp_info_cmd *resp; - uint32_t tmp_fec_capa; -- uint8_t auto_state; -+ uint8_t auto_state = 0; - struct hns3_cmd_desc desc; - int ret; + int snd; + struct iovec iov; +@@ -1393,7 +1416,7 @@ send_msg(int sock, char *request, int *fd) -@@ -6658,6 +6650,8 @@ static const struct rte_pci_id pci_id_hns3_map[] = { - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, -+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, -+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, - { .vendor_id = 0, }, /* sentinel */ - }; + memset(&dst, 0, sizeof(dst)); + dst.sun_family = AF_UNIX; +- strlcpy(dst.sun_path, UDS_SOCK, sizeof(dst.sun_path)); ++ strlcpy(dst.sun_path, dp_path, sizeof(dst.sun_path)); -diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index 12d8299def..c190d5109b 100644 ---- a/dpdk/drivers/net/hns3/hns3_ethdev.h -+++ b/dpdk/drivers/net/hns3/hns3_ethdev.h -@@ -28,7 +28,9 @@ - #define HNS3_DEV_ID_25GE_RDMA 0xA222 - #define HNS3_DEV_ID_50GE_RDMA 0xA224 - #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 -+#define HNS3_DEV_ID_100G_ROH 0xA227 - #define HNS3_DEV_ID_200G_RDMA 0xA228 -+#define HNS3_DEV_ID_200G_ROH 0xA22C - #define HNS3_DEV_ID_100G_VF 0xA22E - #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F - -@@ -485,6 +487,9 @@ struct hns3_queue_intr { - #define HNS3_PKTS_DROP_STATS_MODE1 0 - #define HNS3_PKTS_DROP_STATS_MODE2 1 + /* Initialize message header structure */ + memset(&msgh, 0, sizeof(msgh)); +@@ -1470,8 +1493,8 @@ read_msg(int sock, char *response, struct sockaddr_un *s, int *fd) + } -+#define HNS3_RX_DMA_ADDR_ALIGN_128 128 -+#define HNS3_RX_DMA_ADDR_ALIGN_64 64 -+ - struct hns3_hw { - struct rte_eth_dev_data *data; - void *io_base; -@@ -552,6 +557,11 @@ struct hns3_hw { - * direction. - */ - uint8_t min_tx_pkt_len; -+ /* -+ * The required alignment of the DMA address of the RX buffer. -+ * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values. 
-+ */ -+ uint16_t rx_dma_addr_align; - - struct hns3_queue_intr intr; - /* -diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -index 916cc0fb1b..d4d691ad86 100644 ---- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -+++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c -@@ -91,11 +91,13 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + static int +-make_request_cni(int sock, struct sockaddr_un *server, char *request, +- int *req_fd, char *response, int *out_fd) ++make_request_dp(int sock, struct sockaddr_un *server, char *request, ++ int *req_fd, char *response, int *out_fd, const char *dp_path) { - /* mac address was checked by upper level interface */ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; -+ struct hns3_vf_to_pf_msg req; - int ret; + int rval; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, -- HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, -- RTE_ETHER_ADDR_LEN, false, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, -+ HNS3_MBX_MAC_VLAN_UC_ADD); -+ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); -@@ -110,12 +112,13 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) +@@ -1483,7 +1506,7 @@ make_request_cni(int sock, struct sockaddr_un *server, char *request, + if (req_fd == NULL) + rval = write(sock, request, strlen(request)); + else +- rval = send_msg(sock, request, req_fd); ++ rval = send_msg(sock, request, req_fd, dp_path); + + if (rval < 0) { + AF_XDP_LOG(ERR, "Write error %s\n", strerror(errno)); +@@ -1507,7 +1530,7 @@ check_response(char *response, char *exp_resp, long size) + } + + static int +-get_cni_fd(char *if_name) ++uds_get_xskmap_fd(char *if_name, const char *dp_path) { - /* mac address was checked by upper level interface */ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; -+ struct hns3_vf_to_pf_msg req; - int ret; + char request[UDS_MAX_CMD_LEN], response[UDS_MAX_CMD_RESP]; + char hostname[MAX_LONG_OPT_SZ], exp_resp[UDS_MAX_CMD_RESP]; +@@ -1520,14 +1543,14 @@ get_cni_fd(char *if_name) + return -1; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, -- HNS3_MBX_MAC_VLAN_UC_REMOVE, -- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, -- false, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, -+ HNS3_MBX_MAC_VLAN_UC_REMOVE); -+ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); -@@ -134,6 +137,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, - struct rte_ether_addr *old_addr; - uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; -+ struct hns3_vf_to_pf_msg req; - int ret; + memset(&server, 0, sizeof(server)); +- sock = init_uds_sock(&server); ++ sock = init_uds_sock(&server, dp_path); + if (sock < 0) + return -1; - /* -@@ -146,9 +150,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, - memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, - RTE_ETHER_ADDR_LEN); +- /* Initiates handshake to CNI send: /connect,hostname */ ++ /* Initiates handshake to the AF_XDP Device Plugin send: /connect,hostname */ + snprintf(request, sizeof(request), "%s,%s", UDS_CONNECT_MSG, hostname); + memset(response, 0, sizeof(response)); +- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { ++ if 
(make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { + AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); + goto err_close; + } +@@ -1541,7 +1564,7 @@ get_cni_fd(char *if_name) + /* Request for "/version" */ + strlcpy(request, UDS_VERSION_MSG, UDS_MAX_CMD_LEN); + memset(response, 0, sizeof(response)); +- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { ++ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { + AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); + goto err_close; + } +@@ -1549,7 +1572,7 @@ get_cni_fd(char *if_name) + /* Request for file descriptor for netdev name*/ + snprintf(request, sizeof(request), "%s,%s", UDS_XSK_MAP_FD_MSG, if_name); + memset(response, 0, sizeof(response)); +- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { ++ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { + AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); + goto err_close; + } +@@ -1571,7 +1594,7 @@ get_cni_fd(char *if_name) + /* Initiate close connection */ + strlcpy(request, UDS_FIN_MSG, UDS_MAX_CMD_LEN); + memset(response, 0, sizeof(response)); +- if (make_request_cni(sock, &server, request, NULL, response, &out_fd) < 0) { ++ if (make_request_dp(sock, &server, request, NULL, response, &out_fd, dp_path) < 0) { + AF_XDP_LOG(ERR, "Error in processing cmd [%s]\n", request); + goto err_close; + } +@@ -1695,21 +1718,21 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq, + } -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, -- HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, -- HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, -+ HNS3_MBX_MAC_VLAN_UC_MODIFY); -+ memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN); -+ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); - if (ret) { - /* - * The hns3 VF PMD depends on the hns3 PF kernel ethdev -@@ -185,12 +190,13 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw, - struct rte_ether_addr *mac_addr) - { - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; -+ struct hns3_vf_to_pf_msg req; - int ret; + if (internals->use_cni) { +- int err, fd, map_fd; ++ int err, map_fd; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, -- HNS3_MBX_MAC_VLAN_MC_ADD, -- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, -- NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, -+ HNS3_MBX_MAC_VLAN_MC_ADD); -+ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); -@@ -206,12 +212,13 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, - struct rte_ether_addr *mac_addr) - { - char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; -+ struct hns3_vf_to_pf_msg req; - int ret; +- /* get socket fd from CNI plugin */ +- map_fd = get_cni_fd(internals->if_name); ++ /* get socket fd from AF_XDP Device Plugin */ ++ map_fd = uds_get_xskmap_fd(internals->if_name, internals->dp_path); + if (map_fd < 0) { +- AF_XDP_LOG(ERR, "Failed to receive CNI plugin fd\n"); ++ AF_XDP_LOG(ERR, "Failed to receive xskmap fd from AF_XDP Device Plugin\n"); + goto out_xsk; + } +- /* get socket fd */ +- fd = xsk_socket__fd(rxq->xsk); +- err = bpf_map_update_elem(map_fd, &rxq->xsk_queue_idx, &fd, 0); ++ ++ err = update_xskmap(rxq->xsk, map_fd, rxq->xsk_queue_idx); + if (err) { +- AF_XDP_LOG(ERR, "Failed to insert unprivileged xsk in map.\n"); ++ AF_XDP_LOG(ERR, "Failed 
to insert xsk in map.\n"); + goto out_xsk; + } ++ + } else if (rxq->busy_budget) { + ret = configure_preferred_busy_poll(rxq); + if (ret) { +@@ -1779,6 +1802,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, -- HNS3_MBX_MAC_VLAN_MC_REMOVE, -- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, -- NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, -+ HNS3_MBX_MAC_VLAN_MC_REMOVE); -+ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) { - hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, - mac_addr); -@@ -254,11 +261,12 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, - * the packets with vlan tag in promiscuous mode. - */ - hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); -- req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; -- req->msg[1] = en_bc_pmc ? 1 : 0; -- req->msg[2] = en_uc_pmc ? 1 : 0; -- req->msg[3] = en_mc_pmc ? 1 : 0; -- req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; -+ req->msg.code = HNS3_MBX_SET_PROMISC_MODE; -+ req->msg.en_bc = en_bc_pmc ? 1 : 0; -+ req->msg.en_uc = en_uc_pmc ? 1 : 0; -+ req->msg.en_mc = en_mc_pmc ? 1 : 0; -+ req->msg.en_limit_promisc = -+ hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; + process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd; - ret = hns3_cmd_send(hw, &desc, 1); - if (ret) -@@ -347,30 +355,26 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, - bool mmap, enum hns3_ring_type queue_type, - uint16_t queue_id) ++ rxq->port = dev->data->port_id; ++ + dev->data->rx_queues[rx_queue_id] = rxq; + return 0; + +@@ -1881,13 +1906,13 @@ static const struct eth_dev_ops ops = { + .get_monitor_addr = eth_get_monitor_addr, + }; + +-/* CNI option works in unprivileged container environment +- * and ethernet device functionality will be reduced. So +- * additional customiszed eth_dev_ops struct is needed +- * for cni. Promiscuous enable and disable functionality +- * is removed. ++/* AF_XDP Device Plugin option works in unprivileged ++ * container environments and ethernet device functionality ++ * will be reduced. So additional customised eth_dev_ops ++ * struct is needed for the Device Plugin. Promiscuous ++ * enable and disable functionality is removed. + **/ +-static const struct eth_dev_ops ops_cni = { ++static const struct eth_dev_ops ops_afxdp_dp = { + .dev_start = eth_dev_start, + .dev_stop = eth_dev_stop, + .dev_close = eth_dev_close, +@@ -2023,7 +2048,8 @@ xdp_get_channels_info(const char *if_name, int *max_queues, + static int + parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, + int *queue_cnt, int *shared_umem, char *prog_path, +- int *busy_budget, int *force_copy, int *use_cni) ++ int *busy_budget, int *force_copy, int *use_cni, ++ char *dp_path) { -- struct hns3_vf_bind_vector_msg bind_msg; -+ struct hns3_vf_to_pf_msg req = {0}; - const char *op_str; -- uint16_t code; int ret; -- memset(&bind_msg, 0, sizeof(bind_msg)); -- code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : -+ req.code = mmap ? 
HNS3_MBX_MAP_RING_TO_VECTOR : - HNS3_MBX_UNMAP_RING_TO_VECTOR; -- bind_msg.vector_id = (uint8_t)vector_id; -+ req.vector_id = (uint8_t)vector_id; -+ req.ring_num = 1; - - if (queue_type == HNS3_RING_TYPE_RX) -- bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; -+ req.ring_param[0].int_gl_index = HNS3_RING_GL_RX; - else -- bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; -- -- bind_msg.param[0].ring_type = queue_type; -- bind_msg.ring_num = 1; -- bind_msg.param[0].tqp_index = queue_id; -+ req.ring_param[0].int_gl_index = HNS3_RING_GL_TX; -+ req.ring_param[0].ring_type = queue_type; -+ req.ring_param[0].tqp_index = queue_id; - op_str = mmap ? "Map" : "Unmap"; -- ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, -- sizeof(bind_msg), false, NULL, 0); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) -- hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", -- op_str, queue_id, bind_msg.vector_id, ret); -+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.", -+ op_str, queue_id, req.vector_id, ret); +@@ -2069,6 +2095,11 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue, + if (ret < 0) + goto free_kvlist; ++ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_DP_PATH_ARG, ++ &parse_prog_arg, dp_path); ++ if (ret < 0) ++ goto free_kvlist; ++ + free_kvlist: + rte_kvargs_free(kvlist); return ret; - } -@@ -453,10 +457,12 @@ cfg_err: - static int - hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) +@@ -2108,7 +2139,7 @@ static struct rte_eth_dev * + init_internals(struct rte_vdev_device *dev, const char *if_name, + int start_queue_idx, int queue_cnt, int shared_umem, + const char *prog_path, int busy_budget, int force_copy, +- int use_cni) ++ int use_cni, const char *dp_path) { -+ struct hns3_vf_to_pf_msg req; - int ret; + const char *name = rte_vdev_device_name(dev); + const unsigned int numa_node = dev->device.numa_node; +@@ -2138,6 +2169,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, + internals->shared_umem = shared_umem; + internals->force_copy = force_copy; + internals->use_cni = use_cni; ++ strlcpy(internals->dp_path, dp_path, PATH_MAX); -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, -- sizeof(mtu), true, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0); -+ memcpy(req.data, &mtu, sizeof(mtu)); -+ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); - if (ret) - hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); + if (xdp_get_channels_info(if_name, &internals->max_queue_cnt, + &internals->combined_queue_cnt)) { +@@ -2199,7 +2231,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, + if (!internals->use_cni) + eth_dev->dev_ops = &ops; + else +- eth_dev->dev_ops = &ops_cni; ++ eth_dev->dev_ops = &ops_afxdp_dp; -@@ -563,13 +569,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) - val = hns3_read_dev(hw, HNS3_VF_RST_ING); - hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); - val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); -- if (clearval) { -- hw->reset.stats.global_cnt++; -- hns3_warn(hw, "Global reset detected, clear reset status"); -- } else { -- hns3_schedule_delayed_reset(hns); -- hns3_warn(hw, "Global reset detected, don't clear reset status"); -- } -+ hw->reset.stats.global_cnt++; -+ hns3_warn(hw, "Global reset detected, clear reset status"); + eth_dev->rx_pkt_burst = eth_af_xdp_rx; + eth_dev->tx_pkt_burst = eth_af_xdp_tx; +@@ -2328,6 +2360,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) + int busy_budget = 
-1, ret; + int force_copy = 0; + int use_cni = 0; ++ char dp_path[PATH_MAX] = {'\0'}; + struct rte_eth_dev *eth_dev = NULL; + const char *name = rte_vdev_device_name(dev); - ret = HNS3VF_VECTOR0_EVENT_RST; - goto out; -@@ -584,9 +585,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) +@@ -2370,7 +2403,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) - val = 0; - ret = HNS3VF_VECTOR0_EVENT_OTHER; + if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx, + &xsk_queue_cnt, &shared_umem, prog_path, +- &busy_budget, &force_copy, &use_cni) < 0) { ++ &busy_budget, &force_copy, &use_cni, dp_path) < 0) { + AF_XDP_LOG(ERR, "Invalid kvargs value\n"); + return -EINVAL; + } +@@ -2384,7 +2417,19 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) + if (use_cni && strnlen(prog_path, PATH_MAX)) { + AF_XDP_LOG(ERR, "When '%s' parameter is used, '%s' parameter is not valid\n", + ETH_AF_XDP_USE_CNI_ARG, ETH_AF_XDP_PROG_ARG); +- return -EINVAL; ++ return -EINVAL; ++ } + - out: -- if (clearval) -- *clearval = val; -+ *clearval = val; - return ret; - } ++ if (use_cni && !strnlen(dp_path, PATH_MAX)) { ++ snprintf(dp_path, sizeof(dp_path), "%s/%s/%s", DP_BASE_PATH, if_name, DP_UDS_SOCK); ++ AF_XDP_LOG(INFO, "'%s' parameter not provided, setting value to '%s'\n", ++ ETH_AF_XDP_DP_PATH_ARG, dp_path); ++ } ++ ++ if (!use_cni && strnlen(dp_path, PATH_MAX)) { ++ AF_XDP_LOG(ERR, "'%s' parameter is set, but '%s' was not enabled\n", ++ ETH_AF_XDP_DP_PATH_ARG, ETH_AF_XDP_USE_CNI_ARG); ++ return -EINVAL; + } -@@ -612,7 +613,7 @@ hns3vf_interrupt_handler(void *param) - hns3_schedule_reset(hns); - break; - case HNS3VF_VECTOR0_EVENT_MBX: -- hns3_dev_handle_mbx_msg(hw); -+ hns3vf_handle_mbx_msg(hw); - break; - default: - break; -@@ -647,12 +648,13 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) - uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; - uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; - struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); -+ struct hns3_vf_to_pf_msg req; + if (strlen(if_name) == 0) { +@@ -2410,7 +2455,7 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev) - __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, - __ATOMIC_RELEASE); + eth_dev = init_internals(dev, if_name, xsk_start_queue_idx, + xsk_queue_cnt, shared_umem, prog_path, +- busy_budget, force_copy, use_cni); ++ busy_budget, force_copy, use_cni, dp_path); + if (eth_dev == NULL) { + AF_XDP_LOG(ERR, "Failed to init internals\n"); + return -1; +@@ -2471,4 +2516,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp, + "xdp_prog= " + "busy_budget= " + "force_copy= " +- "use_cni= "); ++ "use_cni= " ++ "dp_path= "); +diff --git a/dpdk/drivers/net/ark/ark_ethdev_tx.c b/dpdk/drivers/net/ark/ark_ethdev_tx.c +index 4792754f19..8f1f90b1a4 100644 +--- a/dpdk/drivers/net/ark/ark_ethdev_tx.c ++++ b/dpdk/drivers/net/ark/ark_ethdev_tx.c +@@ -39,8 +39,8 @@ struct ark_tx_queue { + uint32_t queue_mask; -- (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, -- NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); -+ (void)hns3vf_mbx_send(hw, &req, false, NULL, 0); + /* 3 indexes to the paired data rings. 
*/ +- int32_t prod_index; /* where to put the next one */ +- int32_t free_index; /* mbuf has been freed */ ++ uint32_t prod_index; /* where to put the next one */ ++ uint32_t free_index; /* mbuf has been freed */ - while (remain_ms > 0) { - rte_delay_ms(HNS3_POLL_RESPONE_MS); -@@ -663,7 +665,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) - * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE - * mailbox from PF driver to get this capability. - */ -- hns3_dev_handle_mbx_msg(hw); -+ hns3vf_handle_mbx_msg(hw); - if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != - HNS3_PF_PUSH_LSC_CAP_UNKNOWN) - break; -@@ -705,6 +707,7 @@ hns3vf_get_capability(struct hns3_hw *hw) - hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; - hw->rss_info.ipv6_sctp_offload_supported = false; - hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; -+ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; - return 0; - } + /* The queue Id is used to identify the HW Q */ + uint16_t phys_qid; +@@ -49,7 +49,7 @@ struct ark_tx_queue { -@@ -722,6 +725,7 @@ hns3vf_get_capability(struct hns3_hw *hw) - hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; - hw->rss_info.ipv6_sctp_offload_supported = true; - hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; -+ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + /* next cache line - fields written by device */ + RTE_MARKER cacheline1 __rte_cache_min_aligned; +- volatile int32_t cons_index; /* hw is done, can be freed */ ++ volatile uint32_t cons_index; /* hw is done, can be freed */ + } __rte_cache_aligned; - return 0; - } -@@ -747,12 +751,13 @@ hns3vf_check_tqp_info(struct hns3_hw *hw) - static int - hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) - { -+ struct hns3_vf_to_pf_msg req; - uint8_t resp_msg; - int ret; + /* Forward declarations */ +@@ -108,7 +108,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + uint32_t user_meta[5]; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, -- HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, -- true, &resp_msg, sizeof(resp_msg)); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, -+ HNS3_MBX_GET_PORT_BASE_VLAN_STATE); -+ ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg)); - if (ret) { - if (ret == -ETIME) { - /* -@@ -793,10 +798,12 @@ hns3vf_get_queue_info(struct hns3_hw *hw) - { - #define HNS3VF_TQPS_RSS_INFO_LEN 6 - uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; -+ struct hns3_vf_to_pf_msg req; - int ret; + int stat; +- int32_t prod_index_limit; ++ uint32_t prod_index_limit; + uint16_t nb; + uint8_t user_len = 0; + const uint32_t min_pkt_len = ARK_MIN_TX_PKTLEN; +@@ -123,8 +123,13 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + /* leave 4 elements mpu data */ + prod_index_limit = queue->queue_size + queue->free_index - 4; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, -- resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); -+ hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0); -+ ret = hns3vf_mbx_send(hw, &req, true, -+ resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); - if (ret) { - PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); - return ret; -@@ -834,10 +841,11 @@ hns3vf_get_basic_info(struct hns3_hw *hw) - { - uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; - struct hns3_basic_info *basic_info; -+ struct hns3_vf_to_pf_msg req; - int ret; - -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, -- true, resp_msg, sizeof(resp_msg)); -+ hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0); -+ ret = hns3vf_mbx_send(hw, &req, true, resp_msg, 
sizeof(resp_msg)); - if (ret) { - hns3_err(hw, "failed to get basic info from PF, ret = %d.", - ret); -@@ -857,10 +865,11 @@ static int - hns3vf_get_host_mac_addr(struct hns3_hw *hw) - { - uint8_t host_mac[RTE_ETHER_ADDR_LEN]; -+ struct hns3_vf_to_pf_msg req; - int ret; - -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, -- true, host_mac, RTE_ETHER_ADDR_LEN); -+ hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0); -+ ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN); - if (ret) { - hns3_err(hw, "Failed to get mac addr from PF: %d", ret); - return ret; -@@ -909,6 +918,7 @@ static void - hns3vf_request_link_info(struct hns3_hw *hw) - { - struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); -+ struct hns3_vf_to_pf_msg req; - bool send_req; - int ret; - -@@ -920,8 +930,8 @@ hns3vf_request_link_info(struct hns3_hw *hw) - if (!send_req) - return; ++ /* Populate the buffer bringing prod_index up to or slightly beyond ++ * prod_index_limit. Prod_index will increment by 2 or more each ++ * iteration. Note: indexes are uint32_t, cast to (signed) int32_t ++ * to catch the slight overage case; e.g. (200 - 201) ++ */ + for (nb = 0; +- (nb < nb_pkts) && (prod_index_limit - queue->prod_index) > 0; ++ (nb < nb_pkts) && (int32_t)(prod_index_limit - queue->prod_index) > 0; + ++nb) { + mbuf = tx_pkts[nb]; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, -- NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) { - hns3_err(hw, "failed to fetch link status, ret = %d", ret); - return; -@@ -965,19 +975,18 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, - static int - hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) +@@ -194,13 +199,13 @@ eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf, + uint32_t *user_meta, uint8_t meta_cnt) { --#define HNS3VF_VLAN_MBX_MSG_LEN 5 -+ struct hns3_mbx_vlan_filter *vlan_filter; -+ struct hns3_vf_to_pf_msg req = {0}; - struct hns3_hw *hw = &hns->hw; -- uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; -- uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); -- uint8_t is_kill = on ? 0 : 1; - -- msg_data[0] = is_kill; -- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); -- memcpy(&msg_data[3], &proto, sizeof(proto)); -+ req.code = HNS3_MBX_SET_VLAN; -+ req.subcode = HNS3_MBX_VLAN_FILTER; -+ vlan_filter = (struct hns3_mbx_vlan_filter *)req.data; -+ vlan_filter->is_kill = on ? 
0 : 1; -+ vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN); -+ vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id); + struct rte_mbuf *next; +- int32_t free_queue_space; ++ uint32_t free_queue_space; + uint8_t flags = ARK_DDM_SOP; -- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, -- msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, -- 0); -+ return hns3vf_mbx_send(hw, &req, true, NULL, 0); - } + free_queue_space = queue->queue_mask - + (queue->prod_index - queue->free_index); + /* We need up to 4 mbufs for first header and 2 for subsequent ones */ +- if (unlikely(free_queue_space < (2 + (2 * mbuf->nb_segs)))) ++ if (unlikely(free_queue_space < (2U + (2U * mbuf->nb_segs)))) + return -1; - static int -@@ -1006,6 +1015,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) - static int - hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + while (mbuf != NULL) { +@@ -392,10 +397,11 @@ free_completed_tx(struct ark_tx_queue *queue) { -+ struct hns3_vf_to_pf_msg req; - uint8_t msg_data; - int ret; - -@@ -1013,9 +1023,10 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) - return 0; + struct rte_mbuf *mbuf; + union ark_tx_meta *meta; +- int32_t top_index; ++ uint32_t top_index; - msg_data = enable ? 1 : 0; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, -- HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, -- sizeof(msg_data), true, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, -+ HNS3_MBX_ENABLE_VLAN_FILTER); -+ memcpy(req.data, &msg_data, sizeof(msg_data)); -+ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); - if (ret) - hns3_err(hw, "%s vlan filter failed, ret = %d.", - enable ? "enable" : "disable", ret); -@@ -1026,12 +1037,15 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) - static int - hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) - { -+ struct hns3_vf_to_pf_msg req; - uint8_t msg_data; - int ret; + top_index = queue->cons_index; /* read once */ +- while ((top_index - queue->free_index) > 0) { ++ ++ while ((int32_t)(top_index - queue->free_index) > 0) { + meta = &queue->meta_q[queue->free_index & queue->queue_mask]; + if (likely((meta->flags & ARK_DDM_SOP) != 0)) { + mbuf = queue->bufs[queue->free_index & +diff --git a/dpdk/drivers/net/atlantic/atl_rxtx.c b/dpdk/drivers/net/atlantic/atl_rxtx.c +index cb6f8141a8..0f367faad5 100644 +--- a/dpdk/drivers/net/atlantic/atl_rxtx.c ++++ b/dpdk/drivers/net/atlantic/atl_rxtx.c +@@ -359,13 +359,13 @@ atl_rx_init(struct rte_eth_dev *eth_dev) + buff_size = RTE_ALIGN_FLOOR(buff_size, 1024); + if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) { + PMD_INIT_LOG(WARNING, +- "Port %d queue %d: mem pool buff size is too big\n", ++ "Port %d queue %d: mem pool buff size is too big", + rxq->port_id, rxq->queue_id); + buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX; + } + if (buff_size < 1024) { + PMD_INIT_LOG(ERR, +- "Port %d queue %d: mem pool buff size is too small\n", ++ "Port %d queue %d: mem pool buff size is too small", + rxq->port_id, rxq->queue_id); + return -EINVAL; + } +diff --git a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c b/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c +index 84d11ab3a5..06d79115b9 100644 +--- a/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c ++++ b/dpdk/drivers/net/atlantic/hw_atl/hw_atl_utils.c +@@ -76,7 +76,7 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) + self->fw_ver_actual) == 0) { + *fw_ops = &aq_fw_2x_ops; + } else { +- PMD_DRV_LOG(ERR, "Bad FW version detected: %x\n", ++ PMD_DRV_LOG(ERR, "Bad FW version detected: 
%x", + self->fw_ver_actual); + return -EOPNOTSUPP; + } +@@ -124,7 +124,7 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) + AQ_HW_SLEEP(10); + } + if (k == 1000) { +- PMD_DRV_LOG(ERR, "MAC kickstart failed\n"); ++ PMD_DRV_LOG(ERR, "MAC kickstart failed"); + return -EIO; + } - msg_data = enable ? 1 : 0; -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, -- &msg_data, sizeof(msg_data), false, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, -+ HNS3_MBX_VLAN_RX_OFF_CFG); -+ memcpy(req.data, &msg_data, sizeof(msg_data)); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) - hns3_err(hw, "vf %s strip failed, ret = %d.", - enable ? "enable" : "disable", ret); -@@ -1175,11 +1189,13 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) - static int - hns3vf_set_alive(struct hns3_hw *hw, bool alive) - { -+ struct hns3_vf_to_pf_msg req; - uint8_t msg_data; +@@ -152,7 +152,7 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) + AQ_HW_SLEEP(10); + } + if (k == 1000) { +- PMD_DRV_LOG(ERR, "FW kickstart failed\n"); ++ PMD_DRV_LOG(ERR, "FW kickstart failed"); + return -EIO; + } + /* Old FW requires fixed delay after init */ +@@ -209,7 +209,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) + aq_hw_write_reg(self, 0x534, 0xA0); - msg_data = alive ? 1 : 0; -- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, -- sizeof(msg_data), false, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0); -+ memcpy(req.data, &msg_data, sizeof(msg_data)); -+ return hns3vf_mbx_send(hw, &req, false, NULL, 0); - } + if (rbl_status == 0xF1A7) { +- PMD_DRV_LOG(ERR, "No FW detected. Dynamic FW load not implemented\n"); ++ PMD_DRV_LOG(ERR, "No FW detected. Dynamic FW load not implemented"); + return -EOPNOTSUPP; + } - static void -@@ -1187,11 +1203,12 @@ hns3vf_keep_alive_handler(void *param) - { - struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; - struct hns3_adapter *hns = eth_dev->data->dev_private; -+ struct hns3_vf_to_pf_msg req; - struct hns3_hw *hw = &hns->hw; - int ret; +@@ -221,7 +221,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) + AQ_HW_SLEEP(10); + } + if (k == 1000) { +- PMD_DRV_LOG(ERR, "FW kickstart failed\n"); ++ PMD_DRV_LOG(ERR, "FW kickstart failed"); + return -EIO; + } + /* Old FW requires fixed delay after init */ +@@ -246,7 +246,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) + } -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, -- false, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0); -+ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); - if (ret) - hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", - ret); -@@ -1330,9 +1347,11 @@ err_init_hardware: - static int - hns3vf_clear_vport_list(struct hns3_hw *hw) - { -- return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, -- HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, -- NULL, 0); -+ struct hns3_vf_to_pf_msg req; -+ -+ hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL, -+ HNS3_MBX_VPORT_LIST_CLEAR); -+ return hns3vf_mbx_send(hw, &req, false, NULL, 0); - } + if (k == 1000) { +- PMD_DRV_LOG(ERR, "Neither RBL nor FLB firmware started\n"); ++ PMD_DRV_LOG(ERR, "Neither RBL nor FLB firmware started"); + return -EOPNOTSUPP; + } - static int -@@ -1709,11 +1728,25 @@ is_vf_reset_done(struct hns3_hw *hw) - return true; +diff --git a/dpdk/drivers/net/axgbe/axgbe_common.h b/dpdk/drivers/net/axgbe/axgbe_common.h +index a5d11c5832..51532fb34a 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_common.h ++++ 
b/dpdk/drivers/net/axgbe/axgbe_common.h +@@ -407,8 +407,6 @@ + #define MAC_MDIOSCAR_PA_WIDTH 5 + #define MAC_MDIOSCAR_RA_INDEX 0 + #define MAC_MDIOSCAR_RA_WIDTH 16 +-#define MAC_MDIOSCAR_REG_INDEX 0 +-#define MAC_MDIOSCAR_REG_WIDTH 21 + #define MAC_MDIOSCCDR_BUSY_INDEX 22 + #define MAC_MDIOSCCDR_BUSY_WIDTH 1 + #define MAC_MDIOSCCDR_CMD_INDEX 16 +diff --git a/dpdk/drivers/net/axgbe/axgbe_dev.c b/dpdk/drivers/net/axgbe/axgbe_dev.c +index 6a7fddffca..5233633a53 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_dev.c ++++ b/dpdk/drivers/net/axgbe/axgbe_dev.c +@@ -63,15 +63,27 @@ static int mdio_complete(struct axgbe_port *pdata) + return 0; } -+static enum hns3_reset_level -+hns3vf_detect_reset_event(struct hns3_hw *hw) ++static unsigned int axgbe_create_mdio_sca(int port, int reg) +{ -+ enum hns3_reset_level reset = HNS3_NONE_RESET; -+ uint32_t cmdq_stat_reg; ++ unsigned int mdio_sca, da; + -+ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); -+ if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) -+ reset = HNS3_VF_RESET; ++ da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; + -+ return reset; ++ mdio_sca = 0; ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); ++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); ++ ++ return mdio_sca; +} + - bool - hns3vf_is_reset_pending(struct hns3_adapter *hns) + static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, + int reg, u16 val) { -+ enum hns3_reset_level last_req; - struct hns3_hw *hw = &hns->hw; -- enum hns3_reset_level reset; -+ enum hns3_reset_level new_req; - - /* - * According to the protocol of PCIe, FLR to a PF device resets the PF -@@ -1736,13 +1769,18 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return false; + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; -- hns3vf_check_event_cause(hns, NULL); -- reset = hns3vf_get_reset_level(hw, &hw->reset.pending); -- if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && -- hw->reset.level < reset) { -- hns3_warn(hw, "High level reset %d is pending", reset); -+ new_req = hns3vf_detect_reset_event(hw); -+ if (new_req == HNS3_NONE_RESET) -+ return false; -+ -+ last_req = hns3vf_get_reset_level(hw, &hw->reset.pending); -+ if (last_req == HNS3_NONE_RESET || last_req < new_req) { -+ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); -+ hns3_schedule_delayed_reset(hns); -+ hns3_warn(hw, "High level reset detected, delay do reset"); - return true; - } -+ - return false; - } +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); -@@ -1801,12 +1839,13 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) - static int - hns3vf_prepare_reset(struct hns3_adapter *hns) - { -+ struct hns3_vf_to_pf_msg req; - struct hns3_hw *hw = &hns->hw; - int ret; + mdio_sccd = 0; +@@ -97,9 +109,7 @@ static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, + unsigned int mdio_sca, mdio_sccd; + uint64_t timeout; - if (hw->reset.level == HNS3_VF_FUNC_RESET) { -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, -- 0, true, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0); -+ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); - if (ret) - return ret; - } -diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c -index f1743c195e..9cdbc1668a 100644 ---- 
a/dpdk/drivers/net/hns3/hns3_mbx.c -+++ b/dpdk/drivers/net/hns3/hns3_mbx.c -@@ -11,8 +11,6 @@ - #include "hns3_intr.h" - #include "hns3_rxtx.h" +- mdio_sca = 0; +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); +- AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); ++ mdio_sca = axgbe_create_mdio_sca(addr, reg); + AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); --#define HNS3_CMD_CODE_OFFSET 2 -- - static const struct errno_respcode_map err_code_map[] = { - {0, 0}, - {1, -EPERM}, -@@ -26,6 +24,14 @@ static const struct errno_respcode_map err_code_map[] = { - {95, -EOPNOTSUPP}, - }; + mdio_sccd = 0; +@@ -259,20 +269,28 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed) + return 0; + } -+void -+hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode) ++static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata) +{ -+ memset(req, 0, sizeof(struct hns3_vf_to_pf_msg)); -+ req->code = code; -+ req->subcode = subcode; ++ unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; ++ ++ /* From MAC ver 30H the TFCR is per priority, instead of per queue */ ++ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) ++ return max_q_count; ++ else ++ return (RTE_MIN(pdata->tx_q_count, max_q_count)); +} + - static int - hns3_resp_to_errno(uint16_t resp_code) + static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) { -@@ -72,7 +78,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - return -EIO; - } +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; -- hns3_dev_handle_mbx_msg(hw); -+ hns3vf_handle_mbx_msg(hw); - rte_delay_us(HNS3_WAIT_RESP_US); + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) + AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); - if (hw->mbx_resp.received_match_resp) -@@ -120,44 +126,24 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) - } + /* Clear MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -287,9 +305,8 @@ static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata) - int --hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, -- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, -- uint8_t *resp_data, uint16_t resp_len) -+hns3vf_mbx_send(struct hns3_hw *hw, -+ struct hns3_vf_to_pf_msg *req, bool need_resp, -+ uint8_t *resp_data, uint16_t resp_len) + static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) { -- struct hns3_mbx_vf_to_pf_cmd *req; -+ struct hns3_mbx_vf_to_pf_cmd *cmd; - struct hns3_cmd_desc desc; -- bool is_ring_vector_msg; -- int offset; - int ret; +- unsigned int max_q_count, q_count; + unsigned int reg, reg_val; +- unsigned int i; ++ unsigned int i, q_count; -- req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; -- -- /* first two bytes are reserved for code & subcode */ -- if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { -- hns3_err(hw, -- "VF send mbx msg fail, msg len %u exceeds max payload len %d", -- msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); -- return -EINVAL; -- } -- - hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); -- req->msg[0] = code; -- is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || -- (code == 
HNS3_MBX_UNMAP_RING_TO_VECTOR) || -- (code == HNS3_MBX_GET_RING_VECTOR_MAP); -- if (!is_ring_vector_msg) -- req->msg[1] = subcode; -- if (msg_data) { -- offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET; -- memcpy(&req->msg[offset], msg_data, msg_len); -- } -+ cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; -+ cmd->msg = *req; + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { +@@ -306,9 +323,7 @@ static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata) + } - /* synchronous send */ - if (need_resp) { -- req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; -+ cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; - rte_spinlock_lock(&hw->mbx_resp.lock); -- hns3_mbx_prepare_resp(hw, code, subcode); -- req->match_id = hw->mbx_resp.match_id; -+ hns3_mbx_prepare_resp(hw, req->code, req->subcode); -+ cmd->match_id = hw->mbx_resp.match_id; - ret = hns3_cmd_send(hw, &desc, 1); - if (ret) { - rte_spinlock_unlock(&hw->mbx_resp.lock); -@@ -166,7 +152,8 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - return ret; - } + /* Set MAC flow control */ +- max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES; +- q_count = RTE_MIN(pdata->tx_q_count, +- max_q_count); ++ q_count = axgbe_get_fc_queue_count(pdata); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + reg_val = AXGMAC_IOREAD(pdata, reg); +@@ -637,23 +652,21 @@ static void axgbe_config_dma_cache(struct axgbe_port *pdata) + unsigned int arcache, awcache, arwcache; -- ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); -+ ret = hns3_get_mbx_resp(hw, req->code, req->subcode, -+ resp_data, resp_len); - rte_spinlock_unlock(&hw->mbx_resp.lock); - } else { - /* asynchronous send */ -@@ -193,17 +180,17 @@ static void - hns3vf_handle_link_change_event(struct hns3_hw *hw, - struct hns3_mbx_pf_to_vf_cmd *req) - { -+ struct hns3_mbx_link_status *link_info = -+ (struct hns3_mbx_link_status *)req->msg.msg_data; - uint8_t link_status, link_duplex; -- uint16_t *msg_q = req->msg; - uint8_t support_push_lsc; - uint32_t link_speed; + arcache = 0; +- AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf); ++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); -- memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); -- link_status = rte_le_to_cpu_16(msg_q[1]); -- link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); -- hns3vf_update_link_status(hw, link_status, link_speed, -- link_duplex); -- support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; -+ link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); -+ link_speed = rte_le_to_cpu_32(link_info->speed); -+ link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex); -+ hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); -+ support_push_lsc = (link_info->flag) & 1u; - hns3vf_update_push_lsc_cap(hw, support_push_lsc); - } + awcache = 0; +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3); +- AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf); ++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf); + 
AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); -@@ -212,7 +199,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, - struct hns3_mbx_pf_to_vf_cmd *req) - { - enum hns3_reset_level reset_level; -- uint16_t *msg_q = req->msg; + arwcache = 0; +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3); +- AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf); ++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf); + AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache); + } - /* - * PF has asserted reset hence VF should go in pending -@@ -220,7 +206,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, - * has been completely reset. After this stack should - * eventually be re-initialized. - */ -- reset_level = rte_le_to_cpu_16(msg_q[1]); -+ reset_level = rte_le_to_cpu_16(req->msg.reset_level); - hns3_atomic_set_bit(reset_level, &hw->reset.pending); +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.c b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +index f174d46143..da45ebf45f 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.c ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.c +@@ -207,6 +207,7 @@ static struct axgbe_version_data axgbe_v2a = { + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, ++ .enable_rrc = 1, + }; - hns3_warn(hw, "PF inform reset level %d", reset_level); -@@ -242,8 +228,9 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) - * to match the request. - */ - if (req->match_id == resp->match_id) { -- resp->resp_status = hns3_resp_to_errno(req->msg[3]); -- memcpy(resp->additional_info, &req->msg[4], -+ resp->resp_status = -+ hns3_resp_to_errno(req->msg.resp_status); -+ memcpy(resp->additional_info, &req->msg.resp_data, - HNS3_MBX_MAX_RESP_DATA_SIZE); - rte_io_wmb(); - resp->received_match_resp = true; -@@ -256,7 +243,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) - * support copy request's match_id to its response. So VF follows the - * original scheme to process. 
- */ -- msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; -+ msg_data = (uint32_t)req->msg.vf_mbx_msg_code << -+ HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; - if (resp->req_msg_data != msg_data) { - hns3_warn(hw, - "received response tag (%u) is mismatched with requested tag (%u)", -@@ -264,8 +252,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) - return; + static struct axgbe_version_data axgbe_v2b = { +@@ -219,6 +220,7 @@ static struct axgbe_version_data axgbe_v2b = { + .ecc_support = 1, + .i2c_support = 1, + .an_cdr_workaround = 1, ++ .enable_rrc = 1, + }; + + static const struct rte_eth_desc_lim rx_desc_lim = { +@@ -1350,7 +1352,7 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, + tc_num = pdata->pfc_map[pfc_conf->priority]; + + if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) { +- PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n", ++ PMD_INIT_LOG(ERR, "Max supported traffic class: %d", + pdata->hw_feat.tc_cnt); + return -EINVAL; } +@@ -2267,6 +2269,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) -- resp->resp_status = hns3_resp_to_errno(req->msg[3]); -- memcpy(resp->additional_info, &req->msg[4], -+ resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); -+ memcpy(resp->additional_info, &req->msg.resp_data, - HNS3_MBX_MAX_RESP_DATA_SIZE); - rte_io_wmb(); - resp->received_match_resp = true; -@@ -296,11 +284,8 @@ static void - hns3pf_handle_link_change_event(struct hns3_hw *hw, - struct hns3_mbx_vf_to_pf_cmd *req) + /* Yellow Carp devices do not need cdr workaround */ + pdata->vdata->an_cdr_workaround = 0; ++ ++ /* Yellow Carp devices do not need rrc */ ++ pdata->vdata->enable_rrc = 0; + } else { + unknown_cpu = 1; + } +@@ -2404,12 +2409,14 @@ static int + axgbe_dev_close(struct rte_eth_dev *eth_dev) { --#define LINK_STATUS_OFFSET 1 --#define LINK_FAIL_CODE_OFFSET 2 -- -- if (!req->msg[LINK_STATUS_OFFSET]) -- hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); -+ if (!req->msg.link_status) -+ hns3_link_fail_parse(hw, req->msg.link_fail_code); + struct rte_pci_device *pci_dev; ++ struct axgbe_port *pdata; - hns3_update_linkstatus_and_event(hw, true); - } -@@ -309,8 +294,7 @@ static void - hns3_update_port_base_vlan_info(struct hns3_hw *hw, - struct hns3_mbx_pf_to_vf_cmd *req) - { --#define PVID_STATE_OFFSET 1 -- uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? -+ uint16_t new_pvid_state = req->msg.pvid_state ? 
- HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; - /* - * Currently, hardware doesn't support more than two layers VLAN offload -@@ -359,7 +343,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) - while (next_to_use != tail) { - desc = &crq->desc[next_to_use]; - req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; -- opcode = req->msg[0] & 0xff; -+ opcode = req->msg.code & 0xff; + PMD_INIT_FUNC_TRACE(); - flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); - if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) -@@ -388,9 +372,57 @@ scan_next: - } + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; - void --hns3_dev_handle_mbx_msg(struct hns3_hw *hw) -+hns3pf_handle_mbx_msg(struct hns3_hw *hw) -+{ -+ struct hns3_cmq_ring *crq = &hw->cmq.crq; -+ struct hns3_mbx_vf_to_pf_cmd *req; -+ struct hns3_cmd_desc *desc; -+ uint16_t flag; -+ -+ rte_spinlock_lock(&hw->cmq.crq.lock); -+ -+ while (!hns3_cmd_crq_empty(hw)) { -+ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { -+ rte_spinlock_unlock(&hw->cmq.crq.lock); -+ return; -+ } -+ desc = &crq->desc[crq->next_to_use]; -+ req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data; -+ -+ flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); -+ if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { -+ hns3_warn(hw, -+ "dropped invalid mailbox message, code = %u", -+ req->msg.code); -+ -+ /* dropping/not processing this invalid message */ -+ crq->desc[crq->next_to_use].flag = 0; -+ hns3_mbx_ring_ptr_move_crq(crq); -+ continue; -+ } -+ -+ switch (req->msg.code) { -+ case HNS3_MBX_PUSH_LINK_STATUS: -+ hns3pf_handle_link_change_event(hw, req); -+ break; -+ default: -+ hns3_err(hw, "received unsupported(%u) mbx msg", -+ req->msg.code); -+ break; -+ } -+ crq->desc[crq->next_to_use].flag = 0; -+ hns3_mbx_ring_ptr_move_crq(crq); -+ } -+ -+ /* Write back CMDQ_RQ header pointer, IMP need this pointer */ -+ hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); -+ -+ rte_spinlock_unlock(&hw->cmq.crq.lock); -+} -+ -+void -+hns3vf_handle_mbx_msg(struct hns3_hw *hw) - { -- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); - struct hns3_cmq_ring *crq = &hw->cmq.crq; - struct hns3_mbx_pf_to_vf_cmd *req; - struct hns3_cmd_desc *desc; -@@ -401,7 +433,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) - rte_spinlock_lock(&hw->cmq.crq.lock); ++ pdata = eth_dev->data->dev_private; + pci_dev = RTE_DEV_TO_PCI(eth_dev->device); + axgbe_dev_clear_queues(eth_dev); - handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY || -- !rte_thread_is_intr()) && hns->is_vf; -+ !rte_thread_is_intr()); - if (handle_out) { - /* - * Currently, any threads in the primary and secondary processes -@@ -432,7 +464,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) +@@ -2419,6 +2426,9 @@ axgbe_dev_close(struct rte_eth_dev *eth_dev) + axgbe_dev_interrupt_handler, + (void *)eth_dev); - desc = &crq->desc[crq->next_to_use]; - req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; -- opcode = req->msg[0] & 0xff; -+ opcode = req->msg.code & 0xff; ++ /* Disable all interrupts in the hardware */ ++ XP_IOWRITE(pdata, XP_INT_EN, 0x0); ++ + return 0; + } - flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); - if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { -@@ -446,8 +478,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) - continue; - } +diff --git a/dpdk/drivers/net/axgbe/axgbe_ethdev.h b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +index 7f19321d88..b4bd56e239 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_ethdev.h ++++ b/dpdk/drivers/net/axgbe/axgbe_ethdev.h +@@ 
-111,6 +111,7 @@ + /* Auto-negotiation */ + #define AXGBE_AN_MS_TIMEOUT 500 + #define AXGBE_LINK_TIMEOUT 5 ++#define AXGBE_KR_TRAINING_WAIT_ITER 50 -- handle_out = hns->is_vf && desc->opcode == 0; -- if (handle_out) { -+ if (desc->opcode == 0) { - /* Message already processed by other thread */ - crq->desc[crq->next_to_use].flag = 0; - hns3_mbx_ring_ptr_move_crq(crq); -@@ -464,16 +495,6 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) - case HNS3_MBX_ASSERTING_RESET: - hns3_handle_asserting_reset(hw, req); - break; -- case HNS3_MBX_PUSH_LINK_STATUS: -- /* -- * This message is reported by the firmware and is -- * reported in 'struct hns3_mbx_vf_to_pf_cmd' format. -- * Therefore, we should cast the req variable to -- * 'struct hns3_mbx_vf_to_pf_cmd' and then process it. -- */ -- hns3pf_handle_link_change_event(hw, -- (struct hns3_mbx_vf_to_pf_cmd *)req); -- break; - case HNS3_MBX_PUSH_VLAN_INFO: - /* - * When the PVID configuration status of VF device is -@@ -488,7 +509,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) - * hns3 PF kernel driver, VF driver will receive this - * mailbox message from PF driver. - */ -- hns3_handle_promisc_info(hw, req->msg[1]); -+ hns3_handle_promisc_info(hw, req->msg.promisc_en); - break; - default: - hns3_err(hw, "received unsupported(%u) mbx msg", -diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h -index 4a328802b9..2b6cb8f513 100644 ---- a/dpdk/drivers/net/hns3/hns3_mbx.h -+++ b/dpdk/drivers/net/hns3/hns3_mbx.h -@@ -89,7 +89,6 @@ enum hns3_mbx_link_fail_subcode { - HNS3_MBX_LF_XSFP_ABSENT, + #define AXGBE_SGMII_AN_LINK_STATUS BIT(1) + #define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +@@ -463,6 +464,7 @@ struct axgbe_version_data { + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int an_cdr_workaround; ++ unsigned int enable_rrc; }; --#define HNS3_MBX_MAX_MSG_SIZE 16 - #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 - #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 + struct axgbe_mmc_stats { +@@ -653,6 +655,7 @@ struct axgbe_port { + unsigned int parallel_detect; + unsigned int fec_ability; + unsigned long an_start; ++ unsigned long kr_start_time; + enum axgbe_an_mode an_mode; -@@ -107,6 +106,69 @@ struct hns3_mbx_resp_status { - uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; - }; + /* I2C support */ +diff --git a/dpdk/drivers/net/axgbe/axgbe_mdio.c b/dpdk/drivers/net/axgbe/axgbe_mdio.c +index 913ceada0d..d95a52659e 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_mdio.c ++++ b/dpdk/drivers/net/axgbe/axgbe_mdio.c +@@ -200,13 +200,14 @@ static void axgbe_switch_mode(struct axgbe_port *pdata) + axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); + } -+struct hns3_ring_chain_param { -+ uint8_t ring_type; -+ uint8_t tqp_index; -+ uint8_t int_gl_index; -+}; -+ -+struct hns3_mbx_vlan_filter { -+ uint8_t is_kill; -+ uint16_t vlan_id; -+ uint16_t proto; -+} __rte_packed; -+ -+struct hns3_mbx_link_status { -+ uint16_t link_status; -+ uint32_t speed; -+ uint16_t duplex; -+ uint8_t flag; -+} __rte_packed; -+ -+#define HNS3_MBX_MSG_MAX_DATA_SIZE 14 -+#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 -+struct hns3_vf_to_pf_msg { -+ uint8_t code; -+ union { -+ struct { -+ uint8_t subcode; -+ uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE]; -+ }; -+ struct { -+ uint8_t en_bc; -+ uint8_t en_uc; -+ uint8_t en_mc; -+ uint8_t en_limit_promisc; -+ }; -+ struct { -+ uint8_t vector_id; -+ uint8_t ring_num; -+ struct hns3_ring_chain_param -+ ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; -+ }; -+ struct { -+ uint8_t link_status; -+ uint8_t 
link_fail_code; -+ }; -+ }; -+}; -+ -+struct hns3_pf_to_vf_msg { -+ uint16_t code; -+ union { -+ struct { -+ uint16_t vf_mbx_msg_code; -+ uint16_t vf_mbx_msg_subcode; -+ uint16_t resp_status; -+ uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; -+ }; -+ uint16_t promisc_en; -+ uint16_t reset_level; -+ uint16_t pvid_state; -+ uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; -+ }; -+}; -+ - struct errno_respcode_map { - uint16_t resp_code; - int err_no; -@@ -122,7 +184,7 @@ struct hns3_mbx_vf_to_pf_cmd { - uint8_t msg_len; - uint8_t rsv2; - uint16_t match_id; -- uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; -+ struct hns3_vf_to_pf_msg msg; - }; +-static void axgbe_set_mode(struct axgbe_port *pdata, ++static bool axgbe_set_mode(struct axgbe_port *pdata, + enum axgbe_mode mode) + { + if (mode == axgbe_cur_mode(pdata)) +- return; ++ return false; - struct hns3_mbx_pf_to_vf_cmd { -@@ -131,20 +193,7 @@ struct hns3_mbx_pf_to_vf_cmd { - uint8_t msg_len; - uint8_t rsv1; - uint16_t match_id; -- uint16_t msg[8]; --}; -- --struct hns3_ring_chain_param { -- uint8_t ring_type; -- uint8_t tqp_index; -- uint8_t int_gl_index; --}; -- --#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 --struct hns3_vf_bind_vector_msg { -- uint8_t vector_id; -- uint8_t ring_num; -- struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; -+ struct hns3_pf_to_vf_msg msg; - }; + axgbe_change_mode(pdata, mode); ++ return true; + } - struct hns3_pf_rst_done_cmd { -@@ -158,8 +207,11 @@ struct hns3_pf_rst_done_cmd { - ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) + static bool axgbe_use_mode(struct axgbe_port *pdata, +@@ -357,6 +358,7 @@ static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata, + reg |= AXGBE_KR_TRAINING_ENABLE; + reg |= AXGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); ++ pdata->kr_start_time = rte_get_timer_cycles(); - struct hns3_hw; --void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); --int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, -- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, -- uint8_t *resp_data, uint16_t resp_len); -+void hns3pf_handle_mbx_msg(struct hns3_hw *hw); -+void hns3vf_handle_mbx_msg(struct hns3_hw *hw); -+void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, -+ uint8_t code, uint8_t subcode); -+int hns3vf_mbx_send(struct hns3_hw *hw, -+ struct hns3_vf_to_pf_msg *req_msg, bool need_resp, -+ uint8_t *resp_data, uint16_t resp_len); - #endif /* HNS3_MBX_H */ -diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c -index 15feb26043..3eae4caf52 100644 ---- a/dpdk/drivers/net/hns3/hns3_rss.c -+++ b/dpdk/drivers/net/hns3/hns3_rss.c -@@ -153,8 +153,7 @@ static const struct { - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | -- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | -- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), -+ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), - HNS3_RSS_TUPLE_IPV4_SCTP_M }, + PMD_DRV_LOG(DEBUG, "KR training initiated\n"); + if (pdata->phy_if.phy_impl.kr_training_post) +@@ -487,6 +489,7 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata) - /* IPV6-FRAG */ -@@ -274,8 +273,7 @@ static const struct { - BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | -- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | -- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), -+ 
BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), - HNS3_RSS_TUPLE_IPV6_SCTP_M }, - }; + axgbe_an_disable(pdata); + axgbe_switch_mode(pdata); ++ pdata->an_result = AXGBE_AN_READY; + axgbe_an_restart(pdata); -diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h -index 9d182a8025..0755760b45 100644 ---- a/dpdk/drivers/net/hns3/hns3_rss.h -+++ b/dpdk/drivers/net/hns3/hns3_rss.h -@@ -49,7 +49,6 @@ enum hns3_tuple_field { - HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, - HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, - HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, -- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, + return AXGBE_AN_INCOMPAT_LINK; +@@ -967,11 +970,34 @@ static void axgbe_check_link_timeout(struct axgbe_port *pdata) + { + unsigned long link_timeout; + unsigned long ticks; ++ unsigned long kr_time; ++ int wait; - /* IPV4 ENABLE FIELD */ - HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, -@@ -74,7 +73,6 @@ enum hns3_tuple_field { - HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, - HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, - HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, -- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, + link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT * + 2 * rte_get_timer_hz()); + ticks = rte_get_timer_cycles(); + if (time_after(ticks, link_timeout)) { ++ if ((axgbe_cur_mode(pdata) == AXGBE_MODE_KR) && ++ pdata->phy.autoneg == AUTONEG_ENABLE) { ++ /* AN restart should not happen while KR training is in progress. ++ * The while loop ensures no AN restart during KR training, ++ * waits up to 500ms and AN restart is triggered only if KR ++ * training is failed. ++ */ ++ wait = AXGBE_KR_TRAINING_WAIT_ITER; ++ while (wait--) { ++ kr_time = pdata->kr_start_time + ++ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT); ++ ticks = rte_get_timer_cycles(); ++ if (time_after(ticks, kr_time)) ++ break; ++ /* AN restart is not required, if AN result is COMPLETE */ ++ if (pdata->an_result == AXGBE_AN_COMPLETE) ++ return; ++ rte_delay_us(10500); ++ } ++ } ++ + PMD_DRV_LOG(NOTICE, "AN link timeout\n"); + axgbe_phy_config_aneg(pdata); + } +@@ -982,7 +1008,7 @@ static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata) + return pdata->phy_if.phy_impl.an_outcome(pdata); + } - /* IPV6 ENABLE FIELD */ - HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, -@@ -96,12 +94,12 @@ enum hns3_tuple_field { +-static void axgbe_phy_status_result(struct axgbe_port *pdata) ++static bool axgbe_phy_status_result(struct axgbe_port *pdata) + { + enum axgbe_mode mode; - #define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) - #define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) --#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) -+#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(19, 16) - #define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) - #define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) - #define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) - #define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) --#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) -+#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(51, 48) - #define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) - #define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) +@@ -1016,7 +1042,10 @@ static void axgbe_phy_status_result(struct axgbe_port *pdata) -diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index 09b7e90c70..8d0db134d2 100644 ---- a/dpdk/drivers/net/hns3/hns3_rxtx.c -+++ b/dpdk/drivers/net/hns3/hns3_rxtx.c -@@ -86,9 +86,14 @@ hns3_rx_queue_release(void *queue) - struct hns3_rx_queue *rxq = queue; - if (rxq) { - hns3_rx_queue_release_mbufs(rxq); -- if (rxq->mz) -+ if (rxq->mz) { - rte_memzone_free(rxq->mz); -- 
rte_free(rxq->sw_ring); -+ rxq->mz = NULL; -+ } -+ if (rxq->sw_ring) { -+ rte_free(rxq->sw_ring); -+ rxq->sw_ring = NULL; -+ } - rte_free(rxq); - } + pdata->phy.duplex = DUPLEX_FULL; + +- axgbe_set_mode(pdata, mode); ++ if (axgbe_set_mode(pdata, mode)) ++ return true; ++ else ++ return false; } -@@ -99,10 +104,18 @@ hns3_tx_queue_release(void *queue) - struct hns3_tx_queue *txq = queue; - if (txq) { - hns3_tx_queue_release_mbufs(txq); -- if (txq->mz) -+ if (txq->mz) { - rte_memzone_free(txq->mz); -- rte_free(txq->sw_ring); -- rte_free(txq->free); -+ txq->mz = NULL; -+ } -+ if (txq->sw_ring) { -+ rte_free(txq->sw_ring); -+ txq->sw_ring = NULL; -+ } -+ if (txq->free) { -+ rte_free(txq->free); -+ txq->free = NULL; -+ } - rte_free(txq); + + static int autoneg_time_out(unsigned long autoneg_start_time) +@@ -1051,7 +1080,7 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + &an_restart); + if (an_restart) { + axgbe_phy_config_aneg(pdata); +- return; ++ goto adjust_link; } - } -@@ -260,12 +273,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev) - hns3_free_tx_queues(dev); - } -+static int -+hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr) -+{ -+ uint64_t rem; + if (pdata->phy.link) { +@@ -1083,7 +1112,10 @@ static void axgbe_phy_status(struct axgbe_port *pdata) + return; + } + } +- axgbe_phy_status_result(pdata); + -+ rem = dma_addr & (hw->rx_dma_addr_align - 1); -+ if (rem > 0) { -+ hns3_err(hw, "The IO address of the beginning of the mbuf data " -+ "must be %u-byte aligned", hw->rx_dma_addr_align); -+ return -EINVAL; -+ } -+ return 0; -+} ++ if (axgbe_phy_status_result(pdata)) ++ return; + - static int - hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) + rte_bit_relaxed_clear32(AXGBE_LINK_INIT, + &pdata->dev_state); +diff --git a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c +index d97fbbfddd..12908d4e6f 100644 +--- a/dpdk/drivers/net/axgbe/axgbe_phy_impl.c ++++ b/dpdk/drivers/net/axgbe/axgbe_phy_impl.c +@@ -69,6 +69,7 @@ enum axgbe_sfp_cable { + AXGBE_SFP_CABLE_UNKNOWN = 0, + AXGBE_SFP_CABLE_ACTIVE, + AXGBE_SFP_CABLE_PASSIVE, ++ AXGBE_SFP_CABLE_FIBER, + }; + + enum axgbe_sfp_base { +@@ -116,9 +117,7 @@ enum axgbe_sfp_speed { + + #define AXGBE_SFP_BASE_BR 12 + #define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a +-#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d + #define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64 +-#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68 + + #define AXGBE_SFP_BASE_CU_CABLE_LEN 18 + +@@ -535,25 +534,22 @@ static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata) + static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom, + enum axgbe_sfp_speed sfp_speed) { - struct rte_mbuf *mbuf; - uint64_t dma_addr; - uint16_t i; -+ int ret; +- u8 *sfp_base, min, max; ++ u8 *sfp_base, min; - for (i = 0; i < rxq->nb_rx_desc; i++) { - mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); -@@ -286,6 +314,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) - dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); - rxq->rx_ring[i].addr = dma_addr; - rxq->rx_ring[i].rx.bd_base_info = 0; + sfp_base = sfp_eeprom->base; + + switch (sfp_speed) { + case AXGBE_SFP_SPEED_1000: + min = AXGBE_SFP_BASE_BR_1GBE_MIN; +- max = AXGBE_SFP_BASE_BR_1GBE_MAX; + break; + case AXGBE_SFP_SPEED_10000: + min = AXGBE_SFP_BASE_BR_10GBE_MIN; +- max = AXGBE_SFP_BASE_BR_10GBE_MAX; + break; + default: + return false; + } + +- return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) 
&& +- (sfp_base[AXGBE_SFP_BASE_BR] <= max)); ++ return sfp_base[AXGBE_SFP_BASE_BR] >= min; + } + + static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata) +@@ -578,6 +574,9 @@ static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata) + AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR))) + return false; + ++ /* Reset PHY - wait for self-clearing reset bit to clear */ ++ pdata->phy_if.phy_impl.reset(pdata); + -+ ret = hns3_check_rx_dma_addr(hw, dma_addr); -+ if (ret != 0) { -+ hns3_rx_queue_release_mbufs(rxq); -+ return ret; -+ } + if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN], + AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) { + phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX; +@@ -613,16 +612,21 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + + axgbe_phy_sfp_parse_quirks(pdata); + +- /* Assume ACTIVE cable unless told it is PASSIVE */ ++ /* Assume FIBER cable unless told otherwise */ + if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN]; +- } else { ++ } else if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_ACTIVE) { + phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE; ++ } else { ++ phy_data->sfp_cable = AXGBE_SFP_CABLE_FIBER; } - return 0; -@@ -686,13 +720,12 @@ tqp_reset_fail: - static int - hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) - { -- uint8_t msg_data[2]; -+ struct hns3_vf_to_pf_msg req; - int ret; + /* Determine the type of SFP */ +- if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) ++ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_FIBER && ++ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) ++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; ++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR; + else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR; +@@ -639,9 +643,6 @@ static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX; + else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = AXGBE_SFP_BASE_1000_T; +- else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) && +- axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000)) +- phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR; -- memcpy(msg_data, &queue_id, sizeof(uint16_t)); -- -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, -- sizeof(msg_data), true, NULL, 0); -+ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); -+ memcpy(req.data, &queue_id, sizeof(uint16_t)); -+ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); - if (ret) - hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", - queue_id, ret); -@@ -769,15 +802,14 @@ static int - hns3vf_reset_all_tqps(struct hns3_hw *hw) - { - #define HNS3VF_RESET_ALL_TQP_DONE 1U -+ struct hns3_vf_to_pf_msg req; - uint8_t reset_status; -- uint8_t msg_data[2]; - int ret; - uint16_t i; + switch (phy_data->sfp_base) { + case AXGBE_SFP_BASE_1000_T: +@@ -1225,6 +1226,10 @@ static void axgbe_phy_rx_reset(struct axgbe_port *pdata) -- memset(msg_data, 0, sizeof(msg_data)); -- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, -- sizeof(msg_data), true, &reset_status, -- sizeof(reset_status)); -+ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); -+ ret = hns3vf_mbx_send(hw, &req, true, -+ &reset_status, 
sizeof(reset_status)); - if (ret) { - hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret); - return ret; -@@ -2390,8 +2422,7 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, + static void axgbe_phy_pll_ctrl(struct axgbe_port *pdata, bool enable) { - struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); - -- mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | -- RTE_MBUF_F_RX_IEEE1588_TMST; -+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST; - if (hns3_timestamp_rx_dynflag > 0) { - *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, - rte_mbuf_timestamp_t *) = timestamp; -@@ -2670,6 +2701,7 @@ hns3_recv_scattered_pkts(void *rx_queue, - continue; - } ++ /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */ ++ if (pdata->phy.autoneg != AUTONEG_DISABLE) ++ return; ++ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, + XGBE_PMA_PLL_CTRL_MASK, + enable ? XGBE_PMA_PLL_CTRL_SET +@@ -1269,8 +1274,9 @@ static void axgbe_phy_perform_ratechange(struct axgbe_port *pdata, + axgbe_phy_rx_reset(pdata); -+ first_seg->ol_flags = 0; - if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) - hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); + reenable_pll: +- /* Re-enable the PLL control */ +- axgbe_phy_pll_ctrl(pdata, true); ++ /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */ ++ if (cmd != 0 && cmd != 5) ++ axgbe_phy_pll_ctrl(pdata, true); -@@ -2699,7 +2731,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + PMD_DRV_LOG(NOTICE, "firmware mailbox command did not complete\n"); + } +@@ -1697,8 +1703,15 @@ static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart) + if (reg & MDIO_STAT1_LSTATUS) + return 1; - first_seg->port = rxq->port_id; - first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); -- first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; -+ first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; - if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { - first_seg->hash.fdir.hi = - rte_le_to_cpu_16(rxd.rx.fd_id); -@@ -3617,58 +3649,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, - return false; ++ if (pdata->phy.autoneg == AUTONEG_ENABLE && ++ phy_data->port_mode == AXGBE_PORT_MODE_BACKPLANE) { ++ if (rte_bit_relaxed_get32(AXGBE_LINK_INIT, &pdata->dev_state)) { ++ *an_restart = 1; ++ } ++ } ++ + /* No link, attempt a receiver reset cycle */ +- if (phy_data->rrc_count++) { ++ if (pdata->vdata->enable_rrc && phy_data->rrc_count++) { + phy_data->rrc_count = 0; + axgbe_phy_rrc(pdata); + } +diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c +index c3283c94f3..51e5cabf7b 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x.c +@@ -1623,16 +1623,12 @@ static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) } --static bool --hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, -- uint32_t *l4_proto) --{ -- struct rte_ipv4_hdr *ipv4_hdr; -- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, -- m->outer_l2_len); -- if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) -- ipv4_hdr->hdr_checksum = 0; -- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { -- struct rte_udp_hdr *udp_hdr; -- /* -- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo -- * header for TSO packets -- */ -- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -- return true; -- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -- m->outer_l2_len + m->outer_l3_len); -- udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, 
ol_flags); -- -- return true; -- } -- *l4_proto = ipv4_hdr->next_proto_id; -- return false; --} -- --static bool --hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, -- uint32_t *l4_proto) --{ -- struct rte_ipv6_hdr *ipv6_hdr; -- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, -- m->outer_l2_len); -- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { -- struct rte_udp_hdr *udp_hdr; -- /* -- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo -- * header for TSO packets -- */ -- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) -- return true; -- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -- m->outer_l2_len + m->outer_l3_len); -- udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); -- -- return true; -- } -- *l4_proto = ipv6_hdr->proto; -- return false; --} -- - static void - hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ +-static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) ++static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode __rte_unused) { -@@ -3676,29 +3656,38 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) - uint32_t paylen, hdr_len, l4_proto; - struct rte_udp_hdr *udp_hdr; + uint32_t reset_code = 0; -- if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) -+ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) && -+ ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) || -+ !(ol_flags & RTE_MBUF_F_TX_TCP_SEG))) - return; + /* Select the UNLOAD request mode */ +- if (unload_mode == UNLOAD_NORMAL) { +- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; +- } else { +- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; +- } ++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { -- if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) -- return; -+ struct rte_ipv4_hdr *ipv4_hdr; -+ -+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, -+ m->outer_l2_len); -+ l4_proto = ipv4_hdr->next_proto_id; - } else { -- if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto)) -- return; -+ struct rte_ipv6_hdr *ipv6_hdr; -+ -+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, -+ m->outer_l2_len); -+ l4_proto = ipv6_hdr->proto; + /* Send the request to the MCP */ + if (!BNX2X_NOMCP(sc)) { +@@ -2389,7 +2385,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) + static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) + { + sc->ilt->lines = rte_calloc("", +- sizeof(struct ilt_line), ILT_MAX_LINES, ++ ILT_MAX_LINES, sizeof(struct ilt_line), + RTE_CACHE_LINE_SIZE); + return sc->ilt->lines == NULL; + } +@@ -8124,7 +8120,7 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc) + val = sc->devinfo.bc_ver >> 8; + if (val < BNX2X_BC_VER) { + /* for now only warn later we might need to enforce this */ +- PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC\n", ++ PMD_DRV_LOG(NOTICE, sc, "This driver needs bc_ver %X but found %X, please upgrade BC", + BNX2X_BC_VER, val); + } + sc->link_params.feature_config_flags |= +@@ -9489,16 +9485,16 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc) + hw_lock_val = (REG_RD(sc, hw_lock_reg)); + if (hw_lock_val) { + if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { +- PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n"); ++ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock"); + REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, + (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << 
SC_PORT(sc))); + } +- PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n"); ++ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock"); + REG_WR(sc, hw_lock_reg, 0xffffffff); } -+ if (l4_proto != IPPROTO_UDP) -+ return; -+ - /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ -- if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { -- hdr_len = m->l2_len + m->l3_len + m->l4_len; -- hdr_len += m->outer_l2_len + m->outer_l3_len; -- paylen = m->pkt_len - hdr_len; -- if (paylen <= m->tso_segsz) -- return; -- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -- m->outer_l2_len + -- m->outer_l3_len); -- udp_hdr->dgram_cksum = 0; -- } -+ hdr_len = m->l2_len + m->l3_len + m->l4_len; -+ hdr_len += m->outer_l2_len + m->outer_l3_len; -+ paylen = m->pkt_len - hdr_len; -+ if (paylen <= m->tso_segsz) -+ return; -+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, -+ m->outer_l2_len + -+ m->outer_l3_len); -+ udp_hdr->dgram_cksum = 0; - } + if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { +- PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n"); ++ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR"); + REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); + } - static int -diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c -index 3ca226156b..ffc1f6d874 100644 ---- a/dpdk/drivers/net/i40e/i40e_ethdev.c -+++ b/dpdk/drivers/net/i40e/i40e_ethdev.c -@@ -3724,8 +3724,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) - RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | - RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | - RTE_ETH_TX_OFFLOAD_MULTI_SEGS | -- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | - dev_info->tx_queue_offload_capa; -+ if (hw->mac.type == I40E_MAC_X722) { -+ dev_info->tx_offload_capa |= -+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; -+ } -+ - dev_info->dev_capa = - RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | - RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; -diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c -index 877e49151e..92165c8422 100644 ---- a/dpdk/drivers/net/i40e/i40e_flow.c -+++ b/dpdk/drivers/net/i40e/i40e_flow.c -@@ -1708,8 +1708,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -10331,12 +10327,13 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc) + REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); - ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type); + if (!CHIP_IS_E1x(sc)) { +- int factor = 0; ++ int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : ++ (CHIP_REV_IS_FPGA(sc) ? 400 : 0); -- if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || -- ether_type == RTE_ETHER_TYPE_IPV4 || -+ if (ether_type == RTE_ETHER_TYPE_IPV4 || - ether_type == RTE_ETHER_TYPE_IPV6 || - ether_type == i40e_get_outer_vlan(dev)) { - rte_flow_error_set(error, EINVAL, -diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c -index 9aa5facb53..5e693cb1ea 100644 ---- a/dpdk/drivers/net/i40e/i40e_rxtx.c -+++ b/dpdk/drivers/net/i40e/i40e_rxtx.c -@@ -295,6 +295,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags, - */ - *cd_tunneling |= (tx_offload.l2_len >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; -+ -+ /** -+ * Calculate the tunneling UDP checksum (only supported with X722). 
-+ * Shall be set only if L4TUNT = 01b and EIPT is not zero -+ */ -+ if ((*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK) && -+ (*cd_tunneling & I40E_TXD_CTX_UDP_TUNNELING) && -+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) -+ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } + ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); + ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); - static inline void -diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c -index f468c1fd90..19cf0ac718 100644 ---- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c -+++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c -@@ -276,46 +276,30 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, - _mm256_loadu_si256((void *)&sw_ring[i + 4])); - #endif +-/* let the HW do it's magic... */ ++ /* let the HW do it's magic... */ + do { + DELAY(200000); + val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); +@@ -11189,11 +11186,9 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc) + /* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ +- if ((sc->interrupt_mode != INTR_MODE_MSIX) +- || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) { ++ if (sc->interrupt_mode == INTR_MODE_INTX || ++ sc->interrupt_mode == INTR_MODE_MSI) + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; +- } +- + /* + * Timers workaround bug: function init part. + * Need to wait 20msec after initializing ILT, +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c +index c07b01510a..72a26ed5cc 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c +@@ -75,10 +75,6 @@ bnx2x_storm_stats_post(struct bnx2x_softc *sc) + int rc; -- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; --#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC -- /* for AVX we need alignment otherwise loads are not atomic */ -- if (avx_aligned) { -- /* load in descriptors, 2 at a time, in reverse order */ -- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); -- } else --#endif -- do { -- const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); -- rte_compiler_barrier(); -- const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); -- rte_compiler_barrier(); -- const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); -- rte_compiler_barrier(); -- const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); -- rte_compiler_barrier(); -- const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + if (!sc->stats_pending) { +- if (sc->stats_pending) { +- return; +- } - -- raw_desc6_7 = _mm256_inserti128_si256( -- _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); -- raw_desc4_5 = _mm256_inserti128_si256( -- _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); -- raw_desc2_3 = _mm256_inserti128_si256( -- _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); -- raw_desc0_1 = _mm256_inserti128_si256( -- _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); -- } while (0); -+ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 
7)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); -+ -+ const __m256i raw_desc6_7 = _mm256_inserti128_si256( -+ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); -+ const __m256i raw_desc4_5 = _mm256_inserti128_si256( -+ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); -+ const __m256i raw_desc2_3 = _mm256_inserti128_si256( -+ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); -+ const __m256i raw_desc0_1 = _mm256_inserti128_si256( -+ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + sc->fw_stats_req->hdr.drv_stats_counter = + htole16(sc->stats_counter++); - if (split_packet) { - int j; -diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h -index 10868f2c30..d273d884f5 100644 ---- a/dpdk/drivers/net/iavf/iavf.h -+++ b/dpdk/drivers/net/iavf/iavf.h -@@ -18,7 +18,8 @@ +@@ -114,7 +110,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) - #define IAVF_AQ_LEN 32 - #define IAVF_AQ_BUF_SZ 4096 --#define IAVF_RESET_WAIT_CNT 500 -+#define IAVF_RESET_WAIT_CNT 2000 -+#define IAVF_RESET_DETECTED_CNT 500 - #define IAVF_BUF_SIZE_MIN 1024 - #define IAVF_FRAME_SIZE_MAX 9728 - #define IAVF_QUEUE_BASE_ADDR_UNIT 128 -@@ -511,5 +512,6 @@ int iavf_flow_sub_check(struct iavf_adapter *adapter, - struct iavf_fsub_conf *filter); - void iavf_dev_watchdog_enable(struct iavf_adapter *adapter); - void iavf_dev_watchdog_disable(struct iavf_adapter *adapter); --int iavf_handle_hw_reset(struct rte_eth_dev *dev); -+void iavf_handle_hw_reset(struct rte_eth_dev *dev); -+void iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change); - #endif /* _IAVF_ETHDEV_H_ */ -diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index d1edb0dd5c..c183ede113 100644 ---- a/dpdk/drivers/net/iavf/iavf_ethdev.c -+++ b/dpdk/drivers/net/iavf/iavf_ethdev.c -@@ -296,6 +296,7 @@ iavf_dev_watchdog(void *cb_arg) - PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed", - adapter->vf.eth_dev->data->name); - adapter->vf.vf_reset = false; -+ iavf_set_no_poll(adapter, false); - } - /* If not in reset then poll vfr_inprogress register for VFLR event */ - } else { -@@ -308,6 +309,7 @@ iavf_dev_watchdog(void *cb_arg) + /* Update MCP's statistics if possible */ + if (sc->func_stx) { +- rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, ++ memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + sizeof(sc->func_stats)); + } - /* enter reset state with VFLR event */ - adapter->vf.vf_reset = true; -+ iavf_set_no_poll(adapter, false); - adapter->vf.link_up = false; +@@ -817,10 +813,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) + etherstatspktsover1522octets); + } - iavf_dev_event_post(adapter->vf.eth_dev, RTE_ETH_EVENT_INTR_RESET, -@@ -628,7 +630,8 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev) - RTE_ETH_VLAN_FILTER_MASK | - RTE_ETH_VLAN_EXTEND_MASK); - if (err) { -- PMD_DRV_LOG(ERR, "Failed to update vlan offload"); -+ PMD_DRV_LOG(INFO, -+ "VLAN offloading is not supported, or offloading was refused by the PF"); - 
return err; - } +- rte_memcpy(old, new, sizeof(struct nig_stats)); ++ memcpy(old, new, sizeof(struct nig_stats)); -@@ -704,9 +707,7 @@ iavf_dev_configure(struct rte_eth_dev *dev) - vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; +- rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), +- sizeof(struct mac_stx)); ++ memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)), ++ &pstats->mac_stx[1], sizeof(struct mac_stx)); + estats->brb_drop_hi = pstats->brb_drop_hi; + estats->brb_drop_lo = pstats->brb_drop_lo; + +@@ -1492,9 +1488,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) + REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); + if (!CHIP_IS_E3(sc)) { + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2); + REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, +- &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); ++ RTE_PTR_ADD(&sc->port.old_nig_stats, ++ offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2); } -- ret = iavf_dev_init_vlan(dev); -- if (ret) -- PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret); -+ iavf_dev_init_vlan(dev); + /* function stats */ +diff --git a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +index 63953c2979..5411df3a38 100644 +--- a/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c ++++ b/dpdk/drivers/net/bnx2x/bnx2x_vfpf.c +@@ -52,9 +52,9 @@ bnx2x_check_bull(struct bnx2x_softc *sc) - if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - if (iavf_init_rss(ad) != 0) { -@@ -1086,9 +1087,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) + /* check the mac address and VLAN and allocate memory if valid */ + if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) +- rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); ++ memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); + if (valid_bitmap & (1 << VLAN_VALID)) +- rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN); ++ memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan)); - PMD_INIT_FUNC_TRACE(); + sc->old_bulletin = *bull; -- if (vf->vf_reset) -- return 0; -- - if (adapter->closed) - return -1; +@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) -@@ -1165,7 +1163,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) - RTE_ETH_TX_OFFLOAD_TCP_CKSUM | - RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | - RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | -- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | - RTE_ETH_TX_OFFLOAD_TCP_TSO | - RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | - RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | -@@ -1174,6 +1171,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) - RTE_ETH_TX_OFFLOAD_MULTI_SEGS | - RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + bnx2x_check_bull(sc); -+ /* X710 does not support outer udp checksum */ -+ if (adapter->hw.mac.type != IAVF_MAC_XL710) -+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; -+ - if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC) - dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; +- rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); ++ memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); -@@ -2874,6 +2875,7 @@ iavf_dev_close(struct rte_eth_dev *dev) - if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) - iavf_config_promisc(adapter, false, false); + bnx2x_add_tlv(sc, query, 
query->first_tlv.tl.length, + BNX2X_VF_TLV_LIST_END, +@@ -583,9 +583,9 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set) + while (BNX2X_VF_STATUS_FAILURE == reply->status && + bnx2x_check_bull(sc)) { + /* A new mac was configured by PF for us */ +- rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, ++ memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + ETH_ALEN); +- rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, ++ memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + ETH_ALEN); -+ iavf_vf_reset(hw); - iavf_shutdown_adminq(hw); - if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { - /* disable uio intr before callback unregister */ -@@ -2916,8 +2918,10 @@ iavf_dev_close(struct rte_eth_dev *dev) - * effect. - */ - out: -- if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) -+ if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) { - vf->vf_reset = false; -+ iavf_set_no_poll(adapter, false); -+ } + rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); +@@ -622,10 +622,10 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc, + BNX2X_VF_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); - /* disable watchdog */ - iavf_dev_watchdog_disable(adapter); -@@ -2948,9 +2952,9 @@ static int - iavf_dev_reset(struct rte_eth_dev *dev) - { - int ret; -+ struct iavf_adapter *adapter = -+ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); - struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); -- struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); -- - /* - * Check whether the VF reset has been done and inform application, - * to avoid calling the virtual channel command, which may cause -@@ -2961,7 +2965,7 @@ iavf_dev_reset(struct rte_eth_dev *dev) - PMD_DRV_LOG(ERR, "Wait too long for reset done!\n"); - return ret; - } -- vf->vf_reset = false; -+ iavf_set_no_poll(adapter, false); +- rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); ++ memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + query->rss_key_size = T_ETH_RSS_KEY; - PMD_DRV_LOG(DEBUG, "Start dev_reset ...\n"); - ret = iavf_dev_uninit(dev); -@@ -2971,16 +2975,49 @@ iavf_dev_reset(struct rte_eth_dev *dev) - return iavf_dev_init(dev); - } +- rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); ++ memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; -+static inline bool -+iavf_is_reset(struct iavf_hw *hw) -+{ -+ return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) & -+ IAVF_VF_ARQLEN1_ARQENABLE_MASK); -+} -+ -+static bool -+iavf_is_reset_detected(struct iavf_adapter *adapter) -+{ -+ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); -+ int i; -+ -+ /* poll until we see the reset actually happen */ -+ for (i = 0; i < IAVF_RESET_DETECTED_CNT; i++) { -+ if (iavf_is_reset(hw)) -+ return true; -+ rte_delay_ms(20); -+ } -+ -+ return false; -+} -+ - /* - * Handle hardware reset - */ --int -+void - iavf_handle_hw_reset(struct rte_eth_dev *dev) - { - struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); -+ struct iavf_adapter *adapter = dev->data->dev_private; - int ret; + query->rss_result_mask = params->rss_result_mask; +diff --git a/dpdk/drivers/net/bnxt/bnxt.h b/dpdk/drivers/net/bnxt/bnxt.h +index 0e01b1d4ba..be2fd689bb 100644 +--- a/dpdk/drivers/net/bnxt/bnxt.h ++++ b/dpdk/drivers/net/bnxt/bnxt.h +@@ -449,8 +449,8 @@ struct bnxt_ring_mem_info { -+ if (!dev->data->dev_started) -+ return; -+ -+ if 
(!iavf_is_reset_detected(adapter)) { -+ PMD_DRV_LOG(DEBUG, "reset not start\n"); -+ return; -+ } -+ - vf->in_reset_recovery = true; -+ iavf_set_no_poll(adapter, false); + struct bnxt_ctx_pg_info { + uint32_t entries; +- void *ctx_pg_arr[MAX_CTX_PAGES]; +- rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; ++ void **ctx_pg_arr; ++ rte_iova_t *ctx_dma_arr; + struct bnxt_ring_mem_info ring_mem; + }; - ret = iavf_dev_reset(dev); - if (ret) -@@ -2997,15 +3034,26 @@ iavf_handle_hw_reset(struct rte_eth_dev *dev) - ret = iavf_dev_start(dev); - if (ret) - goto error; -- dev->data->dev_started = 1; +@@ -550,7 +550,6 @@ struct bnxt_mark_info { -- vf->in_reset_recovery = false; -- return 0; -+ dev->data->dev_started = 1; -+ goto exit; + struct bnxt_rep_info { + struct rte_eth_dev *vfr_eth_dev; +- pthread_mutex_t vfr_lock; + pthread_mutex_t vfr_start_lock; + bool conduit_valid; + }; +@@ -896,6 +895,7 @@ struct bnxt { + struct rte_ether_addr *mcast_addr_list; + rte_iova_t mc_list_dma_addr; + uint32_t nb_mc_addr; ++#define BNXT_DFLT_MAX_MC_ADDR 16 /* for compatibility with older firmware */ + uint32_t max_mcast_addr; /* maximum number of mcast filters supported */ - error: - PMD_DRV_LOG(DEBUG, "RESET recover with error code=%d\n", ret); -+exit: - vf->in_reset_recovery = false; -- return ret; -+ iavf_set_no_poll(adapter, false); -+ -+ return; -+} -+ -+void -+iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change) -+{ -+ struct iavf_info *vf = &adapter->vf; -+ -+ adapter->no_poll = (link_change & !vf->link_up) || -+ vf->vf_reset || vf->in_reset_recovery; + struct rte_eth_rss_conf rss_conf; /* RSS configuration. */ +diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +index acf7e6e46e..988895a065 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c ++++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c +@@ -1673,10 +1673,8 @@ bnxt_uninit_locks(struct bnxt *bp) + pthread_mutex_destroy(&bp->def_cp_lock); + pthread_mutex_destroy(&bp->health_check_lock); + pthread_mutex_destroy(&bp->err_recovery_lock); +- if (bp->rep_info) { +- pthread_mutex_destroy(&bp->rep_info->vfr_lock); ++ if (bp->rep_info) + pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); +- } } - static int -diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -index 07a69db540..d6c0180ffd 100644 ---- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -+++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c -@@ -1518,8 +1518,11 @@ iavf_security_ctx_create(struct iavf_adapter *adapter) - if (adapter->security_ctx == NULL) { - adapter->security_ctx = rte_malloc("iavf_security_ctx", - sizeof(struct iavf_security_ctx), 0); -- if (adapter->security_ctx == NULL) -+ if (adapter->security_ctx == NULL) { -+ rte_free(adapter->vf.eth_dev->security_ctx); -+ adapter->vf.eth_dev->security_ctx = NULL; - return -ENOMEM; -+ } + static void bnxt_drv_uninit(struct bnxt *bp) +@@ -4008,7 +4006,6 @@ static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, + + switch (module_info[0]) { + case SFF_MODULE_ID_SFP: +- module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; + if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { + pg_addr[2] = I2C_DEV_ADDR_A2; + pg_addr[3] = I2C_DEV_ADDR_A2; +@@ -4750,7 +4747,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + { + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + const struct rte_memzone *mz = NULL; +- char mz_name[RTE_MEMZONE_NAMESIZE]; ++ char name[RTE_MEMZONE_NAMESIZE]; + rte_iova_t mz_phys_addr; + uint64_t valid_bits = 0; + uint32_t sz; +@@ -4762,6 +4759,19 @@ static int 
bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / + BNXT_PAGE_SIZE; + rmem->page_size = BNXT_PAGE_SIZE; ++ ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_pg_arr == NULL) ++ return -ENOMEM; ++ ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", ++ suffix, idx, bp->eth_dev->data->port_id); ++ ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); ++ if (ctx_pg->ctx_dma_arr == NULL) ++ return -ENOMEM; ++ + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; +@@ -4769,13 +4779,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + valid_bits = PTU_PTE_VALID; + + if (rmem->nr_pages > 1) { +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, ++ snprintf(name, RTE_MEMZONE_NAMESIZE, + "bnxt_ctx_pg_tbl%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; +- mz = rte_memzone_lookup(mz_name); ++ name[RTE_MEMZONE_NAMESIZE - 1] = 0; ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + rmem->nr_pages * 8, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_2MB | +@@ -4794,11 +4804,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + rmem->pg_tbl_mz = mz; } - return 0; -diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -index 510b4d8f1c..49d41af953 100644 ---- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -+++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c -@@ -193,62 +193,30 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, - _mm256_loadu_si256((void *)&sw_ring[i + 4])); - #endif +- snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", ++ snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); +- mz = rte_memzone_lookup(mz_name); ++ mz = rte_memzone_lookup(name); + if (!mz) { +- mz = rte_memzone_reserve_aligned(mz_name, ++ mz = rte_memzone_reserve_aligned(name, + mem_size, + bp->eth_dev->device->numa_node, + RTE_MEMZONE_1GB | +@@ -4844,6 +4854,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + return; -- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; --#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC -- /* for AVX we need alignment otherwise loads are not atomic */ -- if (avx_aligned) { -- /* load in descriptors, 2 at a time, in reverse order */ -- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); -- } else --#endif -- { -- const __m128i raw_desc7 = -- _mm_load_si128((void *)(rxdp + 7)); -- rte_compiler_barrier(); -- const __m128i raw_desc6 = -- _mm_load_si128((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- const __m128i raw_desc5 = -- _mm_load_si128((void *)(rxdp + 5)); -- rte_compiler_barrier(); -- const __m128i raw_desc4 = -- _mm_load_si128((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- const __m128i raw_desc3 = -- _mm_load_si128((void *)(rxdp + 3)); -- rte_compiler_barrier(); -- const __m128i raw_desc2 = -- _mm_load_si128((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- const __m128i 
raw_desc1 = -- _mm_load_si128((void *)(rxdp + 1)); -- rte_compiler_barrier(); -- const __m128i raw_desc0 = -- _mm_load_si128((void *)(rxdp + 0)); -- -- raw_desc6_7 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc6), -- raw_desc7, 1); -- raw_desc4_5 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc4), -- raw_desc5, 1); -- raw_desc2_3 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc2), -- raw_desc3, 1); -- raw_desc0_1 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc0), -- raw_desc1, 1); -- } -+ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; ++ rte_free(bp->ctx->qp_mem.ctx_pg_arr); ++ rte_free(bp->ctx->srq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->cq_mem.ctx_pg_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_pg_arr); ++ rte_free(bp->ctx->stat_mem.ctx_pg_arr); ++ rte_free(bp->ctx->qp_mem.ctx_dma_arr); ++ rte_free(bp->ctx->srq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->cq_mem.ctx_dma_arr); ++ rte_free(bp->ctx->vnic_mem.ctx_dma_arr); ++ rte_free(bp->ctx->stat_mem.ctx_dma_arr); + -+ const __m256i raw_desc6_7 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); -+ const __m256i raw_desc4_5 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); -+ const __m256i raw_desc2_3 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); -+ const __m256i raw_desc0_1 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); + rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); +@@ -4856,6 +4877,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) + rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); - if (split_packet) { - int j; -diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c -index 0a3e1d082c..1111d30f57 100644 ---- a/dpdk/drivers/net/iavf/iavf_vchnl.c -+++ b/dpdk/drivers/net/iavf/iavf_vchnl.c -@@ -273,20 +273,18 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, - iavf_dev_watchdog_enable(adapter); - } - if (adapter->devargs.no_poll_on_link_down) { -- if (vf->link_up && adapter->no_poll) { -- adapter->no_poll = false; -- PMD_DRV_LOG(DEBUG, "VF no poll turned off"); -- } -- if (!vf->link_up) { -- adapter->no_poll = true; -+ iavf_set_no_poll(adapter, true); -+ if (adapter->no_poll) - PMD_DRV_LOG(DEBUG, "VF no poll turned on"); -- } -+ else -+ PMD_DRV_LOG(DEBUG, "VF no poll turned off"); - } - PMD_DRV_LOG(INFO, "Link status update:%s", - vf->link_up ? 
"up" : "down"); - break; - case VIRTCHNL_EVENT_RESET_IMPENDING: - vf->vf_reset = true; -+ iavf_set_no_poll(adapter, false); - PMD_DRV_LOG(INFO, "VF is resetting"); - break; - case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: -@@ -462,6 +460,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, - vf->link_up = false; - if (!vf->vf_reset) { - vf->vf_reset = true; -+ iavf_set_no_poll(adapter, false); - iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET, - NULL, 0); - } -@@ -485,14 +484,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, - iavf_dev_watchdog_enable(adapter); - } - if (adapter->devargs.no_poll_on_link_down) { -- if (vf->link_up && adapter->no_poll) { -- adapter->no_poll = false; -- PMD_DRV_LOG(DEBUG, "VF no poll turned off"); -- } -- if (!vf->link_up) { -- adapter->no_poll = true; -+ iavf_set_no_poll(adapter, true); -+ if (adapter->no_poll) - PMD_DRV_LOG(DEBUG, "VF no poll turned on"); -- } -+ else -+ PMD_DRV_LOG(DEBUG, "VF no poll turned off"); - } - iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); - break; -diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h -index 844e90bbce..1131379d63 100644 ---- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h -+++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h -@@ -1728,8 +1728,8 @@ struct ice_aqc_link_topo_addr { - #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) - /* Used to decode the handle field */ - #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) --#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) --#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 -+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM 0 -+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ BIT(9) - #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 - /* In case of a Mezzanine type */ - #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ -diff --git a/dpdk/drivers/net/ice/base/ice_bitops.h b/dpdk/drivers/net/ice/base/ice_bitops.h -index 3b71c1b7f5..5c17bcb674 100644 ---- a/dpdk/drivers/net/ice/base/ice_bitops.h -+++ b/dpdk/drivers/net/ice/base/ice_bitops.h -@@ -418,10 +418,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits) - * Note that this function assumes it is operating on a bitmap declared using - * ice_declare_bitmap. 
- */ --static inline int -+static inline u16 - ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) + for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { ++ rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); ++ rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); + if (bp->ctx->tqm_mem[i]) + rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); + } +@@ -6173,13 +6196,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) + for (i = 0; i < BNXT_MAX_CFA_CODE; i++) + bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; + +- rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); +- if (rc) { +- PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); +- bnxt_free_rep_info(bp); +- return rc; +- } +- + rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); + if (rc) { + PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); +diff --git a/dpdk/drivers/net/bnxt/bnxt_hwrm.c b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +index 06f196760f..94c3249ae4 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_hwrm.c ++++ b/dpdk/drivers/net/bnxt/bnxt_hwrm.c +@@ -863,6 +863,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) + bp->max_l2_ctx, bp->max_vnics); + bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); + bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters); ++ if (!bp->max_mcast_addr) ++ bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR; + memcpy(bp->dsn, resp->device_serial_number, sizeof(bp->dsn)); + + if (BNXT_PF(bp)) +@@ -3039,6 +3041,8 @@ static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link) + static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + struct bnxt_link_info *link_info) { -- int count = 0; -+ u16 count = 0; - u16 bit = 0; ++ uint16_t support_pam4_speeds = link_info->support_pam4_speeds; ++ uint16_t support_speeds = link_info->support_speeds; + uint16_t eth_link_speed = 0; - while (size > (bit = ice_find_next_bit(bm, size, bit))) { -diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c -index 8867279c28..7a50a0f9f0 100644 ---- a/dpdk/drivers/net/ice/base/ice_common.c -+++ b/dpdk/drivers/net/ice/base/ice_common.c -@@ -3890,8 +3890,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) +@@ -3070,29 +3074,30 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, + case RTE_ETH_LINK_SPEED_25G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; break; - case ICE_FEC_DIS_AUTO: - /* Set No FEC and auto FEC */ -- if (!ice_fw_supports_fec_dis_auto(hw)) -- return ICE_ERR_NOT_SUPPORTED; -+ if (!ice_fw_supports_fec_dis_auto(hw)) { -+ status = ICE_ERR_NOT_SUPPORTED; -+ goto out; -+ } - cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS; - /* fall-through */ - case ICE_FEC_AUTO: -@@ -4904,7 +4906,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) + case RTE_ETH_LINK_SPEED_40G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; + break; + case RTE_ETH_LINK_SPEED_50G: +- if (link_info->support_pam4_speeds & +- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { +- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; +- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; +- } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { 
++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } + break; + case RTE_ETH_LINK_SPEED_100G: +- if (link_info->support_pam4_speeds & +- HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { +- eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; +- link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; +- } else { ++ if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; ++ } else if (support_pam4_speeds & ++ HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { ++ eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; ++ link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; + } + break; + case RTE_ETH_LINK_SPEED_200G: +diff --git a/dpdk/drivers/net/bnxt/bnxt_reps.c b/dpdk/drivers/net/bnxt/bnxt_reps.c +index 78337431af..6d6b8252e2 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_reps.c ++++ b/dpdk/drivers/net/bnxt/bnxt_reps.c +@@ -32,6 +32,14 @@ static const struct eth_dev_ops bnxt_rep_dev_ops = { + .flow_ops_get = bnxt_flow_ops_get_op + }; - ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); ++static bool bnxt_rep_check_parent(struct bnxt_representor *rep) ++{ ++ if (!rep->parent_dev->data->dev_private) ++ return false; ++ ++ return true; ++} ++ + uint16_t + bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) + { +@@ -124,8 +132,8 @@ bnxt_rep_tx_burst(void *tx_queue, + qid = vfr_txq->txq->queue_id; + vf_rep_bp = vfr_txq->bp; + parent = vf_rep_bp->parent_dev->data->dev_private; +- pthread_mutex_lock(&parent->rep_info->vfr_lock); + ptxq = parent->tx_queues[qid]; ++ pthread_mutex_lock(&ptxq->txq_lock); -- dest_byte &= ~(mask); -+ dest_byte &= mask; + ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; - dest_byte >>= shift_width; +@@ -134,9 +142,9 @@ bnxt_rep_tx_burst(void *tx_queue, + vf_rep_bp->tx_pkts[qid]++; + } -@@ -4944,7 +4946,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) - /* the data in the memory is stored as little endian so mask it - * correctly - */ -- src_word &= ~(CPU_TO_LE16(mask)); -+ src_word &= CPU_TO_LE16(mask); +- rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); ++ rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); + ptxq->vfr_tx_cfa_action = 0; +- pthread_mutex_unlock(&parent->rep_info->vfr_lock); ++ pthread_mutex_unlock(&ptxq->txq_lock); - /* get the data back into host order before shifting */ - dest_word = LE16_TO_CPU(src_word); -@@ -4995,7 +4997,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) - /* the data in the memory is stored as little endian so mask it - * correctly - */ -- src_dword &= ~(CPU_TO_LE32(mask)); -+ src_dword &= CPU_TO_LE32(mask); + return rc; + } +@@ -266,12 +274,12 @@ int bnxt_representor_uninit(struct rte_eth_dev *eth_dev) + PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id); + eth_dev->data->mac_addrs = NULL; - /* get the data back into host order before shifting */ - dest_dword = LE32_TO_CPU(src_dword); -@@ -5046,7 +5048,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) - /* the data in the memory is stored as little endian so mask it - * correctly - */ -- src_qword &= ~(CPU_TO_LE64(mask)); -+ src_qword &= CPU_TO_LE64(mask); +- parent_bp = rep->parent_dev->data->dev_private; +- if (!parent_bp) { ++ if (!bnxt_rep_check_parent(rep)) { + PMD_DRV_LOG(DEBUG, "BNXT Port:%d 
already freed\n", + eth_dev->data->port_id); + return 0; + } ++ parent_bp = rep->parent_dev->data->dev_private; - /* get the data back into host order before shifting */ - dest_qword = LE64_TO_CPU(src_qword); -diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c -index f9266447d9..a0e4f5fa27 100644 ---- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c -+++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c -@@ -1534,16 +1534,14 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, - switch (blk) { - case ICE_BLK_RSS: - offset = GLQF_HMASK(mask_idx); -- val = (idx << GLQF_HMASK_MSK_INDEX_S) & -- GLQF_HMASK_MSK_INDEX_M; -- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; -+ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; -+ val |= ((u32)mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; - break; - case ICE_BLK_FD: - offset = GLQF_FDMASK(mask_idx); - val = (idx << GLQF_FDMASK_MSK_INDEX_S) & - GLQF_FDMASK_MSK_INDEX_M; -- val |= (mask << GLQF_FDMASK_MASK_S) & -- GLQF_FDMASK_MASK_M; -+ val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; - break; - default: - ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", -diff --git a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h -index d816df0ff6..39673e36f7 100644 ---- a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h -+++ b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h -@@ -1074,10 +1074,9 @@ struct ice_tx_ctx_desc { - __le64 qw1; - }; + parent_bp->num_reps--; + vf_id = rep->vf_id; +@@ -539,11 +547,12 @@ int bnxt_rep_dev_info_get_op(struct rte_eth_dev *eth_dev, + int rc = 0; --#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */ --#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */ --#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */ --#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */ -+#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */ -+#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */ -+#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */ + /* MAC Specifics */ +- parent_bp = rep_bp->parent_dev->data->dev_private; +- if (!parent_bp) { +- PMD_DRV_LOG(ERR, "Rep parent NULL!\n"); ++ if (!bnxt_rep_check_parent(rep_bp)) { ++ /* Need not be an error scenario, if parent is closed first */ ++ PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n"); + return rc; + } ++ parent_bp = rep_bp->parent_dev->data->dev_private; + PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n"); + dev_info->max_mac_addrs = parent_bp->max_l2_ctx; + dev_info->max_hash_mac_addrs = 0; +@@ -730,10 +739,10 @@ int bnxt_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + struct bnxt_tx_queue *parent_txq, *txq; + struct bnxt_vf_rep_tx_queue *vfr_txq; - #define ICE_TXD_CTX_QW1_DTYPE_S 0 - #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) -diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c -index e46aded12a..6b0794f562 100644 ---- a/dpdk/drivers/net/ice/base/ice_nvm.c -+++ b/dpdk/drivers/net/ice/base/ice_nvm.c -@@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, - enum ice_status status; - u32 inlen = *length; - u32 bytes_read = 0; -+ int retry_cnt = 0; - bool last_cmd; +- if (queue_idx >= rep_bp->rx_nr_rings) { ++ if (queue_idx >= rep_bp->tx_nr_rings) { + PMD_DRV_LOG(ERR, + "Cannot create Tx rings %d. 
%d rings available\n", +- queue_idx, rep_bp->rx_nr_rings); ++ queue_idx, rep_bp->tx_nr_rings); + return -EINVAL; + } - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); -@@ -106,11 +107,24 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, - offset, (u16)read_size, - data + bytes_read, last_cmd, - read_shadow_ram, NULL); -- if (status) -- break; -- -- bytes_read += read_size; -- offset += read_size; -+ if (status) { -+ if (hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY || -+ retry_cnt > ICE_SQ_SEND_MAX_EXECUTE) -+ break; -+ ice_debug(hw, ICE_DBG_NVM, -+ "NVM read EBUSY error, retry %d\n", -+ retry_cnt + 1); -+ ice_release_nvm(hw); -+ msleep(ICE_SQ_SEND_DELAY_TIME_MS); -+ status = ice_acquire_nvm(hw, ICE_RES_READ); -+ if (status) -+ break; -+ retry_cnt++; +diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.c b/dpdk/drivers/net/bnxt/bnxt_txq.c +index 4df4604975..696603757b 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txq.c ++++ b/dpdk/drivers/net/bnxt/bnxt_txq.c +@@ -112,6 +112,7 @@ void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx) + txq->mz = NULL; + + rte_free(txq->free); ++ pthread_mutex_destroy(&txq->txq_lock); + rte_free(txq); + dev->data->tx_queues[queue_idx] = NULL; + } +@@ -195,6 +196,11 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + goto err; + } + ++ rc = pthread_mutex_init(&txq->txq_lock, NULL); ++ if (rc != 0) { ++ PMD_DRV_LOG(ERR, "TxQ mutex init failed!"); ++ goto err; ++ } + return 0; + err: + bnxt_tx_queue_release_op(eth_dev, queue_idx); +diff --git a/dpdk/drivers/net/bnxt/bnxt_txq.h b/dpdk/drivers/net/bnxt/bnxt_txq.h +index 3a483ad5c3..9e54985c4c 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txq.h ++++ b/dpdk/drivers/net/bnxt/bnxt_txq.h +@@ -26,6 +26,7 @@ struct bnxt_tx_queue { + int index; + int tx_wake_thresh; + uint32_t vfr_tx_cfa_action; ++ pthread_mutex_t txq_lock; + struct bnxt_tx_ring_info *tx_ring; + + unsigned int cp_nr_rings; +diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c +index 899986764f..6500738ff2 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txr.c ++++ b/dpdk/drivers/net/bnxt/bnxt_txr.c +@@ -303,17 +303,24 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, + */ + txbd1->kid_or_ts_high_mss = 0; + +- if (txq->vfr_tx_cfa_action) +- txbd1->cfa_action = txq->vfr_tx_cfa_action; +- else +- txbd1->cfa_action = txq->bp->tx_cfa_action; ++ if (txq->vfr_tx_cfa_action) { ++ txbd1->cfa_action = txq->vfr_tx_cfa_action & 0xffff; ++ txbd1->cfa_action_high = (txq->vfr_tx_cfa_action >> 16) & ++ TX_BD_LONG_CFA_ACTION_HIGH_MASK; + } else { -+ bytes_read += read_size; -+ offset += read_size; -+ retry_cnt = 0; ++ txbd1->cfa_action = txq->bp->tx_cfa_action & 0xffff; ++ txbd1->cfa_action_high = (txq->bp->tx_cfa_action >> 16) & ++ TX_BD_LONG_CFA_ACTION_HIGH_MASK; + } - } while (!last_cmd); - *length = bytes_read; -@@ -474,7 +488,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - { - enum ice_status status; - u16 pfa_len, pfa_ptr; -- u16 next_tlv; -+ u32 next_tlv; + if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { + uint16_t hdr_size; - status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); - if (status != ICE_SUCCESS) { -@@ -490,25 +504,30 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, - * of TLVs to find the requested one. 
+ /* TSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO | +- TX_BD_LONG_LFLAGS_T_IPID; ++ TX_BD_LONG_LFLAGS_T_IPID | ++ TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | ++ TX_BD_LONG_LFLAGS_T_IP_CHKSUM; + hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len; + hdr_size += (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? +@@ -562,6 +569,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) + + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) ++{ ++ struct bnxt_tx_queue *txq = tx_queue; ++ uint16_t rc; ++ ++ pthread_mutex_lock(&txq->txq_lock); ++ rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts); ++ pthread_mutex_unlock(&txq->txq_lock); ++ ++ return rc; ++} ++ ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts) + { + int rc; + uint16_t nb_tx_pkts = 0; +diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.h b/dpdk/drivers/net/bnxt/bnxt_txr.h +index e64ea2c7d1..09078d545d 100644 +--- a/dpdk/drivers/net/bnxt/bnxt_txr.h ++++ b/dpdk/drivers/net/bnxt/bnxt_txr.h +@@ -47,7 +47,9 @@ void bnxt_free_tx_rings(struct bnxt *bp); + int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); + int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); + uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, +- uint16_t nb_pkts); ++ uint16_t nb_pkts); ++uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts); + #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) + uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +diff --git a/dpdk/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c b/dpdk/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c +index f26d93e7a9..130985f92a 100644 +--- a/dpdk/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c ++++ b/dpdk/drivers/net/bnxt/tf_core/cfa_tcam_mgr.c +@@ -909,6 +909,7 @@ cfa_tcam_mgr_init(int sess_idx, enum cfa_tcam_mgr_device_type type, + /* Now calculate the max entries per table and global max entries based + * on the updated table limits. 
*/ - next_tlv = pfa_ptr + 1; -- while (next_tlv < pfa_ptr + pfa_len) { -+ while (next_tlv < ((u32)pfa_ptr + pfa_len)) { - u16 tlv_sub_module_type; - u16 tlv_len; ++ cfa_tcam_mgr_max_entries[sess_idx] = 0; + for (dir = 0; dir < ARRAY_SIZE(cfa_tcam_mgr_tables[sess_idx]); dir++) + for (tbl_type = 0; + tbl_type < ARRAY_SIZE(cfa_tcam_mgr_tables[sess_idx][dir]); +@@ -958,8 +959,8 @@ cfa_tcam_mgr_init(int sess_idx, enum cfa_tcam_mgr_device_type type, + if (parms != NULL) + parms->max_entries = cfa_tcam_mgr_max_entries[sess_idx]; + +- CFA_TCAM_MGR_LOG(INFO, "Global TCAM table initialized for sess_idx %d.\n", +- sess_idx); ++ CFA_TCAM_MGR_LOG(DEBUG, "Global TCAM table initialized for sess_idx %d max entries %d.\n", ++ sess_idx, cfa_tcam_mgr_max_entries[sess_idx]); - /* Read TLV type */ -- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); -- if (status != ICE_SUCCESS) { -+ status = ice_read_sr_word(hw, (u16)next_tlv, -+ &tlv_sub_module_type); -+ if (status) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); - break; + return 0; + } +@@ -1677,6 +1678,11 @@ cfa_tcam_mgr_shared_entry_move(int sess_idx, struct cfa_tcam_mgr_context *contex + uint8_t key[CFA_TCAM_MGR_MAX_KEY_SIZE]; + uint8_t mask[CFA_TCAM_MGR_MAX_KEY_SIZE]; + uint8_t result[CFA_TCAM_MGR_MAX_KEY_SIZE]; ++ /* ++ * Copy entry size before moving else if ++ * slice number is non zero and entry size is zero it will cause issues ++ */ ++ dst_row->entry_size = src_row->entry_size; + + int rc; + +@@ -1751,7 +1757,6 @@ cfa_tcam_mgr_shared_entry_move(int sess_idx, struct cfa_tcam_mgr_context *contex + + ROW_ENTRY_SET(dst_row, dst_row_slice); + dst_row->entries[dst_row_slice] = entry_id; +- dst_row->entry_size = src_row->entry_size; + dst_row->priority = src_row->priority; + ROW_ENTRY_CLEAR(src_row, entry->slice); + entry->row = dst_row_index; +diff --git a/dpdk/drivers/net/bnxt/tf_core/tf_msg.c b/dpdk/drivers/net/bnxt/tf_core/tf_msg.c +index 1c66c7e01a..46e9d4187a 100644 +--- a/dpdk/drivers/net/bnxt/tf_core/tf_msg.c ++++ b/dpdk/drivers/net/bnxt/tf_core/tf_msg.c +@@ -25,7 +25,7 @@ + */ + #define TF_MSG_SET_GLOBAL_CFG_DATA_SIZE 16 + #define TF_MSG_EM_INSERT_KEY_SIZE 64 +-#define TF_MSG_EM_INSERT_RECORD_SIZE 80 ++#define TF_MSG_EM_INSERT_RECORD_SIZE 96 + #define TF_MSG_TBL_TYPE_SET_DATA_SIZE 88 + + /* Compile check - Catch any msg changes that we depend on, like the +@@ -1612,20 +1612,20 @@ tf_msg_tcam_entry_set(struct tf *tfp, + req.result_size = parms->result_size; + data_size = 2 * req.key_size + req.result_size; + +- if (data_size <= TF_PCI_BUF_SIZE_MAX) { +- /* use pci buffer */ +- data = &req.dev_data[0]; +- } else { +- /* use dma buffer */ +- req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA; +- rc = tf_msg_alloc_dma_buf(&buf, data_size); +- if (rc) +- goto cleanup; +- data = buf.va_addr; +- tfp_memcpy(&req.dev_data[0], +- &buf.pa_addr, +- sizeof(buf.pa_addr)); +- } ++ /* ++ * Always use dma buffer, as the delete multi slice ++ * tcam entries not support with HWRM request buffer ++ * only DMA'ed buffer can update the mode bits for ++ * the delete to work ++ */ ++ req.flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA; ++ rc = tf_msg_alloc_dma_buf(&buf, data_size); ++ if (rc) ++ goto cleanup; ++ data = buf.va_addr; ++ tfp_memcpy(&req.dev_data[0], ++ &buf.pa_addr, ++ sizeof(buf.pa_addr)); + + tfp_memcpy(&data[0], parms->key, parms->key_size); + tfp_memcpy(&data[parms->key_size], parms->mask, parms->key_size); +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c b/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c +index 
239191e14e..b0d9d8d3d9 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c ++++ b/dpdk/drivers/net/bnxt/tf_ulp/bnxt_tf_pmd_shim.c +@@ -32,9 +32,16 @@ bnxt_tunnel_dst_port_alloc(struct bnxt *bp, + uint16_t port, + uint8_t type) + { +- return bnxt_hwrm_tunnel_dst_port_alloc(bp, ++ int rc = 0; ++ rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, + port, + type); ++ if (rc) { ++ PMD_DRV_LOG(ERR, "Tunnel type:%d alloc failed for port:%d error:%s\n", ++ type, port, (rc == HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED) ? ++ "already allocated" : "no resource"); ++ } ++ return rc; + } + + int +@@ -589,7 +596,12 @@ bnxt_pmd_global_tunnel_set(uint16_t port_id, uint8_t type, } - /* Read TLV length */ -- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); -+ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len); - if (status != ICE_SUCCESS) { - ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); - break; + + rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port, hwtype); +- if (!rc) { ++ if (rc) { ++ if (rc == HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED) ++ PMD_DRV_LOG(ERR, "Tunnel already allocated, type:%d port:%d\n", hwtype, udp_port); ++ else ++ PMD_DRV_LOG(ERR, "Tunnel allocation failed, type:%d port:%d\n", hwtype, udp_port); ++ } else { + ulp_global_tunnel_db[type].ref_cnt++; + ulp_global_tunnel_db[type].dport = udp_port; + bnxt_pmd_global_reg_data_to_hndl(port_id, bp->ecpri_upar_in_use, +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +index f3f5bda890..852deef3b4 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c +@@ -253,6 +253,7 @@ ulp_ha_mgr_timer_cb(void *arg) + + myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx); + if (myclient_cnt == 0) { ++ bnxt_ulp_cntxt_entry_release(); + BNXT_TF_DBG(ERR, + "PANIC Client Count is zero kill timer\n."); + return; +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +index 79f1b3f1a0..3cca8a07f3 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_8023ad.c +@@ -702,7 +702,7 @@ selection_logic(struct bond_dev_private *internals, uint16_t member_id) + ret = rte_eth_link_get_nowait(members[i], &link_info); + if (ret < 0) { + RTE_BOND_LOG(ERR, +- "Member (port %u) link get failed: %s\n", ++ "Member (port %u) link get failed: %s", + members[i], rte_strerror(-ret)); + continue; + } +@@ -865,7 +865,6 @@ bond_mode_8023ad_periodic_cb(void *arg) + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct port *port; + struct rte_eth_link link_info; +- struct rte_ether_addr member_addr; + struct rte_mbuf *lacp_pkt = NULL; + uint16_t member_id; + uint16_t i; +@@ -880,7 +879,7 @@ bond_mode_8023ad_periodic_cb(void *arg) + ret = rte_eth_link_get_nowait(member_id, &link_info); + if (ret < 0) { + RTE_BOND_LOG(ERR, +- "Member (port %u) link get failed: %s\n", ++ "Member (port %u) link get failed: %s", + member_id, rte_strerror(-ret)); } -+ if (tlv_len > pfa_len) { -+ ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n"); -+ return ICE_ERR_INVAL_SIZE; -+ } - if (tlv_sub_module_type == module_type) { - if (tlv_len) { -- *module_tlv = next_tlv; -+ *module_tlv = (u16)next_tlv; - *module_tlv_len = tlv_len; - return ICE_SUCCESS; - } -@@ -749,7 +768,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, - orom_data, hw->flash.banks.orom_size); - if (status) { - ice_debug(hw, 
ICE_DBG_NVM, "Unable to read Option ROM data\n"); -- return status; -+ goto exit_error;; - } - /* Scan the memory buffer to locate the CIVD data section */ -@@ -773,7 +792,8 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, - if (sum) { - ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", - sum); -- goto err_invalid_checksum; -+ status = ICE_ERR_NVM; -+ goto exit_error; +@@ -892,7 +891,6 @@ bond_mode_8023ad_periodic_cb(void *arg) + key = 0; } - *civd = *tmp; -@@ -781,11 +801,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, - return ICE_SUCCESS; - } +- rte_eth_macaddr_get(member_id, &member_addr); + port = &bond_mode_8023ad_ports[member_id]; -+ status = ICE_ERR_NVM; - ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n"); + key = rte_cpu_to_be_16(key); +@@ -904,8 +902,8 @@ bond_mode_8023ad_periodic_cb(void *arg) + SM_FLAG_SET(port, NTT); + } --err_invalid_checksum: -+exit_error: - ice_free(hw, orom_data); -- return ICE_ERR_NVM; -+ return status; - } +- if (!rte_is_same_ether_addr(&port->actor.system, &member_addr)) { +- rte_ether_addr_copy(&member_addr, &port->actor.system); ++ if (!rte_is_same_ether_addr(&internals->mode4.mac_addr, &port->actor.system)) { ++ rte_ether_addr_copy(&internals->mode4.mac_addr, &port->actor.system); + if (port->aggregator_port_id == member_id) + SM_FLAG_SET(port, NTT); + } +@@ -1173,21 +1171,20 @@ void + bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) + { + struct bond_dev_private *internals = bond_dev->data->dev_private; +- struct rte_ether_addr member_addr; + struct port *member, *agg_member; + uint16_t member_id, i, j; - /** -diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c -index 548ef5e820..c507f211df 100644 ---- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c -+++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.c -@@ -2817,8 +2817,8 @@ ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, - val &= ~TS_CMD_MASK; - val |= cmd_val; + bond_mode_8023ad_stop(bond_dev); -- status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, val, -- lock_sbq); -+ status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, -+ val | TS_CMD_RX_TYPE, lock_sbq); - if (status) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, status %d\n", - status); -diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.h b/dpdk/drivers/net/ice/base/ice_ptp_hw.h -index 3667c9882d..f53b9e3ecc 100644 ---- a/dpdk/drivers/net/ice/base/ice_ptp_hw.h -+++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.h -@@ -295,6 +295,8 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw); - #define TS_CMD_MASK_E810 0xFF - #define TS_CMD_MASK 0xF - #define SYNC_EXEC_CMD 0x3 -+#define TS_CMD_RX_TYPE_S 0x4 -+#define TS_CMD_RX_TYPE MAKEMASK(0x18, TS_CMD_RX_TYPE_S) - - /* Macros to derive port low and high addresses on both quads */ - #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) -diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c -index a4d31647fe..21cfe53a6d 100644 ---- a/dpdk/drivers/net/ice/base/ice_sched.c -+++ b/dpdk/drivers/net/ice/base/ice_sched.c -@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, - if (!root) - return ICE_ERR_NO_MEMORY; - -- /* coverity[suspicious_sizeof] */ - root->children = (struct ice_sched_node **) -- ice_calloc(hw, hw->max_children[0], sizeof(*root)); -+ ice_calloc(hw, hw->max_children[0], sizeof(*root->children)); - if (!root->children) 
{ - ice_free(hw, root); - return ICE_ERR_NO_MEMORY; -@@ -186,9 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, - if (!node) - return ICE_ERR_NO_MEMORY; - if (hw->max_children[layer]) { -- /* coverity[suspicious_sizeof] */ - node->children = (struct ice_sched_node **) -- ice_calloc(hw, hw->max_children[layer], sizeof(*node)); -+ ice_calloc(hw, hw->max_children[layer], -+ sizeof(*node->children)); - if (!node->children) { - ice_free(hw, node); - return ICE_ERR_NO_MEMORY; -@@ -1069,11 +1068,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, - u32 *first_teid_ptr = first_node_teid; - u16 new_num_nodes = num_nodes; - enum ice_status status = ICE_SUCCESS; -+ u32 temp; ++ rte_eth_macaddr_get(internals->port_id, &internals->mode4.mac_addr); + for (i = 0; i < internals->active_member_count; i++) { + member_id = internals->active_members[i]; + member = &bond_mode_8023ad_ports[member_id]; +- rte_eth_macaddr_get(member_id, &member_addr); - *num_nodes_added = 0; - while (*num_nodes_added < num_nodes) { - u16 max_child_nodes, num_added = 0; -- u32 temp; +- if (rte_is_same_ether_addr(&member_addr, &member->actor.system)) ++ if (rte_is_same_ether_addr(&internals->mode4.mac_addr, &member->actor.system)) + continue; - status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, - layer, new_num_nodes, -diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c -index f7fcc3a8d4..c4fd07199e 100644 ---- a/dpdk/drivers/net/ice/base/ice_switch.c -+++ b/dpdk/drivers/net/ice/base/ice_switch.c -@@ -4603,7 +4603,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, - u16 vsi_handle_arr[2]; +- rte_ether_addr_copy(&member_addr, &member->actor.system); ++ rte_ether_addr_copy(&internals->mode4.mac_addr, &member->actor.system); + /* Do nothing if this port is not an aggregator. In other case + * Set NTT flag on every port that use this aggregator. 
*/ + if (member->aggregator_port_id != member_id) +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_alb.c b/dpdk/drivers/net/bonding/rte_eth_bond_alb.c +index 56945e2349..253f38da4a 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_alb.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_alb.c +@@ -60,7 +60,7 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev) + 0, data_size, socket_id); + + if (internals->mode6.mempool == NULL) { +- RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n", ++ RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.", + bond_dev->device->name); + goto mempool_alloc_error; + } +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_api.c b/dpdk/drivers/net/bonding/rte_eth_bond_api.c +index 99e496556a..ffc1322047 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_api.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_api.c +@@ -482,7 +482,7 @@ __eth_bond_member_add_lock_free(uint16_t bonding_port_id, uint16_t member_port_i + ret = rte_eth_dev_info_get(member_port_id, &dev_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, +- "%s: Error during getting device (port %u) info: %s\n", ++ "%s: Error during getting device (port %u) info: %s", + __func__, member_port_id, strerror(-ret)); - /* A rule already exists with the new VSI being added */ -- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) -+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle) - return ICE_ERR_ALREADY_EXISTS; + return ret; +@@ -609,7 +609,7 @@ __eth_bond_member_add_lock_free(uint16_t bonding_port_id, uint16_t member_port_i + &bonding_eth_dev->data->port_id); + internals->member_count--; + RTE_BOND_LOG(ERR, +- "Member (port %u) link get failed: %s\n", ++ "Member (port %u) link get failed: %s", + member_port_id, rte_strerror(-ret)); + return -1; + } +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c +index 71a91675f7..5d0be5caf5 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_flow.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_flow.c +@@ -180,6 +180,8 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, - vsi_handle_arr[0] = cur_fltr->vsi_handle; -@@ -4651,7 +4651,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + count->bytes = 0; + count->hits = 0; ++ count->bytes_set = 0; ++ count->hits_set = 0; + rte_memcpy(&member_count, count, sizeof(member_count)); + for (i = 0; i < internals->member_count; i++) { + ret = rte_flow_query(internals->members[i].port_id, +@@ -192,8 +194,12 @@ bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, + } + count->bytes += member_count.bytes; + count->hits += member_count.hits; ++ count->bytes_set |= member_count.bytes_set; ++ count->hits_set |= member_count.hits_set; + member_count.bytes = 0; + member_count.hits = 0; ++ member_count.bytes_set = 0; ++ member_count.hits_set = 0; + } + return 0; + } +diff --git a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +index c40d18d128..4144c86be4 100644 +--- a/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c ++++ b/dpdk/drivers/net/bonding/rte_eth_bond_pmd.c +@@ -191,7 +191,7 @@ bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev, + ret = rte_eth_dev_info_get(member_port, &member_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, +- "%s: Error during getting device (port %u) info: %s\n", ++ "%s: Error during getting device (port %u) info: %s", + __func__, member_port, strerror(-ret)); - /* A rule already exists with the new VSI being added */ - if 
(ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) -- return ICE_SUCCESS; -+ return ICE_ERR_ALREADY_EXISTS; + return ret; +@@ -221,7 +221,7 @@ bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) { + ret = rte_eth_dev_info_get(bond_dev->data->port_id, &bond_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, +- "%s: Error during getting device (port %u) info: %s\n", ++ "%s: Error during getting device (port %u) info: %s", + __func__, bond_dev->data->port_id, + strerror(-ret)); + +@@ -2289,7 +2289,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + ret = rte_eth_dev_info_get(member.port_id, &member_info); + if (ret != 0) { + RTE_BOND_LOG(ERR, +- "%s: Error during getting device (port %u) info: %s\n", ++ "%s: Error during getting device (port %u) info: %s", + __func__, + member.port_id, + strerror(-ret)); +diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.c b/dpdk/drivers/net/cnxk/cn10k_ethdev.c +index 4a4e97287c..3b7de891e0 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.c +@@ -30,7 +30,7 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) + flags |= NIX_RX_MULTI_SEG_F; - /* Update the previously created VSI list set with - * the new VSI ID passed in -@@ -7390,7 +7390,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles, - ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); +- if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) ++ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + flags |= NIX_RX_OFFLOAD_TSTAMP_F; - /* return number of free indexes */ -- return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); -+ return ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); - } + if (!dev->ptype_disable) +@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) + struct roc_nix_sq *sq = &dev->sqs[qidx]; + do { + handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F); ++ /* Check if SQ is empty */ + roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail); ++ if (head != tail) ++ continue; ++ ++ /* Check if completion CQ is empty */ ++ roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail); + } while (head != tail); + } - static void ice_set_recipe_index(unsigned long idx, u8 *bitmap) -@@ -8101,6 +8101,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, - enum ice_status status = ICE_SUCCESS; - struct ice_sw_recipe *rm; - u8 i; -+ u16 cnt; +@@ -467,7 +473,7 @@ cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix; + struct rte_eth_dev *eth_dev; + struct cn10k_eth_rxq *rxq; +- int i; ++ int i, rc; - if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) - return ICE_ERR_PARAM; -diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c -index 3ccba4db80..c1d2b91ad7 100644 ---- a/dpdk/drivers/net/ice/ice_ethdev.c -+++ b/dpdk/drivers/net/ice/ice_ethdev.c -@@ -1804,6 +1804,7 @@ ice_pf_setup(struct ice_pf *pf) + if (!dev) + return -EINVAL; +@@ -490,8 +496,22 @@ cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + * and MTU setting also requires MBOX message to be + * sent(VF->PF) + */ ++ if (dev->ptp_en) { ++ rc = rte_mbuf_dyn_rx_timestamp_register ++ (&dev->tstamp.tstamp_dynfield_offset, ++ &dev->tstamp.rx_tstamp_dynflag); ++ if (rc != 0) { ++ plt_err("Failed to register Rx timestamp field/flag"); ++ return -EINVAL; ++ } ++ } + eth_dev->rx_pkt_burst 
= nix_ptp_vf_burst; ++ rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; + rte_mb(); ++ if (dev->cnxk_sso_ptp_tstamp_cb) ++ dev->cnxk_sso_ptp_tstamp_cb(eth_dev->data->port_id, ++ NIX_RX_OFFLOAD_TSTAMP_F, dev->ptp_en); ++ } - pf->main_vsi = vsi; -+ rte_spinlock_init(&pf->link_lock); - return 0; - } -@@ -3621,17 +3622,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) - return 0; - } +diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c +index 575d0fabd5..47822a3d84 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c ++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev_sec.c +@@ -14,6 +14,13 @@ + #include + #include -+static enum ice_status -+ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse, -+ struct ice_link_status *link) ++cnxk_ethdev_rx_offload_cb_t cnxk_ethdev_rx_offload_cb; ++void ++cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb) +{ -+ struct ice_hw *hw = ICE_PF_TO_HW(pf); -+ int ret; -+ -+ rte_spinlock_lock(&pf->link_lock); -+ -+ ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL); -+ -+ rte_spinlock_unlock(&pf->link_lock); -+ -+ return ret; ++ cnxk_ethdev_rx_offload_cb = cb; +} + - static void - ice_get_init_link_status(struct rte_eth_dev *dev) + static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = { + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, +@@ -891,6 +898,9 @@ cn10k_eth_sec_session_create(void *device, + !(dev->rx_offload_flags & NIX_RX_REAS_F)) { + dev->rx_offload_flags |= NIX_RX_REAS_F; + cn10k_eth_set_rx_function(eth_dev); ++ if (cnxk_ethdev_rx_offload_cb) ++ cnxk_ethdev_rx_offload_cb(eth_dev->data->port_id, ++ NIX_RX_REAS_F); + } + } else { + struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr; +@@ -1087,8 +1097,8 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, { -- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; - struct ice_link_status link_status; - int ret; + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device; + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); +- struct roc_ot_ipsec_inb_sa *inb_sa_dptr; + struct rte_security_ipsec_xform *ipsec; ++ struct cn10k_sec_sess_priv sess_priv; + struct rte_crypto_sym_xform *crypto; + struct cnxk_eth_sec_sess *eth_sec; + bool inbound; +@@ -1109,6 +1119,11 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, + eth_sec->spi = conf->ipsec.spi; -- ret = ice_aq_get_link_info(hw->port_info, enable_lse, -- &link_status, NULL); -+ ret = ice_get_link_info_safe(pf, enable_lse, &link_status); - if (ret != ICE_SUCCESS) { - PMD_DRV_LOG(ERR, "Failed to get link info"); - pf->init_link_up = false; -@@ -3735,7 +3750,10 @@ ice_dev_start(struct rte_eth_dev *dev) - ice_set_tx_function(dev); + if (inbound) { ++ struct roc_ot_ipsec_inb_sa *inb_sa_dptr, *inb_sa; ++ struct cn10k_inb_priv_data *inb_priv; ++ ++ inb_sa = eth_sec->sa; ++ inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa); + inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr; + memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa)); - mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | -- RTE_ETH_VLAN_EXTEND_MASK | RTE_ETH_QINQ_STRIP_MASK; -+ RTE_ETH_VLAN_EXTEND_MASK; -+ if (ice_is_dvm_ena(hw)) -+ mask |= RTE_ETH_QINQ_STRIP_MASK; +@@ -1116,26 +1131,74 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess, + true); + if (rc) + return -EINVAL; ++ /* Use cookie for original data */ ++ inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie; + - ret = ice_vlan_offload_set(dev, mask); - if (ret) { - PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); -@@ -3876,7 +3894,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) - RTE_ETH_TX_OFFLOAD_TCP_CKSUM | - RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | - RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | -- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; -+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | -+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | -+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | -+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | -+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; - dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; - } ++ if (ipsec->options.stats == 1) { ++ /* Enable mib counters */ ++ inb_sa_dptr->w0.s.count_mib_bytes = 1; ++ inb_sa_dptr->w0.s.count_mib_pkts = 1; ++ } ++ ++ /* Enable out-of-place processing */ ++ if (ipsec->options.ingress_oop) ++ inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL; -@@ -3996,7 +4018,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) - { - #define CHECK_INTERVAL 50 /* 50ms */ - #define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ -- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); -+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); - struct ice_link_status link_status; - struct rte_eth_link link, old; - int status; -@@ -4010,8 +4032,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) - - do { - /* Get link status information from hardware */ -- status = ice_aq_get_link_info(hw->port_info, enable_lse, -- &link_status, NULL); -+ status = ice_get_link_info_safe(pf, enable_lse, &link_status); - if (status != ICE_SUCCESS) { - link.link_speed = RTE_ETH_SPEED_NUM_100M; - link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; -@@ -4802,19 +4823,35 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask) - ice_vsi_config_vlan_filter(vsi, false); - } + rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, 
eth_sec->sa, + eth_sec->inb, + sizeof(struct roc_ot_ipsec_inb_sa)); + if (rc) + return -EINVAL; ++ ++ /* Save userdata in inb private area */ ++ inb_priv->userdata = conf->userdata; + } else { +- struct roc_ot_ipsec_outb_sa *outb_sa_dptr; ++ struct roc_ot_ipsec_outb_sa *outb_sa_dptr, *outb_sa; ++ struct cn10k_outb_priv_data *outb_priv; ++ struct cnxk_ipsec_outb_rlens *rlens; -- if (mask & RTE_ETH_VLAN_STRIP_MASK) { -- if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) -- ice_vsi_config_vlan_stripping(vsi, true); -- else -- ice_vsi_config_vlan_stripping(vsi, false); -- } -+ struct ice_hw *hw = ICE_VSI_TO_HW(vsi); -+ if (!ice_is_dvm_ena(hw)) { -+ if (mask & RTE_ETH_VLAN_STRIP_MASK) { -+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) -+ ice_vsi_config_vlan_stripping(vsi, true); -+ else -+ ice_vsi_config_vlan_stripping(vsi, false); -+ } ++ outb_sa = eth_sec->sa; ++ outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa); ++ rlens = &outb_priv->rlens; + outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr; + memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa)); -- if (mask & RTE_ETH_QINQ_STRIP_MASK) { -- /* Enable or disable outer VLAN stripping */ -- if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) -- ice_vsi_config_outer_vlan_stripping(vsi, true); -- else -- ice_vsi_config_outer_vlan_stripping(vsi, false); -+ if (mask & RTE_ETH_QINQ_STRIP_MASK) { -+ PMD_DRV_LOG(ERR, "Single VLAN mode (SVM) does not support qinq"); -+ return -ENOTSUP; -+ } -+ } else { -+ if ((mask & RTE_ETH_VLAN_STRIP_MASK) | -+ (mask & RTE_ETH_QINQ_STRIP_MASK)) { -+ if (rxmode->offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | -+ RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) -+ ice_vsi_config_outer_vlan_stripping(vsi, true); -+ else -+ ice_vsi_config_outer_vlan_stripping(vsi, false); -+ } + rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto); + if (rc) + return -EINVAL; + -+ if (mask & RTE_ETH_QINQ_STRIP_MASK) { -+ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) -+ ice_vsi_config_vlan_stripping(vsi, true); -+ else -+ ice_vsi_config_vlan_stripping(vsi, false); ++ /* Save rlen info */ ++ cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto); ++ ++ if (ipsec->options.stats == 1) { ++ /* Enable mib counters */ ++ outb_sa_dptr->w0.s.count_mib_bytes = 1; ++ outb_sa_dptr->w0.s.count_mib_pkts = 1; + } ++ ++ sess_priv.u64 = 0; ++ sess_priv.sa_idx = outb_priv->sa_idx; ++ sess_priv.roundup_byte = rlens->roundup_byte; ++ sess_priv.roundup_len = rlens->roundup_len; ++ sess_priv.partial_len = rlens->partial_len; ++ sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode; ++ sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver; ++ /* Propagate inner checksum enable from SA to fast path */ ++ sess_priv.chksum = ++ (!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable); ++ sess_priv.dec_ttl = ipsec->options.dec_ttl; ++ if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35)) ++ sess_priv.nixtx_off = 1; ++ + rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa, + eth_sec->inb, + sizeof(struct roc_ot_ipsec_outb_sa)); + if (rc) + return -EINVAL; ++ ++ /* Save userdata */ ++ outb_priv->userdata = conf->userdata; ++ sess->fast_mdata = sess_priv.u64; } return 0; -diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h -index abe6dcdc23..d607f028e0 100644 ---- a/dpdk/drivers/net/ice/ice_ethdev.h -+++ b/dpdk/drivers/net/ice/ice_ethdev.h -@@ -548,6 +548,10 @@ struct ice_pf { - uint64_t rss_hf; - struct ice_tm_conf tm_conf; - uint16_t outer_ethertype; -+ 
/* lock prevent race condition between lsc interrupt handler -+ * and link status update during dev_start. -+ */ -+ rte_spinlock_t link_lock; - }; - - #define ICE_MAX_QUEUE_NUM 2048 -diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c -index f923641533..d8c46347d2 100644 ---- a/dpdk/drivers/net/ice/ice_hash.c -+++ b/dpdk/drivers/net/ice/ice_hash.c -@@ -650,10 +650,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, - uint8_t *pkt_buf, *msk_buf; - uint8_t tmp_val = 0; - uint8_t tmp_c = 0; -- int i, j; -+ int i, j, ret = 0; - - if (ad->psr == NULL) -- return -rte_errno; -+ return -ENOTSUP; - - raw_spec = item->spec; - raw_mask = item->mask; -@@ -670,8 +670,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, - return -ENOMEM; - - msk_buf = rte_zmalloc(NULL, pkt_len, 0); -- if (!msk_buf) -+ if (!msk_buf) { -+ rte_free(pkt_buf); - return -ENOMEM; -+ } +diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h +index 7bb4c86d75..86e4233dc7 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_rx.h ++++ b/dpdk/drivers/net/cnxk/cn10k_rx.h +@@ -705,7 +705,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, + if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) { + const uint64_t *wqe = (const uint64_t *)(mbuf + 1); - /* convert string to int array */ - for (i = 0, j = 0; i < spec_len; i += 2, j++) { -@@ -708,18 +710,22 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, - msk_buf[j] = tmp_val * 16 + tmp_c - '0'; +- if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) ++ if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) + rx = (const union nix_rx_parse_u *)(wqe + 1); } -- if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt)) -- return -rte_errno; -+ ret = ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt); -+ if (ret) -+ goto free_mem; - -- if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, -- pkt_len, ICE_BLK_RSS, true, &prof)) -- return -rte_errno; -+ ret = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, -+ pkt_len, ICE_BLK_RSS, true, &prof); -+ if (ret) -+ goto free_mem; +diff --git a/dpdk/drivers/net/cnxk/cn10k_rxtx.h b/dpdk/drivers/net/cnxk/cn10k_rxtx.h +index aeffc4ac92..9f33d0192e 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_rxtx.h ++++ b/dpdk/drivers/net/cnxk/cn10k_rxtx.h +@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe) + m = m_next; + } + rte_pktmbuf_free_seg(m); ++ txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL; - rte_memcpy(&meta->raw.prof, &prof, sizeof(prof)); + head++; + head &= qmask; +diff --git a/dpdk/drivers/net/cnxk/cn10k_tx.h b/dpdk/drivers/net/cnxk/cn10k_tx.h +index 467f0ccc65..c84154ee84 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_tx.h ++++ b/dpdk/drivers/net/cnxk/cn10k_tx.h +@@ -784,19 +784,35 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr, + } + #endif -+free_mem: - rte_free(pkt_buf); - rte_free(msk_buf); -- return 0; ++static inline void ++cn10k_nix_free_extmbuf(struct rte_mbuf *m) ++{ ++ struct rte_mbuf *m_next; ++ while (m != NULL) { ++ m_next = m->next; ++ rte_pktmbuf_free_seg(m); ++ m = m_next; ++ } ++} + -+ return ret; - } + static __rte_always_inline uint64_t +-cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq, +- struct nix_send_hdr_s *send_hdr) ++cn10k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn10k_eth_txq *txq, ++ struct nix_send_hdr_s *send_hdr, uint64_t *aura) + { ++ struct rte_mbuf *prev = NULL; + uint32_t sqe_id; - static void -diff --git 
a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index 73e47ae92d..dea6a5b535 100644 ---- a/dpdk/drivers/net/ice/ice_rxtx.c -+++ b/dpdk/drivers/net/ice/ice_rxtx.c -@@ -2734,9 +2734,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, - * Calculate the tunneling UDP checksum. - * Shall be set only if L4TUNT = 01b and EIPT is not zero - */ -- if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && -- (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && -- (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) -+ if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && -+ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && -+ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) - *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; + if (RTE_MBUF_HAS_EXTBUF(m)) { + if (unlikely(txq->tx_compl.ena == 0)) { +- rte_pktmbuf_free_seg(m); ++ m->next = *extm; ++ *extm = m; + return 1; + } + if (send_hdr->w0.pnc) { +- txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m; ++ sqe_id = send_hdr->w1.sqe_id; ++ prev = txq->tx_compl.ptr[sqe_id]; ++ m->next = prev; ++ txq->tx_compl.ptr[sqe_id] = m; + } else { + sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED); + send_hdr->w0.pnc = 1; +@@ -806,10 +822,160 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq, + } + return 1; + } else { +- return cnxk_nix_prefree_seg(m); ++ return cnxk_nix_prefree_seg(m, aura); + } } -diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c -index 6f6d790967..d6e88dbb29 100644 ---- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c -+++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c -@@ -254,62 +254,30 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, - _mm256_loadu_si256((void *)&sw_ring[i + 4])); - #endif - -- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; --#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC -- /* for AVX we need alignment otherwise loads are not atomic */ -- if (avx_aligned) { -- /* load in descriptors, 2 at a time, in reverse order */ -- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); -- } else --#endif -- { -- const __m128i raw_desc7 = -- _mm_load_si128((void *)(rxdp + 7)); -- rte_compiler_barrier(); -- const __m128i raw_desc6 = -- _mm_load_si128((void *)(rxdp + 6)); -- rte_compiler_barrier(); -- const __m128i raw_desc5 = -- _mm_load_si128((void *)(rxdp + 5)); -- rte_compiler_barrier(); -- const __m128i raw_desc4 = -- _mm_load_si128((void *)(rxdp + 4)); -- rte_compiler_barrier(); -- const __m128i raw_desc3 = -- _mm_load_si128((void *)(rxdp + 3)); -- rte_compiler_barrier(); -- const __m128i raw_desc2 = -- _mm_load_si128((void *)(rxdp + 2)); -- rte_compiler_barrier(); -- const __m128i raw_desc1 = -- _mm_load_si128((void *)(rxdp + 1)); -- rte_compiler_barrier(); -- const __m128i raw_desc0 = -- _mm_load_si128((void *)(rxdp + 0)); -- -- raw_desc6_7 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc6), -- raw_desc7, 1); -- raw_desc4_5 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc4), -- raw_desc5, 1); -- raw_desc2_3 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc2), -- raw_desc3, 1); -- raw_desc0_1 = -- _mm256_inserti128_si256 -- (_mm256_castsi128_si256(raw_desc0), -- raw_desc1, 1); -- } -+ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); -+ 
rte_compiler_barrier(); -+ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); -+ rte_compiler_barrier(); -+ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++#if defined(RTE_ARCH_ARM64) ++/* Only called for first segments of single segmented mbufs */ ++static __rte_always_inline void ++cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm, ++ struct cn10k_eth_txq *txq, ++ uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0, ++ uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1) ++{ ++ struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr; ++ uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask; ++ bool tx_compl_ena = txq->tx_compl.ena; ++ struct rte_mbuf *m0, *m1, *m2, *m3; ++ struct rte_mbuf *cookie; ++ uint64_t w0, w1, aura; ++ uint64_t sqe_id; + -+ const __m256i raw_desc6_7 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); -+ const __m256i raw_desc4_5 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); -+ const __m256i raw_desc2_3 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); -+ const __m256i raw_desc0_1 = -+ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); - - if (split_packet) { - int j; -diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -index 55840cf170..4b73465af5 100644 ---- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -+++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h -@@ -251,6 +251,10 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) - RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ - RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ - RTE_ETH_TX_OFFLOAD_TCP_TSO | \ -+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ -+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ -+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ -+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \ - RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) - - #define ICE_TX_VECTOR_OFFLOAD ( \ -diff --git a/dpdk/drivers/net/ice/ice_tm.c b/dpdk/drivers/net/ice/ice_tm.c -index f5ea47ae83..65b9fdf320 100644 ---- a/dpdk/drivers/net/ice/ice_tm.c -+++ b/dpdk/drivers/net/ice/ice_tm.c -@@ -58,8 +58,15 @@ void - ice_tm_conf_uninit(struct rte_eth_dev *dev) - { - struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); -+ struct ice_tm_shaper_profile *shaper_profile; - struct ice_tm_node *tm_node; - -+ /* clear profile */ -+ while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { -+ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); -+ rte_free(shaper_profile); -+ } ++ m0 = mbufs[0]; ++ m1 = mbufs[1]; ++ m2 = mbufs[2]; ++ m3 = mbufs[3]; + - /* clear node configuration */ - while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { - TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); -@@ -648,6 +655,8 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, - uint16_t buf_size = ice_struct_size(buf, txqs, 1); - - buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf)); -+ if (buf == NULL) -+ return -ENOMEM; - - queue_parent_node = queue_sched_node->parent; - buf->src_teid = 
queue_parent_node->info.node_teid; -@@ -659,6 +668,7 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, - NULL, buf, buf_size, &txqs_moved, NULL); - if (ret || txqs_moved == 0) { - PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id); -+ rte_free(buf); - return ICE_ERR_PARAM; - } - -@@ -668,12 +678,14 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, - } else { - PMD_DRV_LOG(ERR, "invalid children number %d for queue %u", - queue_parent_node->num_children, queue_id); -+ rte_free(buf); - return ICE_ERR_PARAM; - } - dst_node->children[dst_node->num_children++] = queue_sched_node; - queue_sched_node->parent = dst_node; - ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info); - -+ rte_free(buf); - return ret; - } - -diff --git a/dpdk/drivers/net/igc/igc_ethdev.c b/dpdk/drivers/net/igc/igc_ethdev.c -index 58c4f80927..690736b6d1 100644 ---- a/dpdk/drivers/net/igc/igc_ethdev.c -+++ b/dpdk/drivers/net/igc/igc_ethdev.c -@@ -2853,7 +2853,7 @@ eth_igc_timesync_disable(struct rte_eth_dev *dev) - IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, 0); - - val = IGC_READ_REG(hw, IGC_RXPBS); -- val &= IGC_RXPBS_CFG_TS_EN; -+ val &= ~IGC_RXPBS_CFG_TS_EN; - IGC_WRITE_REG(hw, IGC_RXPBS, val); - - val = IGC_READ_REG(hw, IGC_SRRCTL(0)); -diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c -index 340fd0cd59..4ec9598b8e 100644 ---- a/dpdk/drivers/net/ionic/ionic_ethdev.c -+++ b/dpdk/drivers/net/ionic/ionic_ethdev.c -@@ -561,7 +561,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, - struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); - struct ionic_adapter *adapter = lif->adapter; - struct ionic_identity *ident = &adapter->ident; -- int i, num; -+ int i, j, num; - uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz); - - IONIC_PRINT_CALL(); -@@ -582,9 +582,10 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, - num = reta_size / RTE_ETH_RETA_GROUP_SIZE; - - for (i = 0; i < num; i++) { -- memcpy(reta_conf->reta, -- &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE], -- RTE_ETH_RETA_GROUP_SIZE); -+ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { -+ reta_conf->reta[j] = -+ lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j]; ++ /* mbuf 0 */ ++ w0 = vgetq_lane_u64(*senddesc01_w0, 0); ++ if (RTE_MBUF_HAS_EXTBUF(m0)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc01_w1, 0); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m0->next = *extm; ++ *extm = m0; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m0; ++ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0); + } - reta_conf++; - } - -@@ -969,19 +970,21 @@ ionic_dev_close(struct rte_eth_dev *eth_dev) - - ionic_lif_stop(lif); - -- ionic_lif_free_queues(lif); -- - IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name); - if (adapter->intf->unconfigure_intr) - (*adapter->intf->unconfigure_intr)(adapter); - -- rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); -- - ionic_port_reset(adapter); - ionic_reset(adapter); -+ -+ ionic_lif_free_queues(lif); -+ ionic_lif_deinit(lif); -+ ionic_lif_free(lif); /* Does not free LIF object */ ++ } else { ++ cookie = RTE_MBUF_DIRECT(m0) ? 
m0 : rte_mbuf_from_indirect(m0); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19; ++ w0 |= aura << 20; + - if (adapter->intf->unmap_bars) - (*adapter->intf->unmap_bars)(adapter); - -+ lif->adapter = NULL; - rte_free(adapter); ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++ } ++ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0); ++ ++ /* mbuf1 */ ++ w0 = vgetq_lane_u64(*senddesc01_w0, 1); ++ if (RTE_MBUF_HAS_EXTBUF(m1)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc01_w1, 1); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m1->next = *extm; ++ *extm = m1; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m1; ++ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19; ++ w0 |= aura << 20; ++ ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++ } ++ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1); ++ ++ /* mbuf 2 */ ++ w0 = vgetq_lane_u64(*senddesc23_w0, 0); ++ if (RTE_MBUF_HAS_EXTBUF(m2)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc23_w1, 0); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m2->next = *extm; ++ *extm = m2; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m2; ++ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19; ++ w0 |= aura << 20; ++ ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++ } ++ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0); ++ ++ /* mbuf3 */ ++ w0 = vgetq_lane_u64(*senddesc23_w0, 1); ++ if (RTE_MBUF_HAS_EXTBUF(m3)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc23_w1, 1); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m3->next = *extm; ++ *extm = m3; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m3; ++ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m3) ? 
m3 : rte_mbuf_from_indirect(m3); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19; ++ w0 |= aura << 20; ++ ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++ } ++ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1); ++#ifndef RTE_LIBRTE_MEMPOOL_DEBUG ++ RTE_SET_USED(cookie); ++#endif ++} ++#endif ++ + static __rte_always_inline void + cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) + { +@@ -864,9 +1030,9 @@ cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) - return 0; -@@ -1058,21 +1061,18 @@ err: - static int - eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev) + static __rte_always_inline void + cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags, +- const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag, +- uint64_t mark_fmt) ++ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, ++ const uint16_t flags, const uint64_t lso_tun_fmt, bool *sec, ++ uint8_t mark_flag, uint64_t mark_fmt) { -- struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); -- struct ionic_adapter *adapter = lif->adapter; -- - IONIC_PRINT_CALL(); + uint8_t mark_off = 0, mark_vlan = 0, markptr = 0; + struct nix_send_ext_s *send_hdr_ext; +@@ -889,6 +1055,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, + sg = (union nix_send_sg_s *)(cmd + 2); + } - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) ++ send_hdr->w0.pnc = 0; ++ + if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) { + ol_flags = m->ol_flags; + w1.u = 0; +@@ -1049,19 +1218,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq, + send_hdr->w1.u = w1.u; -- adapter->lif = NULL; -- -- ionic_lif_deinit(lif); -- ionic_lif_free(lif); -+ if (eth_dev->state != RTE_ETH_DEV_UNUSED) -+ ionic_dev_close(eth_dev); + if (!(flags & NIX_TX_MULTI_SEG_F)) { ++ struct rte_mbuf *cookie; ++ + sg->seg1_size = send_hdr->w0.total; + *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); ++ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); -- if (!(lif->state & IONIC_LIF_F_FW_RESET)) -- ionic_lif_reset(lif); -+ eth_dev->dev_ops = NULL; -+ eth_dev->rx_pkt_burst = NULL; -+ eth_dev->tx_pkt_burst = NULL; -+ eth_dev->tx_pkt_prepare = NULL; + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { ++ uint64_t aura; ++ + /* DF bit = 1 if refcount of current mbuf or parent mbuf + * is greater than 1 + * DF bit = 0 otherwise + */ +- send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr); ++ aura = send_hdr->w0.aura; ++ send_hdr->w0.df = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); ++ send_hdr->w0.aura = aura; + } ++#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /* Mark mempool object as "put" since it is freed by NIX */ + if (!send_hdr->w0.df) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++#else ++ RTE_SET_USED(cookie); ++#endif + } else { + sg->seg1_size = m->data_len; + *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); +@@ -1113,7 +1293,7 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr, + struct nix_send_mem_s *send_mem; - return 0; + send_mem = (struct nix_send_mem_s *)(lmt + off); +- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp ++ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp + * should not be recorded, hence changing the alg type to + * NIX_SENDMEMALG_SUB and also changing send mem addr field to + * next 8 bytes as it corrupts the actual Tx tstamp registered +@@ -1128,13 +1308,14 @@ cn10k_nix_xmit_prepare_tstamp(struct cn10k_eth_txq *txq, uintptr_t lmt_addr, } -@@ -1227,17 +1227,18 @@ eth_ionic_dev_remove(struct rte_device *rte_dev) + + static __rte_always_inline uint16_t +-cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) ++cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, ++ uint64_t *cmd, const uint16_t flags) { - char name[RTE_ETH_NAME_MAX_LEN]; - struct rte_eth_dev *eth_dev; -+ int ret = 0; + uint64_t prefree = 0, aura0, aura, nb_segs, segdw; + struct nix_send_hdr_s *send_hdr; + union nix_send_sg_s *sg, l_sg; + union nix_send_sg2_s l_sg2; ++ struct rte_mbuf *cookie; + struct rte_mbuf *m_next; + uint8_t off, is_sg2; + uint64_t len, dlen; +@@ -1163,21 +1344,27 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, + len -= dlen; + nb_segs = m->nb_segs - 1; + m_next = m->next; ++ m->next = NULL; ++ m->nb_segs = 1; + slist = &cmd[3 + off + 1]; - /* Adapter lookup is using the eth_dev name */ - snprintf(name, sizeof(name), "%s_lif", rte_dev->name); ++ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { +- prefree = cn10k_nix_prefree_seg(m, txq, send_hdr); ++ aura = send_hdr->w0.aura; ++ prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); ++ send_hdr->w0.aura = aura; + l_sg.i1 = prefree; + } - eth_dev = rte_eth_dev_allocated(name); - if (eth_dev) -- ionic_dev_close(eth_dev); -+ ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); - else - IONIC_PRINT(DEBUG, "Cannot find device %s", rte_dev->name); + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /* Mark mempool object as "put" since it is freed by NIX */ + if (!prefree) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); ++#else ++ RTE_SET_USED(cookie); + #endif +- m->next = NULL; -- return 0; -+ return ret; - } + /* Quickly handle single segmented packets. With this if-condition + * compiler will completely optimize out the below do-while loop +@@ -1207,9 +1394,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, + aura = aura0; + prefree = 0; - RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE); -diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c -index b9e73b4871..170d3b0802 100644 ---- a/dpdk/drivers/net/ionic/ionic_rxtx.c -+++ b/dpdk/drivers/net/ionic/ionic_rxtx.c -@@ -26,38 +26,40 @@ - #include "ionic_logs.h" ++ m->next = NULL; ++ ++ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + aura = roc_npa_aura_handle_to_aura(m->pool->pool_id); +- prefree = cn10k_nix_prefree_seg(m, txq, send_hdr); ++ prefree = cn10k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); + is_sg2 = aura != aura0 && !prefree; + } - static void --ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) -+ionic_empty_array(void **array, uint32_t free_idx, uint32_t zero_idx) - { - uint32_t i; +@@ -1259,13 +1449,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq, + l_sg.subdc = NIX_SUBDC_SG; + slist++; + } +- m->next = NULL; -- for (i = idx; i < cnt; i++) -+ for (i = 0; i < free_idx; i++) - if (array[i]) - rte_pktmbuf_free_seg(array[i]); + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /* Mark mempool object as "put" since it is freed by NIX + */ + if (!prefree) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++#else ++ RTE_SET_USED(cookie); + #endif + m = m_next; + } while (nb_segs); +@@ -1302,6 +1493,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts, + uint8_t lnum, c_lnum, c_shft, c_loff; + uintptr_t pa, lbase = txq->lmt_base; + uint16_t lmt_id, burst, left, i; ++ struct rte_mbuf *extm = NULL; + uintptr_t c_lbase = lbase; + uint64_t lso_tun_fmt = 0; + uint64_t mark_fmt = 0; +@@ -1356,7 +1548,7 @@ again: + if (flags & NIX_TX_OFFLOAD_TSO_F) + cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags); -- memset(array, 0, sizeof(void *) * cnt); -+ memset(array, 0, sizeof(void *) * zero_idx); - } +- cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, ++ cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, + &sec, mark_flag, mark_fmt); - static void __rte_cold - ionic_tx_empty(struct ionic_tx_qcq *txq) - { - struct ionic_queue *q = &txq->qcq.q; -+ uint32_t info_len = q->num_descs * q->num_segs; + laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0); +@@ -1431,6 +1623,11 @@ again: + } -- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); -+ 
ionic_empty_array(q->info, info_len, info_len); - } + rte_io_wmb(); ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { ++ cn10k_nix_free_extmbuf(extm); ++ extm = NULL; ++ } ++ + if (left) + goto again; - static void __rte_cold - ionic_rx_empty(struct ionic_rx_qcq *rxq) - { - struct ionic_queue *q = &rxq->qcq.q; -+ uint32_t info_len = q->num_descs * q->num_segs; +@@ -1446,6 +1643,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws, + uintptr_t pa0, pa1, lbase = txq->lmt_base; + const rte_iova_t io_addr = txq->io_addr; + uint16_t segdw, lmt_id, burst, left, i; ++ struct rte_mbuf *extm = NULL; + uint8_t lnum, c_lnum, c_loff; + uintptr_t c_lbase = lbase; + uint64_t lso_tun_fmt = 0; +@@ -1507,7 +1705,7 @@ again: + if (flags & NIX_TX_OFFLOAD_TSO_F) + cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags); - /* - * Walk the full info array so that the clean up includes any - * fragments that were left dangling for later reuse - */ -- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); -+ ionic_empty_array(q->info, info_len, info_len); +- cn10k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, ++ cn10k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, + &sec, mark_flag, mark_fmt); -- ionic_empty_array((void **)rxq->mbs, -- IONIC_MBUF_BULK_ALLOC, rxq->mb_idx); -+ ionic_empty_array((void **)rxq->mbs, rxq->mb_idx, -+ IONIC_MBUF_BULK_ALLOC); - rxq->mb_idx = 0; - } + laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0); +@@ -1521,7 +1719,7 @@ again: + /* Move NIX desc to LMT/NIXTX area */ + cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags); + /* Store sg list directly on lmt line */ +- segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], (uint64_t *)laddr, ++ segdw = cn10k_nix_prepare_mseg(txq, tx_pkts[i], &extm, (uint64_t *)laddr, + flags); + cn10k_nix_xmit_prepare_tstamp(txq, laddr, tx_pkts[i]->ol_flags, + segdw, flags); +@@ -1594,6 +1792,11 @@ again: + } -@@ -752,7 +754,7 @@ ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) - { - struct ionic_rx_qcq *rxq = rx_queue; - struct ionic_qcq *qcq = &rxq->qcq; -- struct ionic_rxq_comp *cq_desc; -+ volatile struct ionic_rxq_comp *cq_desc; - uint16_t mask, head, tail, pos; - bool done_color; + rte_io_wmb(); ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { ++ cn10k_nix_free_extmbuf(extm); ++ extm = NULL; ++ } ++ + if (left) + goto again; -@@ -791,7 +793,7 @@ ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) - { - struct ionic_tx_qcq *txq = tx_queue; - struct ionic_qcq *qcq = &txq->qcq; -- struct ionic_txq_comp *cq_desc; -+ volatile struct ionic_txq_comp *cq_desc; - uint16_t mask, head, tail, pos, cq_pos; - bool done_color; +@@ -1644,7 +1847,7 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, -diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c -index ab8e56e91c..241b6f8587 100644 ---- a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c -+++ b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c -@@ -27,7 +27,8 @@ ionic_tx_flush_sg(struct ionic_tx_qcq *txq) - struct ionic_cq *cq = &txq->qcq.cq; - struct ionic_queue *q = &txq->qcq.q; - struct rte_mbuf *txm; -- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; -+ struct ionic_txq_comp *cq_desc_base = cq->base; -+ volatile struct ionic_txq_comp *cq_desc; - void **info; - uint32_t i; + static __rte_always_inline uint16_t + cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, ++ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, + uint64x2_t 
*cmd0, uint64x2_t *cmd1, + uint64x2_t *cmd2, uint64x2_t *cmd3, + const uint32_t flags) +@@ -1659,7 +1862,7 @@ cn10k_nix_prepare_mseg_vec_noff(struct cn10k_eth_txq *txq, + vst1q_u64(cmd + 2, *cmd1); /* sg */ + } -@@ -252,7 +253,7 @@ ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts, - */ - static __rte_always_inline void - ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq, -- struct ionic_rxq_comp *cq_desc, -+ volatile struct ionic_rxq_comp *cq_desc, - struct ionic_rx_service *rx_svc) - { - struct ionic_queue *q = &rxq->qcq.q; -@@ -438,7 +439,8 @@ ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do, - struct ionic_cq *cq = &rxq->qcq.cq; - struct ionic_queue *q = &rxq->qcq.q; - struct ionic_rxq_desc *q_desc_base = q->base; -- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; -+ struct ionic_rxq_comp *cq_desc_base = cq->base; -+ volatile struct ionic_rxq_comp *cq_desc; - uint32_t work_done = 0; - uint64_t then, now, hz, delta; +- segdw = cn10k_nix_prepare_mseg(txq, m, cmd, flags); ++ segdw = cn10k_nix_prepare_mseg(txq, m, extm, cmd, flags); -diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c -index 5f81856256..0992177afc 100644 ---- a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c -+++ b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c -@@ -27,7 +27,8 @@ ionic_tx_flush(struct ionic_tx_qcq *txq) - struct ionic_cq *cq = &txq->qcq.cq; - struct ionic_queue *q = &txq->qcq.q; - struct rte_mbuf *txm; -- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; -+ struct ionic_txq_comp *cq_desc_base = cq->base; -+ volatile struct ionic_txq_comp *cq_desc; - void **info; + if (flags & NIX_TX_OFFLOAD_TSTAMP_F) + vst1q_u64(cmd + segdw * 2 - 2, *cmd3); +@@ -1694,9 +1897,13 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + len -= dlen; + sg_u = sg_u | ((uint64_t)dlen); - cq_desc = &cq_desc_base[cq->tail_idx]; -@@ -225,7 +226,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - */ - static __rte_always_inline void - ionic_rx_clean_one(struct ionic_rx_qcq *rxq, -- struct ionic_rxq_comp *cq_desc, -+ volatile struct ionic_rxq_comp *cq_desc, - struct ionic_rx_service *rx_svc) - { - struct ionic_queue *q = &rxq->qcq.q; -@@ -359,7 +360,8 @@ ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, - struct ionic_cq *cq = &rxq->qcq.cq; - struct ionic_queue *q = &rxq->qcq.q; - struct ionic_rxq_desc *q_desc_base = q->base; -- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; -+ struct ionic_rxq_comp *cq_desc_base = cq->base; -+ volatile struct ionic_rxq_comp *cq_desc; - uint32_t work_done = 0; - uint64_t then, now, hz, delta; ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ + nb_segs = m->nb_segs - 1; + m_next = m->next; + m->next = NULL; ++ m->nb_segs = 1; + m = m_next; + /* Fill mbuf segments */ + do { +@@ -1719,6 +1926,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + slist++; + } + m->next = NULL; ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ + m = m_next; + } while (nb_segs); -diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c -index 74c5db16fa..56267bb00d 100644 ---- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c -+++ b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c -@@ -432,8 +432,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) - case TN1010_PHY_ID: - phy_type = 
ixgbe_phy_tn; - break; -- case X550_PHY_ID2: -- case X550_PHY_ID3: -+ case X550_PHY_ID: - case X540_PHY_ID: - phy_type = ixgbe_phy_aq; - break; -@@ -915,6 +914,10 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) - hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; +@@ -1742,8 +1952,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, + union nix_send_hdr_w0_u sh; + union nix_send_sg_s sg; - switch (hw->mac.type) { -+ case ixgbe_mac_X550: -+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; -+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; -+ break; - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; -diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h -index 1094df5891..f709681df2 100644 ---- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h -+++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h -@@ -1664,6 +1664,7 @@ struct ixgbe_dmac_config { - #define TN1010_PHY_ID 0x00A19410 - #define TNX_FW_REV 0xB - #define X540_PHY_ID 0x01540200 -+#define X550_PHY_ID 0x01540220 - #define X550_PHY_ID2 0x01540223 - #define X550_PHY_ID3 0x01540221 - #define X557_PHY_ID 0x01540240 -@@ -1800,7 +1801,7 @@ enum { - /* VFRE bitmask */ - #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF +- if (m->nb_segs == 1) ++ if (m->nb_segs == 1) { ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + return; ++ } --#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ -+#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */ + sh.u = vgetq_lane_u64(cmd0[0], 0); + sg.u = vgetq_lane_u64(cmd1[0], 0); +@@ -1759,7 +1972,7 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, - /* RDHMPN and TDHMPN bitmasks */ - #define IXGBE_RDHMPN_RDICADDR 0x007FF800 -diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c -index 5e3ae1b519..11dbbe2a86 100644 ---- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c -+++ b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c -@@ -585,7 +585,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - switch (links_reg & IXGBE_LINKS_SPEED_82599) { - case IXGBE_LINKS_SPEED_10G_82599: - *speed = IXGBE_LINK_SPEED_10GB_FULL; -- if (hw->mac.type >= ixgbe_mac_X550) { -+ if (hw->mac.type >= ixgbe_mac_X550_vf) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_2_5GB_FULL; - } -@@ -595,7 +595,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - break; - case IXGBE_LINKS_SPEED_100_82599: - *speed = IXGBE_LINK_SPEED_100_FULL; -- if (hw->mac.type == ixgbe_mac_X550) { -+ if (hw->mac.type == ixgbe_mac_X550_vf) { - if (links_reg & IXGBE_LINKS_SPEED_NON_STD) - *speed = IXGBE_LINK_SPEED_5GB_FULL; + static __rte_always_inline uint8_t + cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, +- struct rte_mbuf **mbufs, uint64x2_t *cmd0, ++ struct rte_mbuf **mbufs, struct rte_mbuf **extm, uint64x2_t *cmd0, + uint64x2_t *cmd1, uint64x2_t *cmd2, + uint64x2_t *cmd3, uint8_t *segdw, + uint64_t *lmt_addr, __uint128_t *data128, +@@ -1777,7 +1990,7 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, + lmt_addr += 16; + off = 0; + } +- off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j], ++ off += cn10k_nix_prepare_mseg_vec_noff(txq, mbufs[j], extm, + lmt_addr + off * 2, &cmd0[j], &cmd1[j], + &cmd2[j], &cmd3[j], flags); } -@@ -603,7 +603,7 @@ 
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - case IXGBE_LINKS_SPEED_10_X550EM_A: - *speed = IXGBE_LINK_SPEED_UNKNOWN; - /* Since Reserved in older MAC's */ -- if (hw->mac.type >= ixgbe_mac_X550) -+ if (hw->mac.type >= ixgbe_mac_X550_vf) - *speed = IXGBE_LINK_SPEED_10_FULL; - break; - default: -diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index d6cf00317e..a44497ce51 100644 ---- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -+++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -@@ -1190,7 +1190,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) - diag = ixgbe_validate_eeprom_checksum(hw, &csum); - if (diag != IXGBE_SUCCESS) { - PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); -- return -EIO; -+ ret = -EIO; -+ goto err_exit; - } - - #ifdef RTE_LIBRTE_IXGBE_BYPASS -@@ -1228,7 +1229,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) - PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); - if (diag) { - PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); -- return -EIO; -+ ret = -EIO; -+ goto err_exit; - } +@@ -1803,6 +2016,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, + *data128 |= ((__uint128_t)7) << *shift; + *shift += 3; - /* Reset the hw statistics */ -@@ -1248,7 +1250,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) - "Failed to allocate %u bytes needed to store " - "MAC addresses", - RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); -- return -ENOMEM; -+ ret = -ENOMEM; -+ goto err_exit; - } - /* Copy the permanent MAC address */ - rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, -@@ -1263,7 +1266,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) - RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; -- return -ENOMEM; -+ ret = -ENOMEM; -+ goto err_exit; ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0); + return 1; + } } +@@ -1821,6 +2039,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq, + vst1q_u64(lmt_addr + 10, cmd2[j + 1]); + vst1q_u64(lmt_addr + 12, cmd1[j + 1]); + vst1q_u64(lmt_addr + 14, cmd3[j + 1]); ++ ++ /* Mark mempool object as "put" since it is freed by NIX */ ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool, ++ (void **)&mbufs[j + 1], 1, 0); + } else if (flags & NIX_TX_NEED_EXT_HDR) { + /* EXT header take 3 each, space for 2 segs.*/ + cn10k_nix_prepare_mseg_vec(mbufs[j], +@@ -1920,14 +2143,14 @@ cn10k_nix_lmt_next(uint8_t dw, uintptr_t laddr, uint8_t *lnum, uint8_t *loff, - /* initialize the vfta */ -@@ -1347,6 +1351,11 @@ err_pf_host_init: - eth_dev->data->mac_addrs = NULL; - rte_free(eth_dev->data->hash_mac_addrs); - eth_dev->data->hash_mac_addrs = NULL; -+err_exit: -+#ifdef RTE_LIB_SECURITY -+ rte_free(eth_dev->security_ctx); -+ eth_dev->security_ctx = NULL; -+#endif - return ret; - } - -@@ -4280,6 +4289,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, - int wait = 1; - u32 esdp_reg; + static __rte_always_inline void + cn10k_nix_xmit_store(struct cn10k_eth_txq *txq, +- 
struct rte_mbuf *mbuf, uint8_t segdw, uintptr_t laddr, ++ struct rte_mbuf *mbuf, struct rte_mbuf **extm, uint8_t segdw, uintptr_t laddr, + uint64x2_t cmd0, uint64x2_t cmd1, uint64x2_t cmd2, + uint64x2_t cmd3, const uint16_t flags) + { + uint8_t off; -+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) -+ return -1; -+ - memset(&link, 0, sizeof(link)); - link.link_status = RTE_ETH_LINK_DOWN; - link.link_speed = RTE_ETH_SPEED_NUM_NONE; -@@ -4654,14 +4666,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) - timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { +- cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, LMT_OFF(laddr, 0, 0), ++ cn10k_nix_prepare_mseg_vec_noff(txq, mbuf, extm, LMT_OFF(laddr, 0, 0), + &cmd0, &cmd1, &cmd2, &cmd3, + flags); + return; +@@ -1997,13 +2220,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, + uint64x2_t sgdesc01_w0, sgdesc23_w0; + uint64x2_t sgdesc01_w1, sgdesc23_w1; + struct cn10k_eth_txq *txq = tx_queue; +- uint64x2_t xmask01_w0, xmask23_w0; +- uint64x2_t xmask01_w1, xmask23_w1; + rte_iova_t io_addr = txq->io_addr; + uint8_t lnum, shift = 0, loff = 0; + uintptr_t laddr = txq->lmt_base; + uint8_t c_lnum, c_shft, c_loff; +- struct nix_send_hdr_s send_hdr; + uint64x2_t ltypes01, ltypes23; + uint64x2_t xtmp128, ytmp128; + uint64x2_t xmask01, xmask23; +@@ -2014,6 +2234,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, + __uint128_t data128; + uint64_t data[2]; + } wd; ++ struct rte_mbuf *extm = NULL; - ixgbe_dev_link_status_print(dev); -- if (rte_eal_alarm_set(timeout * 1000, -- ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) -- PMD_DRV_LOG(ERR, "Error setting alarm"); -- else { -- /* remember original mask */ -- intr->mask_original = intr->mask; -- /* only disable lsc interrupt */ -- intr->mask &= ~IXGBE_EIMS_LSC; -+ -+ /* Don't program delayed handler if LSC interrupt is disabled. -+ * It means one is already programmed. 
-+ */ -+ if (intr->mask & IXGBE_EIMS_LSC) { -+ if (rte_eal_alarm_set(timeout * 1000, -+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) -+ PMD_DRV_LOG(ERR, "Error setting alarm"); -+ else { -+ /* remember original mask */ -+ intr->mask_original = intr->mask; -+ /* only disable lsc interrupt */ -+ intr->mask &= ~IXGBE_EIMS_LSC; -+ } - } + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena) + handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F); +@@ -2098,7 +2319,8 @@ again: } -diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -index 90b0a7004f..f6c17d4efb 100644 ---- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -+++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c -@@ -5844,6 +5844,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) - IXGBE_PSRTYPE_RQPL_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + for (i = 0; i < burst; i += NIX_DESCS_PER_LOOP) { +- if (flags & NIX_TX_OFFLOAD_SECURITY_F && c_lnum + 2 > 16) { ++ if (flags & NIX_TX_OFFLOAD_SECURITY_F && ++ (((int)((16 - c_lnum) << 1) - c_loff) < 4)) { + burst = i; + break; + } +@@ -2153,7 +2375,7 @@ again: + } + /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */ + senddesc01_w0 = +- vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF)); ++ vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF)); + sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF)); -+ /* Initialize the rss for x550_vf cards if enabled */ -+ switch (hw->mac.type) { -+ case ixgbe_mac_X550_vf: -+ case ixgbe_mac_X550EM_x_vf: -+ case ixgbe_mac_X550EM_a_vf: -+ switch (dev->data->dev_conf.rxmode.mq_mode) { -+ case RTE_ETH_MQ_RX_RSS: -+ case RTE_ETH_MQ_RX_DCB_RSS: -+ case RTE_ETH_MQ_RX_VMDQ_RSS: -+ ixgbe_rss_configure(dev); -+ break; -+ default: -+ break; -+ } -+ break; -+ default: -+ break; -+ } -+ - ixgbe_set_rx_function(dev); + senddesc23_w0 = senddesc01_w0; +@@ -2859,73 +3081,8 @@ again: + !(flags & NIX_TX_MULTI_SEG_F) && + !(flags & NIX_TX_OFFLOAD_SECURITY_F)) { + /* Set don't free bit if reference count > 1 */ +- xmask01_w0 = vdupq_n_u64(0); +- xmask01_w1 = vdupq_n_u64(0); +- xmask23_w0 = xmask01_w0; +- xmask23_w1 = xmask01_w1; +- +- /* Move mbufs to iova */ +- mbuf0 = (uint64_t *)tx_pkts[0]; +- mbuf1 = (uint64_t *)tx_pkts[1]; +- mbuf2 = (uint64_t *)tx_pkts[2]; +- mbuf3 = (uint64_t *)tx_pkts[3]; +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0); +- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf0)->pool, +- (void **)&mbuf0, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1); +- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf1)->pool, +- (void **)&mbuf1, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0); +- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf2)->pool, +- (void **)&mbuf2, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, 
txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1); +- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf3)->pool, +- (void **)&mbuf3, 1, 0); +- } +- +- senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0); +- senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0); +- senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1); +- senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1); ++ cn10k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0, ++ &senddesc23_w0, &senddesc01_w1, &senddesc23_w1); + } else if (!(flags & NIX_TX_MULTI_SEG_F) && + !(flags & NIX_TX_OFFLOAD_SECURITY_F)) { + /* Move mbufs to iova */ +@@ -2997,7 +3154,7 @@ again: + &shift, &wd.data128, &next); - return 0; -diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c -index 781ed76139..65ca139be5 100644 ---- a/dpdk/drivers/net/mana/mana.c -+++ b/dpdk/drivers/net/mana/mana.c -@@ -296,8 +296,8 @@ mana_dev_info_get(struct rte_eth_dev *dev, - dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE; - dev_info->max_rx_pktlen = MANA_MAX_MTU + RTE_ETHER_HDR_LEN; + /* Store mbuf0 to LMTLINE/CPT NIXTX area */ +- cn10k_nix_xmit_store(txq, tx_pkts[0], segdw[0], next, ++ cn10k_nix_xmit_store(txq, tx_pkts[0], &extm, segdw[0], next, + cmd0[0], cmd1[0], cmd2[0], cmd3[0], + flags); -- dev_info->max_rx_queues = priv->max_rx_queues; -- dev_info->max_tx_queues = priv->max_tx_queues; -+ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX); -+ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX); +@@ -3013,7 +3170,7 @@ again: + &shift, &wd.data128, &next); - dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR; - dev_info->max_hash_mac_addrs = 0; -@@ -338,16 +338,20 @@ mana_dev_info_get(struct rte_eth_dev *dev, + /* Store mbuf1 to LMTLINE/CPT NIXTX area */ +- cn10k_nix_xmit_store(txq, tx_pkts[1], segdw[1], next, ++ cn10k_nix_xmit_store(txq, tx_pkts[1], &extm, segdw[1], next, + cmd0[1], cmd1[1], cmd2[1], cmd3[1], + flags); - /* Buffer limits */ - dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; -- dev_info->rx_desc_lim.nb_max = priv->max_rx_desc; -+ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX); - dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; -- dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge; -- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; -+ dev_info->rx_desc_lim.nb_seg_max = -+ RTE_MIN(priv->max_recv_sge, UINT16_MAX); -+ dev_info->rx_desc_lim.nb_mtu_seg_max = -+ RTE_MIN(priv->max_recv_sge, UINT16_MAX); +@@ -3029,7 +3186,7 @@ again: + &shift, &wd.data128, &next); - dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; -- dev_info->tx_desc_lim.nb_max = priv->max_tx_desc; -+ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX); - dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; -- dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge; -- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; -+ dev_info->tx_desc_lim.nb_seg_max = -+ RTE_MIN(priv->max_send_sge, UINT16_MAX); -+ dev_info->tx_desc_lim.nb_mtu_seg_max = -+ RTE_MIN(priv->max_send_sge, UINT16_MAX); + /* Store mbuf2 to LMTLINE/CPT NIXTX area */ +- cn10k_nix_xmit_store(txq, tx_pkts[2], segdw[2], next, ++ cn10k_nix_xmit_store(txq, tx_pkts[2], &extm, segdw[2], next, + cmd0[2], cmd1[2], cmd2[2], cmd3[2], + flags); - /* Speed */ - dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G; -@@ -707,7 +711,7 @@ mana_dev_stats_reset(struct rte_eth_dev *dev __rte_unused) - 
static int - mana_get_ifname(const struct mana_priv *priv, char (*ifname)[IF_NAMESIZE]) - { -- int ret; -+ int ret = -ENODEV; - DIR *dir; - struct dirent *dent; +@@ -3045,7 +3202,7 @@ again: + &shift, &wd.data128, &next); -@@ -1385,9 +1389,9 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, - priv->max_mr = dev_attr->orig_attr.max_mr; - priv->max_mr_size = dev_attr->orig_attr.max_mr_size; + /* Store mbuf3 to LMTLINE/CPT NIXTX area */ +- cn10k_nix_xmit_store(txq, tx_pkts[3], segdw[3], next, ++ cn10k_nix_xmit_store(txq, tx_pkts[3], &extm, segdw[3], next, + cmd0[3], cmd1[3], cmd2[3], cmd3[3], + flags); -- DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d", -+ DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %" PRIu64, - name, priv->max_rx_queues, priv->max_rx_desc, -- priv->max_send_sge); -+ priv->max_send_sge, priv->max_mr_size); +@@ -3053,7 +3210,7 @@ again: + uint8_t j; - rte_eth_copy_pci_info(eth_dev, pci_dev); + segdw[4] = 8; +- j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, cmd0, cmd1, ++ j = cn10k_nix_prep_lmt_mseg_vector(txq, tx_pkts, &extm, cmd0, cmd1, + cmd2, cmd3, segdw, + (uint64_t *) + LMT_OFF(laddr, lnum, +@@ -3203,6 +3360,11 @@ again: + } -diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h -index 6836872dc2..822b8a1f15 100644 ---- a/dpdk/drivers/net/mana/mana.h -+++ b/dpdk/drivers/net/mana/mana.h -@@ -522,9 +522,9 @@ void mana_del_pmd_mr(struct mana_mr_cache *mr); - void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque, - struct rte_mempool_memhdr *memhdr, unsigned int idx); + rte_io_wmb(); ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) { ++ cn10k_nix_free_extmbuf(extm); ++ extm = NULL; ++ } ++ + if (left) + goto again; --struct mana_mr_cache *mana_mr_btree_lookup(struct mana_mr_btree *bt, -- uint16_t *idx, -- uintptr_t addr, size_t len); -+int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, -+ uintptr_t addr, size_t len, -+ struct mana_mr_cache **cache); - int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry); - int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket); - void mana_mr_btree_free(struct mana_mr_btree *bt); -diff --git a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c -index b8e6ea0bbf..eb6d073a95 100644 ---- a/dpdk/drivers/net/mana/mr.c -+++ b/dpdk/drivers/net/mana/mr.c -@@ -40,7 +40,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, - struct ibv_mr *ibv_mr; - struct mana_range ranges[pool->nb_mem_chunks]; - uint32_t i; -- struct mana_mr_cache *mr; -+ struct mana_mr_cache mr; - int ret; +diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.c b/dpdk/drivers/net/cnxk/cn9k_ethdev.c +index bae4dda5e2..dee0abdac5 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.c +@@ -30,7 +30,7 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) + flags |= NIX_RX_MULTI_SEG_F; - rte_mempool_mem_iter(pool, mana_mempool_chunk_cb, ranges); -@@ -75,14 +75,13 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, - DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", - ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); +- if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) ++ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + flags |= NIX_RX_OFFLOAD_TSTAMP_F; -- mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); -- mr->lkey = ibv_mr->lkey; -- mr->addr = (uintptr_t)ibv_mr->addr; -- mr->len = 
ibv_mr->length; -- mr->verb_obj = ibv_mr; -+ mr.lkey = ibv_mr->lkey; -+ mr.addr = (uintptr_t)ibv_mr->addr; -+ mr.len = ibv_mr->length; -+ mr.verb_obj = ibv_mr; + if (!dev->ptype_disable) +@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx) + struct roc_nix_sq *sq = &dev->sqs[qidx]; + do { + handle_tx_completion_pkts(txq, 0); ++ /* Check if SQ is empty */ + roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail); ++ if (head != tail) ++ continue; ++ ++ /* Check if completion CQ is empty */ ++ roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail); + } while (head != tail); + } - rte_spinlock_lock(&priv->mr_btree_lock); -- ret = mana_mr_btree_insert(&priv->mr_btree, mr); -+ ret = mana_mr_btree_insert(&priv->mr_btree, &mr); - rte_spinlock_unlock(&priv->mr_btree_lock); - if (ret) { - ibv_dereg_mr(ibv_mr); -@@ -90,7 +89,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, - return ret; - } +@@ -426,7 +432,7 @@ cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix; + struct rte_eth_dev *eth_dev; + struct cn9k_eth_rxq *rxq; +- int i; ++ int i, rc; -- ret = mana_mr_btree_insert(local_tree, mr); -+ ret = mana_mr_btree_insert(local_tree, &mr); - if (ret) { - /* Don't need to clean up MR as it's already - * in the global tree -@@ -138,8 +137,12 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, + if (!dev) + return -EINVAL; +@@ -449,8 +455,21 @@ cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + * and MTU setting also requires MBOX message to be + * sent(VF->PF) + */ ++ if (dev->ptp_en) { ++ rc = rte_mbuf_dyn_rx_timestamp_register ++ (&dev->tstamp.tstamp_dynfield_offset, ++ &dev->tstamp.rx_tstamp_dynflag); ++ if (rc != 0) { ++ plt_err("Failed to register Rx timestamp field/flag"); ++ return -EINVAL; ++ } ++ } + eth_dev->rx_pkt_burst = nix_ptp_vf_burst; ++ rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; + rte_mb(); ++ if (dev->cnxk_sso_ptp_tstamp_cb) ++ dev->cnxk_sso_ptp_tstamp_cb(eth_dev->data->port_id, ++ NIX_RX_OFFLOAD_TSTAMP_F, dev->ptp_en); + } - try_again: - /* First try to find the MR in local queue tree */ -- mr = mana_mr_btree_lookup(local_mr_btree, &idx, -- (uintptr_t)mbuf->buf_addr, mbuf->buf_len); -+ ret = mana_mr_btree_lookup(local_mr_btree, &idx, -+ (uintptr_t)mbuf->buf_addr, mbuf->buf_len, -+ &mr); -+ if (ret) -+ return NULL; -+ - if (mr) { - DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu", - mr->lkey, mr->addr, mr->len); -@@ -148,11 +151,14 @@ try_again: + return 0; +diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.h b/dpdk/drivers/net/cnxk/cn9k_ethdev.h +index 9e0a3c5bb2..6ae0db62ca 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_ethdev.h ++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.h +@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe) + m = m_next; + } + rte_pktmbuf_free_seg(m); ++ txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL; - /* If not found, try to find the MR in global tree */ - rte_spinlock_lock(&priv->mr_btree_lock); -- mr = mana_mr_btree_lookup(&priv->mr_btree, &idx, -- (uintptr_t)mbuf->buf_addr, -- mbuf->buf_len); -+ ret = mana_mr_btree_lookup(&priv->mr_btree, &idx, -+ (uintptr_t)mbuf->buf_addr, -+ mbuf->buf_len, &mr); - rte_spinlock_unlock(&priv->mr_btree_lock); + head++; + head &= qmask; +diff --git a/dpdk/drivers/net/cnxk/cn9k_tx.h b/dpdk/drivers/net/cnxk/cn9k_tx.h +index fba4bb4215..4715bf8a65 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_tx.h 
++++ b/dpdk/drivers/net/cnxk/cn9k_tx.h +@@ -82,32 +82,198 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd, + } + } -+ if (ret) -+ return NULL; ++static __rte_always_inline void ++cn9k_nix_free_extmbuf(struct rte_mbuf *m) ++{ ++ struct rte_mbuf *m_next; ++ while (m != NULL) { ++ m_next = m->next; ++ rte_pktmbuf_free_seg(m); ++ m = m_next; ++ } ++} + - /* If found in the global tree, add it to the local tree */ - if (mr) { - ret = mana_mr_btree_insert(local_mr_btree, mr); -@@ -228,22 +234,23 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) - /* - * Look for a region of memory in MR cache. - */ --struct mana_mr_cache * --mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, -- uintptr_t addr, size_t len) -+int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, -+ uintptr_t addr, size_t len, -+ struct mana_mr_cache **cache) + static __rte_always_inline uint64_t +-cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq, +- struct nix_send_hdr_s *send_hdr) ++cn9k_nix_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **extm, struct cn9k_eth_txq *txq, ++ struct nix_send_hdr_s *send_hdr, uint64_t *aura) { - struct mana_mr_cache *table; - uint16_t n; - uint16_t base = 0; - int ret; - -- n = bt->len; -+ *cache = NULL; ++ struct rte_mbuf *prev; + uint32_t sqe_id; -+ n = bt->len; - /* Try to double the cache if it's full */ - if (n == bt->size) { - ret = mana_mr_btree_expand(bt, bt->size << 1); - if (ret) -- return NULL; -+ return ret; + if (RTE_MBUF_HAS_EXTBUF(m)) { + if (unlikely(txq->tx_compl.ena == 0)) { +- rte_pktmbuf_free_seg(m); ++ m->next = *extm; ++ *extm = m; + return 1; + } + if (send_hdr->w0.pnc) { +- txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m; ++ sqe_id = send_hdr->w1.sqe_id; ++ prev = txq->tx_compl.ptr[sqe_id]; ++ m->next = prev; ++ txq->tx_compl.ptr[sqe_id] = m; + } else { + sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED); + send_hdr->w0.pnc = 1; + send_hdr->w1.sqe_id = sqe_id & + txq->tx_compl.nb_desc_mask; + txq->tx_compl.ptr[send_hdr->w1.sqe_id] = m; ++ m->next = NULL; + } + return 1; + } else { +- return cnxk_nix_prefree_seg(m); ++ return cnxk_nix_prefree_seg(m, aura); } - - table = bt->table; -@@ -262,14 +269,16 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, - - *idx = base; - -- if (addr + len <= table[base].addr + table[base].len) -- return &table[base]; -+ if (addr + len <= table[base].addr + table[base].len) { -+ *cache = &table[base]; -+ return 0; -+ } - - DP_LOG(DEBUG, - "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", - addr, len, *idx, addr + len); - -- return NULL; -+ return 0; } - int -@@ -314,14 +323,21 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) - struct mana_mr_cache *table; - uint16_t idx = 0; - uint16_t shift; -+ int ret; ++#if defined(RTE_ARCH_ARM64) ++/* Only called for first segments of single segmented mbufs */ ++static __rte_always_inline void ++cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct rte_mbuf **extm, struct cn9k_eth_txq *txq, ++ uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0, ++ uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1) ++{ ++ struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr; ++ uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask; ++ bool tx_compl_ena = txq->tx_compl.ena; ++ struct rte_mbuf *m0, *m1, *m2, *m3; ++ struct rte_mbuf *cookie; ++ uint64_t w0, w1, aura; ++ uint64_t sqe_id; + -+ ret = mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len, &table); -+ if (ret) -+ 
return ret; - -- if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { -+ if (table) { - DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", - entry->addr, entry->len); - return 0; - } - - if (bt->len >= bt->size) { -+ DP_LOG(ERR, "Btree overflow detected len %u size %u", -+ bt->len, bt->size); - bt->overflow = 1; - return -1; - } -diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c -index 7cc8c0da91..18377d9caf 100644 ---- a/dpdk/drivers/net/memif/rte_eth_memif.c -+++ b/dpdk/drivers/net/memif/rte_eth_memif.c -@@ -265,8 +265,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q - cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); - while (mq->last_tail != cur_tail) { - RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); -- /* Decrement refcnt and free mbuf. (current segment) */ -- rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); - rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); - mq->last_tail++; - } -@@ -684,7 +682,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) - n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot; - } - -- uint8_t i; -+ uint16_t i; - struct rte_mbuf **buf_tmp = bufs; - mbuf_head = *buf_tmp++; - struct rte_mempool *mp = mbuf_head->pool; -@@ -825,10 +823,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq - next_in_chain: - /* store pointer to mbuf to free it later */ - mq->buffers[slot & mask] = mbuf; -- /* Increment refcnt to make sure the buffer is not freed before server -- * receives it. (current segment) -- */ -- rte_mbuf_refcnt_update(mbuf, 1); - /* populate descriptor */ - d0 = &ring->desc[slot & mask]; - d0->length = rte_pktmbuf_data_len(mbuf); -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr.h b/dpdk/drivers/net/mlx5/hws/mlx5dr.h -index d88f73ab57..f003d9f446 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr.h -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr.h -@@ -80,6 +80,7 @@ enum mlx5dr_action_aso_ct_flags { - }; - - enum mlx5dr_match_template_flags { -+ MLX5DR_MATCH_TEMPLATE_FLAG_NONE = 0, - /* Allow relaxed matching by skipping derived dependent match fields. 
*/ - MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH = 1, - }; -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c -index 862ee3e332..a068f100c5 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c -@@ -1465,7 +1465,9 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, - - /* Create a full modify header action list in case shared */ - mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); -- mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); ++ m0 = mbufs[0]; ++ m1 = mbufs[1]; ++ m2 = mbufs[2]; ++ m3 = mbufs[3]; + -+ if (action->flags & MLX5DR_ACTION_FLAG_SHARED) -+ mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); - - /* All DecapL3 cases require the same max arg size */ - arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, -@@ -1489,6 +1491,7 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, - - action[i].modify_header.max_num_of_actions = num_of_actions; - action[i].modify_header.num_of_actions = num_of_actions; -+ action[i].modify_header.num_of_patterns = num_of_hdrs; - action[i].modify_header.arg_obj = arg_obj; - action[i].modify_header.pat_obj = pat_obj; - action[i].modify_header.require_reparse = -@@ -2547,6 +2550,7 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) - case MLX5DR_ACTION_TYP_ASO_CT: - case MLX5DR_ACTION_TYP_PUSH_VLAN: - case MLX5DR_ACTION_TYP_REMOVE_HEADER: -+ case MLX5DR_ACTION_TYP_VPORT: - mlx5dr_action_destroy_stcs(action); - break; - case MLX5DR_ACTION_TYP_DEST_ROOT: -@@ -2600,6 +2604,9 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) - if (action->ipv6_route_ext.action[i]) - mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); - break; -+ default: -+ DR_LOG(ERR, "Not supported action type: %d", action->type); -+ assert(false); - } - } - -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c -index 876a47147d..0fb764df32 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c -@@ -1027,7 +1027,8 @@ int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx, - - ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); - if (ret) { -- DR_LOG(ERR, "Failed to write GTA WQE using FW"); -+ DR_LOG(ERR, "Failed to write GTA WQE using FW (syndrome: %#x)", -+ mlx5dr_cmd_get_syndrome(out)); - rte_errno = errno; - return rte_errno; - } -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c -index 15d53c578a..7f120b3b1b 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c -@@ -263,6 +263,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx, - free_caps: - simple_free(ctx->caps); - free_ctx: -+ pthread_spin_destroy(&ctx->ctrl_lock); - simple_free(ctx); - return NULL; - } -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c -index 11557bcab8..f11c81ffee 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c -@@ -150,7 +150,7 @@ mlx5dr_debug_dump_matcher_action_template(FILE *f, struct mlx5dr_matcher *matche - MLX5DR_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE, - (uint64_t)(uintptr_t)at, - (uint64_t)(uintptr_t)matcher, -- at->only_term ? 0 : 1, -+ at->only_term, - is_root ? 
0 : at->num_of_action_stes, - at->num_actions); - if (ret < 0) { -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -index 0b60479406..031e87bc0c 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c -@@ -8,8 +8,9 @@ - #define BAD_PORT 0xBAD - #define ETH_TYPE_IPV4_VXLAN 0x0800 - #define ETH_TYPE_IPV6_VXLAN 0x86DD --#define ETH_VXLAN_DEFAULT_PORT 4789 --#define IP_UDP_PORT_MPLS 6635 -+#define UDP_GTPU_PORT 2152 -+#define UDP_VXLAN_PORT 4789 -+#define UDP_PORT_MPLS 6635 - #define UDP_ROCEV2_PORT 4791 - #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS) - -@@ -41,6 +42,10 @@ - (bit_off))); \ - } while (0) - -+/* Getter function based on bit offset and mask, for 32bit DW*/ -+#define DR_GET_32(p, byte_off, bit_off, mask) \ -+ ((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) ++ /* mbuf 0 */ ++ w0 = vgetq_lane_u64(*senddesc01_w0, 0); ++ if (RTE_MBUF_HAS_EXTBUF(m0)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc01_w1, 0); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m0->next = *extm; ++ *extm = m0; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m0; ++ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19; ++ w0 |= aura << 20; + - /* Setter function based on bit offset and mask */ - #define DR_SET(p, v, byte_off, bit_off, mask) \ - do { \ -@@ -158,7 +163,7 @@ struct mlx5dr_definer_conv_data { - X(SET, tcp_protocol, STE_TCP, rte_flow_item_tcp) \ - X(SET_BE16, tcp_src_port, v->hdr.src_port, rte_flow_item_tcp) \ - X(SET_BE16, tcp_dst_port, v->hdr.dst_port, rte_flow_item_tcp) \ -- X(SET, gtp_udp_port, RTE_GTPU_UDP_PORT, rte_flow_item_gtp) \ -+ X(SET, gtp_udp_port, UDP_GTPU_PORT, rte_flow_item_gtp) \ - X(SET_BE32, gtp_teid, v->hdr.teid, rte_flow_item_gtp) \ - X(SET, gtp_msg_type, v->hdr.msg_type, rte_flow_item_gtp) \ - X(SET, gtp_ext_flag, !!v->hdr.gtp_hdr_info, rte_flow_item_gtp) \ -@@ -166,8 +171,8 @@ struct mlx5dr_definer_conv_data { - X(SET, gtp_ext_hdr_pdu, v->hdr.type, rte_flow_item_gtp_psc) \ - X(SET, gtp_ext_hdr_qfi, v->hdr.qfi, rte_flow_item_gtp_psc) \ - X(SET, vxlan_flags, v->flags, rte_flow_item_vxlan) \ -- X(SET, vxlan_udp_port, ETH_VXLAN_DEFAULT_PORT, rte_flow_item_vxlan) \ -- X(SET, mpls_udp_port, IP_UDP_PORT_MPLS, rte_flow_item_mpls) \ -+ X(SET, vxlan_udp_port, UDP_VXLAN_PORT, rte_flow_item_vxlan) \ -+ X(SET, mpls_udp_port, UDP_PORT_MPLS, rte_flow_item_mpls) \ - X(SET, source_qp, v->queue, mlx5_rte_flow_item_sq) \ - X(SET, tag, v->data, rte_flow_item_tag) \ - X(SET, metadata, v->data, rte_flow_item_meta) \ -@@ -183,6 +188,8 @@ struct mlx5dr_definer_conv_data { - X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \ - X(SET, ib_l4_opcode, v->hdr.opcode, rte_flow_item_ib_bth) \ - X(SET, ib_l4_bth_a, v->hdr.a, rte_flow_item_ib_bth) \ -+ X(SET, cvlan, STE_CVLAN, rte_flow_item_vlan) \ -+ X(SET_BE16, inner_type, v->inner_type, rte_flow_item_vlan) \ - - /* Item set function format */ - #define X(set_type, func_name, value, item_type) \ -@@ -377,7 +384,7 @@ mlx5dr_definer_integrity_set(struct 
mlx5dr_definer_fc *fc, - { - bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I); - const struct rte_flow_item_integrity *v = item_spec; -- uint32_t ok1_bits = 0; -+ uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask); - - if (v->l3_ok) - ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) : -@@ -769,6 +776,15 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, - struct mlx5dr_definer_fc *fc; - bool inner = cd->tunnel; - -+ if (!cd->relaxed) { -+ /* Mark packet as tagged (CVLAN) */ -+ fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; -+ fc->item_idx = item_idx; -+ fc->tag_mask_set = &mlx5dr_definer_ones_set; -+ fc->tag_set = &mlx5dr_definer_cvlan_set; -+ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner); ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + } ++ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0); + - if (!m) - return 0; - -@@ -777,8 +793,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, - return rte_errno; - } - -- if (!cd->relaxed || m->has_more_vlan) { -- /* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/ -+ if (m->has_more_vlan) { - fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; - fc->item_idx = item_idx; - fc->tag_mask_set = &mlx5dr_definer_ones_set; -@@ -796,7 +811,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, - if (m->hdr.eth_proto) { - fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)]; - fc->item_idx = item_idx; -- fc->tag_set = &mlx5dr_definer_eth_type_set; -+ fc->tag_set = &mlx5dr_definer_inner_type_set; - DR_CALC_SET(fc, eth_l2, l3_ethertype, inner); - } - -@@ -1170,6 +1185,12 @@ mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd, - const struct rte_flow_item_gtp *m = item->mask; - struct mlx5dr_definer_fc *fc; - -+ if (cd->tunnel) { -+ DR_LOG(ERR, "Inner GTPU item not supported"); -+ rte_errno = ENOTSUP; -+ return rte_errno; -+ } ++ /* mbuf1 */ ++ w0 = vgetq_lane_u64(*senddesc01_w0, 1); ++ if (RTE_MBUF_HAS_EXTBUF(m1)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc01_w1, 1); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m1->next = *extm; ++ *extm = m1; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m1; ++ *senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19; ++ w0 |= aura << 20; + - /* Overwrite GTPU dest port if not present */ - fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; - if (!fc->tag_set && !cd->relaxed) { -@@ -1344,9 +1365,20 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, - struct mlx5dr_definer_fc *fc; - bool inner = cd->tunnel; - -- /* In order to match on VXLAN we must match on ether_type, ip_protocol -- * and l4_dport. 
-- */ -+ if (inner) { -+ DR_LOG(ERR, "Inner VXLAN item not supported"); -+ rte_errno = ENOTSUP; -+ return rte_errno; ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + } ++ *senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1); + -+ /* In order to match on VXLAN we must match on ip_protocol and l4_dport */ -+ if (m && (m->rsvd0[0] != 0 || m->rsvd0[1] != 0 || m->rsvd0[2] != 0 || -+ m->rsvd1 != 0)) { -+ DR_LOG(ERR, "reserved fields are not supported"); -+ rte_errno = ENOTSUP; -+ return rte_errno; ++ /* mbuf 2 */ ++ w0 = vgetq_lane_u64(*senddesc23_w0, 0); ++ if (RTE_MBUF_HAS_EXTBUF(m2)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc23_w1, 0); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m2->next = *extm; ++ *extm = m2; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m2; ++ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19; ++ w0 |= aura << 20; ++ ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + } ++ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0); + - if (!cd->relaxed) { - fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)]; - if (!fc->tag_set) { -@@ -1369,12 +1401,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, - return 0; - - if (m->flags) { -- if (inner) { -- DR_LOG(ERR, "Inner VXLAN flags item not supported"); -- rte_errno = ENOTSUP; -- return rte_errno; -- } -- - fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS]; - fc->item_idx = item_idx; - fc->tag_set = &mlx5dr_definer_vxlan_flags_set; -@@ -1384,12 +1410,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, - } - - if (!is_mem_zero(m->vni, 3)) { -- if (inner) { -- DR_LOG(ERR, "Inner VXLAN vni item not supported"); -- rte_errno = ENOTSUP; -- return rte_errno; -- } -- - fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI]; - fc->item_idx = item_idx; - fc->tag_set = &mlx5dr_definer_vxlan_vni_set; -@@ -2240,11 +2260,6 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd, - const struct rte_flow_item_esp *m = item->mask; - struct mlx5dr_definer_fc *fc; - -- if (!cd->ctx->caps->ipsec_offload) { -- rte_errno = ENOTSUP; -- return rte_errno; -- } -- - if (!m) - return 0; - if (m->hdr.spi) { -@@ -2842,7 +2857,7 @@ mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx, - return 0; - } - -- DR_LOG(ERR, "Unable to find supporting match/jumbo definer combination"); -+ DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination"); - rte_errno = ENOTSUP; - return rte_errno; ++ /* mbuf3 */ ++ w0 = vgetq_lane_u64(*senddesc23_w0, 1); ++ if (RTE_MBUF_HAS_EXTBUF(m3)) { ++ w0 |= BIT_ULL(19); ++ w1 = vgetq_lane_u64(*senddesc23_w1, 1); ++ w1 &= ~0xFFFF000000000000UL; ++ if (unlikely(!tx_compl_ena)) { ++ m3->next = *extm; ++ *extm = m3; ++ } else { ++ sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1, ++ rte_memory_order_relaxed); ++ sqe_id = sqe_id & nb_desc_mask; ++ /* Set PNC */ ++ w0 |= BIT_ULL(43); ++ w1 |= sqe_id << 48; ++ tx_compl_ptr[sqe_id] = m3; ++ *senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1); ++ } ++ } else { ++ cookie = RTE_MBUF_DIRECT(m3) ? 
m3 : rte_mbuf_from_indirect(m3); ++ aura = (w0 >> 20) & 0xFFFFF; ++ w0 &= ~0xFFFFF00000UL; ++ w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19; ++ w0 |= aura << 20; ++ ++ if ((w0 & BIT_ULL(19)) == 0) ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++ } ++ *senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1); ++#ifndef RTE_LIBRTE_MEMPOOL_DEBUG ++ RTE_SET_USED(cookie); ++#endif ++} ++#endif ++ + static __rte_always_inline void + cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) + { +@@ -161,10 +327,9 @@ cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags) } -@@ -2975,7 +2990,7 @@ mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher, - /* Find the match definer layout for header layout match union */ - ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl); - if (ret) { -- DR_LOG(ERR, "Failed to create match definer from header layout"); -+ DR_LOG(DEBUG, "Failed to create match definer from header layout"); - goto free_fc; + + static __rte_always_inline void +-cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags, +- const uint64_t lso_tun_fmt, uint8_t mark_flag, +- uint64_t mark_fmt) ++cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, ++ uint64_t *cmd, const uint16_t flags, const uint64_t lso_tun_fmt, ++ uint8_t mark_flag, uint64_t mark_fmt) + { + uint8_t mark_off = 0, mark_vlan = 0, markptr = 0; + struct nix_send_ext_s *send_hdr_ext; +@@ -191,6 +356,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, + ol_flags = m->ol_flags; + w1.u = 0; } ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) ++ send_hdr->w0.pnc = 0; -@@ -3191,15 +3206,18 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx, + if (!(flags & NIX_TX_MULTI_SEG_F)) + send_hdr->w0.total = m->data_len; +@@ -345,23 +512,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq, + send_hdr->w1.u = w1.u; - /* Create optional range definers */ - for (i = 0; i < matcher->num_of_mt; i++) { -- if (!mt[i].fcr_sz) -- continue; -- - /* All must use range if requested */ -- if (i && !mt[i - 1].range_definer) { -+ bool is_range = !!mt[i].fcr_sz; -+ bool has_range = matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER; + if (!(flags & NIX_TX_MULTI_SEG_F)) { ++ struct rte_mbuf *cookie; + -+ if (i && ((is_range && !has_range) || (!is_range && has_range))) { - DR_LOG(ERR, "Using range and non range templates is not allowed"); - goto free_definers; - } + sg->seg1_size = m->data_len; + *(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m); ++ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); -+ if (!mt[i].fcr_sz) -+ continue; -+ - matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER; - /* Create definer without fcr binding, already binded */ - mt[i].range_definer = mlx5dr_definer_alloc(ctx, -@@ -3320,7 +3338,7 @@ int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx, + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { ++ uint64_t aura; + /* DF bit = 1 if refcount of current mbuf or parent mbuf + * is greater than 1 + * DF bit = 0 otherwise + */ +- send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr); ++ aura = send_hdr->w0.aura; ++ send_hdr->w0.df = cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura); ++ send_hdr->w0.aura = aura; + /* Ensuring mbuf fields which got updated in + * cnxk_nix_prefree_seg are written before LMTST. 
+ */ + rte_io_wmb(); + } ++#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /* Mark mempool object as "put" since it is freed by NIX */ + if (!send_hdr->w0.df) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); ++#else ++ RTE_SET_USED(cookie); ++#endif + } else { + sg->seg1_size = m->data_len; + *(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m); +@@ -383,7 +560,7 @@ cn9k_nix_xmit_prepare_tstamp(struct cn9k_eth_txq *txq, uint64_t *cmd, - ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout); - if (ret) { -- DR_LOG(ERR, "Failed to calculate matcher definer layout"); -+ DR_LOG(DEBUG, "Failed to calculate matcher definer layout"); - return ret; - } + send_mem = (struct nix_send_mem_s *)(cmd + off); -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c -index 4ea161eae6..36be96c668 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c -@@ -340,7 +340,7 @@ static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) - return 0; +- /* Packets for which PKT_TX_IEEE1588_TMST is not set, tx tstamp ++ /* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, Tx tstamp + * should not be recorded, hence changing the alg type to + * NIX_SENDMEMALG_SUB and also changing send mem addr field to + * next 8 bytes as it corrupts the actual Tx tstamp registered +@@ -439,10 +616,12 @@ cn9k_nix_xmit_submit_lmt_release(const rte_iova_t io_addr) + } - matcher_reconnect: -- if (LIST_EMPTY(&tbl->head)) -+ if (LIST_EMPTY(&tbl->head) || prev_matcher == matcher) - LIST_INSERT_HEAD(&matcher->tbl->head, matcher, next); - else - LIST_INSERT_AFTER(prev_matcher, matcher, next); -@@ -807,7 +807,7 @@ static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher) - /* Calculate match, range and hash definers */ - ret = mlx5dr_definer_matcher_init(ctx, matcher); - if (ret) { -- DR_LOG(ERR, "Failed to set matcher templates with match definers"); -+ DR_LOG(DEBUG, "Failed to set matcher templates with match definers"); - return ret; - } + static __rte_always_inline uint16_t +-cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags) ++cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, ++ uint64_t *cmd, const uint16_t flags) + { + struct nix_send_hdr_s *send_hdr; ++ uint64_t prefree = 0, aura; ++ struct rte_mbuf *cookie; + union nix_send_sg_s *sg; + struct rte_mbuf *m_next; + uint64_t *slist, sg_u; +@@ -467,17 +646,27 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, + m_next = m->next; + slist = &cmd[3 + off + 1]; -@@ -1171,6 +1171,13 @@ static int mlx5dr_matcher_init_root(struct mlx5dr_matcher *matcher) - return rte_errno; ++ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { +- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55); ++ aura = send_hdr->w0.aura; ++ prefree = (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55); ++ send_hdr->w0.aura = aura; ++ sg_u |= prefree; + rte_io_wmb(); } -+ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); -+ if (ret) { -+ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); -+ rte_errno = EINVAL; -+ return rte_errno; -+ } -+ - mask = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) + - offsetof(struct mlx5dv_flow_match_parameters, match_buf)); - if (!mask) { -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h -index bbe313102f..c4e0cbc843 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h -@@ -30,7 +30,6 @@ struct mlx5dr_pattern_cache { - struct mlx5dr_pattern_cache_item { - struct { - struct mlx5dr_devx_obj *pattern_obj; -- struct dr_icm_chunk *chunk; - uint8_t *data; - uint16_t num_of_actions; - } mh_data; -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c -index fa19303b91..cc7a30d6d0 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c -@@ -23,6 +23,9 @@ static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher, - *skip_rx = false; - *skip_tx = false; - -+ if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher))) -+ return; -+ - if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) { - v = items[mt->vport_item_id].spec; - vport = flow_hw_conv_port_id(v->port_id); -@@ -55,14 +58,16 @@ static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe, - struct mlx5dr_rule *rule, - const struct rte_flow_item *items, - struct mlx5dr_match_template *mt, -- void *user_data) -+ struct mlx5dr_rule_attr *attr) - { - struct mlx5dr_matcher *matcher = rule->matcher; - struct mlx5dr_table *tbl = matcher->tbl; - bool skip_rx, skip_tx; + /* Mark mempool object as "put" since it is freed by NIX */ + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << 55))) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); ++#else ++ RTE_SET_USED(cookie); ++#endif ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++ m->nb_segs = 1; + #endif + m = m_next; + if (!m) +@@ -488,16 +677,17 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, + m_next = m->next; + sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); + *slist = rte_mbuf_data_iova(m); ++ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { +- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55)); ++ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, NULL) << (i + 55)); + /* Commit changes to mbuf */ + rte_io_wmb(); + } + /* Mark mempool object as "put" since it is freed by NIX */ + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << (i + 55)))) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); + #endif + slist++; +@@ -513,6 +703,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq, + sg_u = sg->u; + slist++; + } ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++#endif + m = m_next; + } while (nb_segs); - dep_wqe->rule = rule; -- dep_wqe->user_data = user_data; -+ dep_wqe->user_data = attr->user_data; -+ dep_wqe->direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ? -+ attr->rule_idx : 0; +@@ -526,6 +719,9 @@ done: + segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + send_hdr->w0.sizem1 = segdw - 1; - if (!items) { /* rule update */ - dep_wqe->rtc_0 = rule->rtc_0; -@@ -145,8 +150,13 @@ mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule, - rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr)); - assert(rule->tag_ptr); ++#ifdef RTE_ENABLE_ASSERT ++ rte_io_wmb(); ++#endif + return segdw; + } -- src_tag = (uint8_t *)ste_attr->wqe_data->tag; -- memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ); -+ if (is_jumbo) -+ memcpy(rule->tag_ptr[0].jumbo, ste_attr->wqe_data->action, -+ MLX5DR_JUMBO_TAG_SZ); -+ else -+ memcpy(rule->tag_ptr[0].match, ste_attr->wqe_data->tag, -+ MLX5DR_MATCH_TAG_SZ); -+ - rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id; +@@ -568,6 +764,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, + const rte_iova_t io_addr = txq->io_addr; + uint64_t lso_tun_fmt = 0, mark_fmt = 0; + void *lmt_addr = txq->lmt_addr; ++ struct rte_mbuf *extm = NULL; + uint8_t mark_flag = 0; + uint16_t i; - /* Save range definer id and tag for delete */ -@@ -289,8 +299,8 @@ static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule, +@@ -598,13 +795,16 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, + rte_io_wmb(); + + for (i = 0; i < pkts; i++) { +- cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, ++ cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, + mark_flag, mark_fmt); + cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4, + flags); + cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags); } - mlx5dr_rule_create_init(rule, &ste_attr, &apply, false); -- mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data); -- mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data); -+ mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr); -+ mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr); ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) ++ cn9k_nix_free_extmbuf(extm); ++ + /* Reduce the cached count */ + txq->fc_cache_pkts -= pkts; - ste_attr.direct_index = 0; - ste_attr.rtc_0 = match_wqe.rtc_0; -@@ -395,7 +405,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule, - * dep_wqe buffers (ctrl, data) are also reused for all STE writes. 
- */ - dep_wqe = mlx5dr_send_add_new_dep_wqe(queue); -- mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr->user_data); -+ mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr); +@@ -619,6 +819,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, + const rte_iova_t io_addr = txq->io_addr; + uint64_t lso_tun_fmt = 0, mark_fmt = 0; + void *lmt_addr = txq->lmt_addr; ++ struct rte_mbuf *extm = NULL; + uint8_t mark_flag = 0; + uint16_t segdw; + uint64_t i; +@@ -650,14 +851,17 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, + rte_io_wmb(); - ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; - ste_attr.wqe_data = &dep_wqe->wqe_data; -@@ -457,8 +467,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule, - ste_attr.used_id_rtc_1 = &rule->rtc_1; - ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0; - ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1; -- ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ? -- attr->rule_idx : 0; -+ ste_attr.direct_index = dep_wqe->direct_index; - } else { - apply.next_direct_idx = --ste_attr.direct_index; - } -@@ -594,6 +603,13 @@ static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule, - uint8_t match_criteria; - int ret; + for (i = 0; i < pkts; i++) { +- cn9k_nix_xmit_prepare(txq, tx_pkts[i], cmd, flags, lso_tun_fmt, ++ cn9k_nix_xmit_prepare(txq, tx_pkts[i], &extm, cmd, flags, lso_tun_fmt, + mark_flag, mark_fmt); +- segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], cmd, flags); ++ segdw = cn9k_nix_prepare_mseg(txq, tx_pkts[i], &extm, cmd, flags); + cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, + segdw, flags); + cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw); + } -+ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); -+ if (ret) { -+ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); -+ rte_errno = EINVAL; -+ return rte_errno; -+ } ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) ++ cn9k_nix_free_extmbuf(extm); + - attr = simple_calloc(num_actions, sizeof(*attr)); - if (!attr) { - rte_errno = ENOMEM; -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c -index 622d574bfa..4c279ba42a 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c -@@ -50,6 +50,7 @@ void mlx5dr_send_all_dep_wqe(struct mlx5dr_send_engine *queue) - ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1; - ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; - ste_attr.wqe_data = &dep_wqe->wqe_data; -+ ste_attr.direct_index = dep_wqe->direct_index; - - mlx5dr_send_ste(queue, &ste_attr); - -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h -index c1e8616f7e..0c89faa8a7 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h -@@ -106,6 +106,7 @@ struct mlx5dr_send_ring_dep_wqe { - uint32_t rtc_1; - uint32_t retry_rtc_0; - uint32_t retry_rtc_1; -+ uint32_t direct_index; - void *user_data; - }; + /* Reduce the cached count */ + txq->fc_cache_pkts -= pkts; -@@ -202,8 +203,6 @@ struct mlx5dr_send_ste_attr { - * value to write in CPU endian format. - * @param addr - * Address to write to. -- * @param lock -- * Address of the lock to use for that UAR access. 
- */ - static __rte_always_inline void - mlx5dr_uar_write64_relaxed(uint64_t val, void *addr) -diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c -index 55b9b20150..ab73017ade 100644 ---- a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c -+++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c -@@ -611,8 +611,7 @@ static int mlx5dr_table_set_default_miss_not_valid(struct mlx5dr_table *tbl, +@@ -705,12 +909,12 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, - if (mlx5dr_table_is_root(tbl) || - (miss_tbl && mlx5dr_table_is_root(miss_tbl)) || -- (miss_tbl && miss_tbl->type != tbl->type) || -- (miss_tbl && tbl->default_miss.miss_tbl)) { -+ (miss_tbl && miss_tbl->type != tbl->type)) { - DR_LOG(ERR, "Invalid arguments"); - rte_errno = EINVAL; - return -rte_errno; -@@ -625,6 +624,7 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, - struct mlx5dr_table *miss_tbl) + static __rte_always_inline uint8_t + cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, ++ struct rte_mbuf *m, struct rte_mbuf **extm, uint64_t *cmd, + struct nix_send_hdr_s *send_hdr, + union nix_send_sg_s *sg, const uint32_t flags) { - struct mlx5dr_context *ctx = tbl->ctx; -+ struct mlx5dr_table *old_miss_tbl; - int ret; - - ret = mlx5dr_table_set_default_miss_not_valid(tbl, miss_tbl); -@@ -632,15 +632,16 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, - return ret; - - pthread_spin_lock(&ctx->ctrl_lock); -- -+ old_miss_tbl = tbl->default_miss.miss_tbl; - ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl); - if (ret) - goto out; +- struct rte_mbuf *m_next; +- uint64_t *slist, sg_u; ++ struct rte_mbuf *m_next, *cookie; ++ uint64_t *slist, sg_u, aura; + uint16_t nb_segs; + uint64_t segdw; + int i = 1; +@@ -727,29 +931,40 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, + m_next = m->next; -+ if (old_miss_tbl) -+ LIST_REMOVE(tbl, default_miss.next); -+ - if (miss_tbl) - LIST_INSERT_HEAD(&miss_tbl->default_miss.head, tbl, default_miss.next); -- else -- LIST_REMOVE(tbl, default_miss.next); + /* Set invert df if buffer is not to be freed by H/W */ +- if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) +- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55); +- /* Mark mempool object as "put" since it is freed by NIX */ ++ cookie = RTE_MBUF_DIRECT(m) ? 
m : rte_mbuf_from_indirect(m); ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { ++ aura = send_hdr->w0.aura; ++ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << 55); ++ send_hdr->w0.aura = aura; ++ } ++ /* Mark mempool object as "put" since it is freed by NIX */ + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << 55))) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); ++#else ++ RTE_SET_USED(cookie); + #endif - pthread_spin_unlock(&ctx->ctrl_lock); - return 0; -diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -index dd5a0c546d..1d999ef66b 100644 ---- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -+++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c -@@ -671,7 +671,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) - ifr.ifr_data = (void *)ðpause; - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); - if (ret) { -- DRV_LOG(WARNING, -+ DRV_LOG(DEBUG, - "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" - " %s", - dev->data->port_id, strerror(rte_errno)); -@@ -1286,13 +1286,17 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - unsigned int i; - struct ifreq ifr; -- unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); -+ unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); -+ unsigned int stats_sz = max_stats_n * sizeof(uint64_t); - unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; - struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; - int ret; -+ uint16_t i_idx, o_idx; -+ uint32_t total_stats = xstats_n; ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++ m->nb_segs = 1; ++#endif + m = m_next; + /* Fill mbuf segments */ + do { + m_next = m->next; + sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); + *slist = rte_mbuf_data_iova(m); ++ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) +- sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55)); ++ sg_u |= (cn9k_nix_prefree_seg(m, extm, txq, send_hdr, &aura) << (i + 55)); + /* Mark mempool object as "put" since it is freed by NIX + */ + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << (i + 55)))) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); + #endif + slist++; +@@ -765,6 +980,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, + sg_u = sg->u; + slist++; + } ++#ifdef RTE_ENABLE_ASSERT ++ m->next = NULL; ++#endif + m = m_next; + } while (nb_segs); - et_stats->cmd = ETHTOOL_GSTATS; -- et_stats->n_stats = xstats_ctrl->stats_n; -+ /* Pass the maximum value, the driver may ignore this. 
*/ -+ et_stats->n_stats = max_stats_n; - ifr.ifr_data = (caddr_t)et_stats; - if (pf >= 0) - ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, -@@ -1305,21 +1309,34 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) - dev->data->port_id); - return ret; - } -- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { -- if (xstats_ctrl->info[i].dev) -- continue; -- stats[i] += (uint64_t) -- et_stats->data[xstats_ctrl->dev_table_idx[i]]; -+ if (pf <= 0) { -+ for (i = 0; i != total_stats; i++) { -+ i_idx = xstats_ctrl->dev_table_idx[i]; -+ o_idx = xstats_ctrl->xstats_o_idx[i]; -+ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) -+ continue; -+ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; -+ } -+ } else { -+ for (i = 0; i != total_stats; i++) { -+ i_idx = xstats_ctrl->dev_table_idx_2nd[i]; -+ o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; -+ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) -+ continue; -+ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; -+ } - } - return 0; +@@ -780,24 +998,31 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq, + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + send_hdr->w0.sizem1 = segdw - 1; + ++#ifdef RTE_ENABLE_ASSERT ++ rte_io_wmb(); ++#endif + return segdw; } --/** -+/* - * Read device counters. - * - * @param dev - * Pointer to Ethernet device. -- * @param[out] stats -+ * @param bond_master -+ * Indicate if the device is a bond master. -+ * @param stats - * Counters table output buffer. - * - * @return -@@ -1327,7 +1344,7 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) - * rte_errno is set. - */ - int --mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) -+mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + static __rte_always_inline uint8_t +-cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, +- struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, +- uint64x2_t *cmd1, const uint32_t flags) ++cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, struct rte_mbuf *m, struct rte_mbuf **extm, ++ uint64_t *cmd, uint64x2_t *cmd0, uint64x2_t *cmd1, const uint32_t flags) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; -@@ -1335,7 +1352,7 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + struct nix_send_hdr_s send_hdr; ++ struct rte_mbuf *cookie; + union nix_send_sg_s sg; ++ uint64_t aura; + uint8_t ret; - memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); - /* Read ifreq counters. */ -- if (priv->master && priv->pf_bond >= 0) { -+ if (bond_master) { - /* Sum xstats from bonding device member ports. */ - for (i = 0; i < priv->sh->bond.n_port; i++) { - ret = _mlx5_os_read_dev_counters(dev, i, stats); -@@ -1347,13 +1364,17 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) - if (ret) - return ret; - } -- /* Read IB counters. */ -- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { -+ /* -+ * Read IB dev counters. -+ * The counters are unique per IB device but not per netdev IF. -+ * In bonding mode, getting the stats name only from 1 port is enough. -+ */ -+ for (i = xstats_ctrl->dev_cnt_start; i < xstats_ctrl->mlx5_stats_n; i++) { - if (!xstats_ctrl->info[i].dev) - continue; - /* return last xstats counter if fail to read. 
*/ - if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, -- &stats[i]) == 0) -+ &stats[i]) == 0) - xstats_ctrl->xstats[i] = stats[i]; - else - stats[i] = xstats_ctrl->xstats[i]; -@@ -1361,18 +1382,24 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) - return ret; - } + if (m->nb_segs == 1) { ++ cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m); + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0); + send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1); + sg.u = vgetq_lane_u64(cmd1[0], 0); +- sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55); ++ aura = send_hdr.w0.aura; ++ sg.u |= (cn9k_nix_prefree_seg(m, extm, txq, &send_hdr, &aura) << 55); ++ send_hdr.w0.aura = aura; + cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0); + cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0); + cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1); +@@ -806,8 +1031,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, + #ifdef RTE_LIBRTE_MEMPOOL_DEBUG + sg.u = vgetq_lane_u64(cmd1[0], 0); + if (!(sg.u & (1ULL << 55))) +- RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); ++ RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0); + rte_io_wmb(); ++#else ++ RTE_SET_USED(cookie); + #endif + return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) + + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); +@@ -817,7 +1044,7 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq, + send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1); + sg.u = vgetq_lane_u64(cmd1[0], 0); --/** -+/* - * Query the number of statistics provided by ETHTOOL. - * - * @param dev - * Pointer to Ethernet device. -+ * @param bond_master -+ * Indicate if the device is a bond master. -+ * @param n_stats -+ * Pointer to number of stats to store. -+ * @param n_stats_sec -+ * Pointer to number of stats to store for the 2nd port of the bond. - * - * @return -- * Number of statistics on success, negative errno value otherwise and -- * rte_errno is set. -+ * 0 on success, negative errno value otherwise and rte_errno is set. - */ - int --mlx5_os_get_stats_n(struct rte_eth_dev *dev) -+mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, -+ uint16_t *n_stats, uint16_t *n_stats_sec) - { - struct mlx5_priv *priv = dev->data->dev_private; - struct ethtool_drvinfo drvinfo; -@@ -1381,18 +1408,34 @@ mlx5_os_get_stats_n(struct rte_eth_dev *dev) +- ret = cn9k_nix_prepare_mseg_vec_list(txq, m, cmd, &send_hdr, &sg, flags); ++ ret = cn9k_nix_prepare_mseg_vec_list(txq, m, extm, cmd, &send_hdr, &sg, flags); - drvinfo.cmd = ETHTOOL_GDRVINFO; - ifr.ifr_data = (caddr_t)&drvinfo; -- if (priv->master && priv->pf_bond >= 0) -- /* Bonding PF. */ -+ /* Bonding PFs. 
*/ -+ if (bond_master) { - ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, - SIOCETHTOOL, &ifr); -- else -+ if (ret) { -+ DRV_LOG(WARNING, "bonding port %u unable to query number of" -+ " statistics for the 1st slave, %d", PORT_ID(priv), ret); -+ return ret; -+ } -+ *n_stats = drvinfo.n_stats; -+ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, -+ SIOCETHTOOL, &ifr); -+ if (ret) { -+ DRV_LOG(WARNING, "bonding port %u unable to query number of" -+ " statistics for the 2nd slave, %d", PORT_ID(priv), ret); -+ return ret; -+ } -+ *n_stats_sec = drvinfo.n_stats; -+ } else { - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); -- if (ret) { -- DRV_LOG(WARNING, "port %u unable to query number of statistics", -- dev->data->port_id); -- return ret; -+ if (ret) { -+ DRV_LOG(WARNING, "port %u unable to query number of statistics", -+ PORT_ID(priv)); -+ return ret; -+ } -+ *n_stats = drvinfo.n_stats; - } -- return drvinfo.n_stats; -+ return 0; - } + cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0); + cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1); +@@ -962,11 +1189,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + uint64x2_t sgdesc01_w1, sgdesc23_w1; + struct cn9k_eth_txq *txq = tx_queue; + uint64_t *lmt_addr = txq->lmt_addr; +- uint64x2_t xmask01_w0, xmask23_w0; +- uint64x2_t xmask01_w1, xmask23_w1; + rte_iova_t io_addr = txq->io_addr; +- struct nix_send_hdr_s send_hdr; + uint64x2_t ltypes01, ltypes23; ++ struct rte_mbuf *extm = NULL; + uint64x2_t xtmp128, ytmp128; + uint64x2_t xmask01, xmask23; + uint64_t lmt_status, i; +@@ -1028,7 +1253,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) { + /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */ + senddesc01_w0 = +- vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF)); ++ vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF)); + sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF)); - static const struct mlx5_counter_ctrl mlx5_counters_init[] = { -@@ -1576,7 +1619,104 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { - }, - }; + senddesc23_w0 = senddesc01_w0; +@@ -1732,74 +1957,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && + !(flags & NIX_TX_MULTI_SEG_F)) { + /* Set don't free bit if reference count > 1 */ +- xmask01_w0 = vdupq_n_u64(0); +- xmask01_w1 = vdupq_n_u64(0); +- xmask23_w0 = xmask01_w0; +- xmask23_w1 = xmask01_w1; +- +- /* Move mbufs to iova */ +- mbuf0 = (uint64_t *)tx_pkts[0]; +- mbuf1 = (uint64_t *)tx_pkts[1]; +- mbuf2 = (uint64_t *)tx_pkts[2]; +- mbuf3 = (uint64_t *)tx_pkts[3]; +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0); +- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf0)->pool, +- (void **)&mbuf0, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1); +- xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf1)->pool, +- (void **)&mbuf1, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, 
txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0); +- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf2)->pool, +- (void **)&mbuf2, 1, 0); +- } +- +- send_hdr.w0.u = 0; +- send_hdr.w1.u = 0; +- +- if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) { +- send_hdr.w0.df = 1; +- xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1); +- xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1); +- } else { +- RTE_MEMPOOL_CHECK_COOKIES( +- ((struct rte_mbuf *)mbuf3)->pool, +- (void **)&mbuf3, 1, 0); +- } +- +- senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0); +- senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0); +- senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1); +- senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1); +- ++ cn9k_nix_prefree_seg_vec(tx_pkts, &extm, txq, &senddesc01_w0, ++ &senddesc23_w0, &senddesc01_w1, &senddesc23_w1); + /* Ensuring mbuf fields which got updated in + * cnxk_nix_prefree_seg are written before LMTST. + */ +@@ -1860,7 +2019,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + /* Build mseg list for each packet individually. */ + for (j = 0; j < NIX_DESCS_PER_LOOP; j++) + segdw[j] = cn9k_nix_prepare_mseg_vec(txq, +- tx_pkts[j], ++ tx_pkts[j], &extm, + seg_list[j], &cmd0[j], + &cmd1[j], flags); + segdw[4] = 8; +@@ -1935,6 +2094,9 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, + tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP; + } --static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); -+const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); -+ -+static int -+mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, -+ struct ethtool_gstrings *strings, -+ uint32_t stats_n, uint32_t stats_n_2nd) -+{ -+ struct mlx5_priv *priv = dev->data->dev_private; -+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; -+ struct ifreq ifr; -+ int ret; -+ uint32_t i, j, idx; -+ -+ /* Ensure no out of bounds access before. */ -+ MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); -+ strings->cmd = ETHTOOL_GSTRINGS; -+ strings->string_set = ETH_SS_STATS; -+ strings->len = stats_n; -+ ifr.ifr_data = (caddr_t)strings; -+ if (bond_master) -+ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, -+ SIOCETHTOOL, &ifr); -+ else -+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); -+ if (ret) { -+ DRV_LOG(WARNING, "port %u unable to get statistic names with %d", -+ PORT_ID(priv), ret); -+ return ret; -+ } -+ /* Reorganize the orders to reduce the iterations. */ -+ for (j = 0; j < xstats_n; j++) { -+ xstats_ctrl->dev_table_idx[j] = UINT16_MAX; -+ for (i = 0; i < stats_n; i++) { -+ const char *curr_string = -+ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && !txq->tx_compl.ena) ++ cn9k_nix_free_extmbuf(extm); + -+ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { -+ idx = xstats_ctrl->mlx5_stats_n++; -+ xstats_ctrl->dev_table_idx[j] = i; -+ xstats_ctrl->xstats_o_idx[j] = idx; -+ xstats_ctrl->info[idx] = mlx5_counters_init[j]; -+ } -+ } -+ } -+ if (!bond_master) { -+ /* Add dev counters, unique per IB device. 
*/ -+ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; -+ for (j = 0; j != xstats_n; j++) { -+ if (mlx5_counters_init[j].dev) { -+ idx = xstats_ctrl->mlx5_stats_n++; -+ xstats_ctrl->info[idx] = mlx5_counters_init[j]; -+ xstats_ctrl->hw_stats[idx] = 0; -+ } -+ } -+ return 0; + if (unlikely(pkts_left)) { + if (flags & NIX_TX_MULTI_SEG_F) + pkts += cn9k_nix_xmit_pkts_mseg(tx_queue, tx_pkts, +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c +index 5e11bbb017..f0cf376e7d 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c +@@ -582,7 +582,7 @@ cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, + } + + if (mp == NULL || mp[0] == NULL || mp[1] == NULL) { +- plt_err("invalid memory pools\n"); ++ plt_err("invalid memory pools"); + return -EINVAL; + } + +@@ -610,7 +610,7 @@ cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, + return -EINVAL; + } + +- plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, ++ plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u", (*spb_pool)->name, + (*lpb_pool)->name, (*lpb_pool)->elt_size, (*spb_pool)->elt_size); + + return 0; +@@ -1384,6 +1384,13 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) + goto free_nix_lf; + } + ++ /* Overwrite default RSS setup if requested by user */ ++ rc = cnxk_nix_rss_hash_update(eth_dev, &conf->rx_adv_conf.rss_conf); ++ if (rc) { ++ plt_err("Failed to configure rss rc=%d", rc); ++ goto free_nix_lf; + } + -+ strings->len = stats_n_2nd; -+ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, -+ SIOCETHTOOL, &ifr); -+ if (ret) { -+ DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", -+ PORT_ID(priv), ret); -+ return ret; -+ } -+ /* The 2nd slave port may have a different strings set, based on the configuration. 
*/ -+ for (j = 0; j != xstats_n; j++) { -+ xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; -+ for (i = 0; i != stats_n_2nd; i++) { -+ const char *curr_string = -+ (const char *)&strings->data[i * ETH_GSTRING_LEN]; + /* Init the default TM scheduler hierarchy */ + rc = roc_nix_tm_init(nix); + if (rc) { +@@ -1727,7 +1734,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev) + else + cnxk_eth_dev_ops.timesync_disable(eth_dev); + +- if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || dev->ptp_en) { + rc = rte_mbuf_dyn_rx_timestamp_register + (&dev->tstamp.tstamp_dynfield_offset, + &dev->tstamp.rx_tstamp_dynflag); +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.h b/dpdk/drivers/net/cnxk/cnxk_ethdev.h +index 4d3ebf123b..138d206987 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev.h ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.h +@@ -424,6 +424,13 @@ struct cnxk_eth_dev { + /* MCS device */ + struct cnxk_mcs_dev *mcs_dev; + struct cnxk_macsec_sess_list mcs_list; ++ ++ /* SSO event dev */ ++ void *evdev_priv; ++ ++ /* SSO event dev ptp */ ++ void (*cnxk_sso_ptp_tstamp_cb) ++ (uint16_t port_id, uint16_t flags, bool ptp_en); + }; + + struct cnxk_eth_rxq_sp { +@@ -633,6 +640,10 @@ int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev); + int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev); + __rte_internal + int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev); ++typedef void (*cnxk_ethdev_rx_offload_cb_t)(uint16_t port_id, uint64_t flags); ++__rte_internal ++void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb); + -+ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { -+ xstats_ctrl->dev_table_idx_2nd[j] = i; -+ if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { -+ /* Already mapped in the 1st slave port. */ -+ idx = xstats_ctrl->xstats_o_idx[j]; -+ xstats_ctrl->xstats_o_idx_2nd[j] = idx; -+ } else { -+ /* Append the new items to the end of the map. */ -+ idx = xstats_ctrl->mlx5_stats_n++; -+ xstats_ctrl->xstats_o_idx_2nd[j] = idx; -+ xstats_ctrl->info[idx] = mlx5_counters_init[j]; -+ } -+ } -+ } -+ } -+ /* Dev counters are always at the last now. */ -+ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; -+ for (j = 0; j != xstats_n; j++) { -+ if (mlx5_counters_init[j].dev) { -+ idx = xstats_ctrl->mlx5_stats_n++; -+ xstats_ctrl->info[idx] = mlx5_counters_init[j]; -+ xstats_ctrl->hw_stats[idx] = 0; -+ } -+ } -+ return 0; -+} + struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, + uint32_t spi, bool inb); + struct cnxk_eth_sec_sess * +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c +index 8e862be933..8c022e5f08 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c +@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args) + if (errno) + val = 0; - /** - * Init the structures to read device counters. 
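/*
 * A minimal stand-alone sketch of the name-to-index mapping idea used by the
 * mlx5_os_get_stats_strings() hunks above: walk a fixed table of counter
 * names the driver knows about, look each one up in the string set reported
 * by the device, and record both the device-side index and the output slot so
 * later reads can be done in one pass.  The types and names below are
 * simplified stand-ins, not the real mlx5 structures.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEV_IDX_UNKNOWN UINT16_MAX

struct counter_map {
	const char *name;   /* counter name the driver knows about */
	uint16_t dev_idx;   /* index in the device string set, or UNKNOWN */
	uint16_t out_idx;   /* slot in the exported xstats array */
};

/* Match every known counter against the device-reported names. */
static unsigned int
map_counters(struct counter_map *known, unsigned int n_known,
	     const char *const *dev_names, unsigned int n_dev)
{
	unsigned int i, j, n_out = 0;

	for (j = 0; j < n_known; j++) {
		known[j].dev_idx = DEV_IDX_UNKNOWN;
		for (i = 0; i < n_dev; i++) {
			if (strcmp(known[j].name, dev_names[i]) == 0) {
				known[j].dev_idx = i;
				known[j].out_idx = n_out++;
				break;
			}
		}
	}
	return n_out; /* number of counters that will actually be exported */
}

int
main(void)
{
	struct counter_map known[] = {
		{ "rx_out_of_buffer", 0, 0 },
		{ "tx_errors_phy", 0, 0 },
		{ "rx_bytes_phy", 0, 0 },
	};
	const char *dev_names[] = { "rx_bytes_phy", "rx_out_of_buffer" };

	printf("exported %u of 3 known counters\n",
	       map_counters(known, 3, dev_names, 2));
	return 0;
}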
-@@ -1590,76 +1730,44 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; -- unsigned int i; -- unsigned int j; -- struct ifreq ifr; - struct ethtool_gstrings *strings = NULL; -- unsigned int dev_stats_n; -+ uint16_t dev_stats_n = 0; -+ uint16_t dev_stats_n_2nd = 0; -+ unsigned int max_stats_n; - unsigned int str_sz; - int ret; -+ bool bond_master = (priv->master && priv->pf_bond >= 0); +- *(uint16_t *)extra_args = val; ++ *(uint32_t *)extra_args = val; - /* So that it won't aggregate for each init. */ - xstats_ctrl->mlx5_stats_n = 0; -- ret = mlx5_os_get_stats_n(dev); -+ ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); - if (ret < 0) { - DRV_LOG(WARNING, "port %u no extended statistics available", - dev->data->port_id); - return; - } -- dev_stats_n = ret; -+ max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); - /* Allocate memory to grab stat names and values. */ -- str_sz = dev_stats_n * ETH_GSTRING_LEN; -+ str_sz = max_stats_n * ETH_GSTRING_LEN; - strings = (struct ethtool_gstrings *) - mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, - SOCKET_ID_ANY); - if (!strings) { - DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", -- dev->data->port_id); -+ dev->data->port_id); - return; - } -- strings->cmd = ETHTOOL_GSTRINGS; -- strings->string_set = ETH_SS_STATS; -- strings->len = dev_stats_n; -- ifr.ifr_data = (caddr_t)strings; -- if (priv->master && priv->pf_bond >= 0) -- /* Bonding master. */ -- ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, -- SIOCETHTOOL, &ifr); -- else -- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); -- if (ret) { -- DRV_LOG(WARNING, "port %u unable to get statistic names", -+ ret = mlx5_os_get_stats_strings(dev, bond_master, strings, -+ dev_stats_n, dev_stats_n_2nd); -+ if (ret < 0) { -+ DRV_LOG(WARNING, "port %u failed to get the stats strings", - dev->data->port_id); - goto free; - } -- for (i = 0; i != dev_stats_n; ++i) { -- const char *curr_string = (const char *) -- &strings->data[i * ETH_GSTRING_LEN]; -- -- for (j = 0; j != xstats_n; ++j) { -- if (!strcmp(mlx5_counters_init[j].ctr_name, -- curr_string)) { -- unsigned int idx = xstats_ctrl->mlx5_stats_n++; -- -- xstats_ctrl->dev_table_idx[idx] = i; -- xstats_ctrl->info[idx] = mlx5_counters_init[j]; -- break; -- } -- } -- } -- /* Add dev counters. */ -- MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); -- for (i = 0; i != xstats_n; ++i) { -- if (mlx5_counters_init[i].dev) { -- unsigned int idx = xstats_ctrl->mlx5_stats_n++; -- -- xstats_ctrl->info[idx] = mlx5_counters_init[i]; -- xstats_ctrl->hw_stats[idx] = 0; -- } -- } - xstats_ctrl->stats_n = dev_stats_n; -+ xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; - /* Copy to base at first time. 
*/ -- ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base); -+ ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); - if (ret) - DRV_LOG(ERR, "port %u cannot read device counters: %s", - dev->data->port_id, strerror(rte_errno)); -diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index ae82e1e5d8..2241e84341 100644 ---- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c -+++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -@@ -455,15 +455,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv) - * Routine checks the reference counter and does actual - * resources creation/initialization only if counter is zero. - * -- * @param[in] priv -- * Pointer to the private device data structure. -+ * @param[in] eth_dev -+ * Pointer to the device. - * - * @return - * Zero on success, positive error code otherwise. - */ - static int --mlx5_alloc_shared_dr(struct mlx5_priv *priv) -+mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev) + return 0; + } +@@ -303,8 +303,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev) + uint16_t custom_sa_act = 0; + struct rte_kvargs *kvlist; + uint32_t meta_buf_sz = 0; ++ uint16_t lock_rx_ctx = 0; + uint16_t no_inl_dev = 0; +- uint8_t lock_rx_ctx = 0; + + memset(&sdp_chan, 0, sizeof(sdp_chan)); + memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info)); +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h +index c1f99a2616..67f40b8e25 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_dp.h +@@ -84,7 +84,7 @@ struct cnxk_timesync_info { + + /* Inlines */ + static __rte_always_inline uint64_t +-cnxk_pktmbuf_detach(struct rte_mbuf *m) ++cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura) { -+ struct mlx5_priv *priv = eth_dev->data->dev_private; - struct mlx5_dev_ctx_shared *sh = priv->sh; - char s[MLX5_NAME_SIZE] __rte_unused; - int err; -@@ -578,6 +579,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) - err = errno; - goto error; - } + struct rte_mempool *mp = m->pool; + uint32_t mbuf_size, buf_len; +@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m) + + /* Update refcount of direct mbuf */ + md = rte_mbuf_from_indirect(m); ++ if (aura) ++ *aura = roc_npa_aura_handle_to_aura(md->pool->pool_id); + refcount = rte_mbuf_refcnt_update(md, -1); + + priv_size = rte_pktmbuf_priv_size(mp); +@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m) + } + + static __rte_always_inline uint64_t +-cnxk_nix_prefree_seg(struct rte_mbuf *m) ++cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura) + { + if (likely(rte_mbuf_refcnt_read(m) == 1)) { + if (!RTE_MBUF_DIRECT(m)) +- return cnxk_pktmbuf_detach(m); ++ return cnxk_pktmbuf_detach(m, aura); + + m->next = NULL; + m->nb_segs = 1; + return 0; + } else if (rte_mbuf_refcnt_update(m, -1) == 0) { + if (!RTE_MBUF_DIRECT(m)) +- return cnxk_pktmbuf_detach(m); ++ return cnxk_pktmbuf_detach(m, aura); + + rte_mbuf_refcnt_set(m, 1); + m->next = NULL; +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_mcs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_mcs.c +index 06ef7c98f3..119060bcf3 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_mcs.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_mcs.c +@@ -568,17 +568,17 @@ cnxk_eth_macsec_session_stats_get(struct cnxk_eth_dev *dev, struct cnxk_macsec_s + req.id = sess->flow_id; + req.dir = sess->dir; + roc_mcs_flowid_stats_get(mcs_dev->mdev, &req, &flow_stats); +- plt_nix_dbg("\n******* FLOW_ID IDX[%u] STATS dir: %u********\n", sess->flow_id, 
sess->dir); +- plt_nix_dbg("TX: tcam_hit_cnt: 0x%" PRIx64 "\n", flow_stats.tcam_hit_cnt); ++ plt_nix_dbg("******* FLOW_ID IDX[%u] STATS dir: %u********", sess->flow_id, sess->dir); ++ plt_nix_dbg("TX: tcam_hit_cnt: 0x%" PRIx64, flow_stats.tcam_hit_cnt); + + req.id = mcs_dev->port_id; + req.dir = sess->dir; + roc_mcs_port_stats_get(mcs_dev->mdev, &req, &port_stats); +- plt_nix_dbg("\n********** PORT[0] STATS ****************\n"); +- plt_nix_dbg("RX tcam_miss_cnt: 0x%" PRIx64 "\n", port_stats.tcam_miss_cnt); +- plt_nix_dbg("RX parser_err_cnt: 0x%" PRIx64 "\n", port_stats.parser_err_cnt); +- plt_nix_dbg("RX preempt_err_cnt: 0x%" PRIx64 "\n", port_stats.preempt_err_cnt); +- plt_nix_dbg("RX sectag_insert_err_cnt: 0x%" PRIx64 "\n", port_stats.sectag_insert_err_cnt); ++ plt_nix_dbg("********** PORT[0] STATS ****************"); ++ plt_nix_dbg("RX tcam_miss_cnt: 0x%" PRIx64, port_stats.tcam_miss_cnt); ++ plt_nix_dbg("RX parser_err_cnt: 0x%" PRIx64, port_stats.parser_err_cnt); ++ plt_nix_dbg("RX preempt_err_cnt: 0x%" PRIx64, port_stats.preempt_err_cnt); ++ plt_nix_dbg("RX sectag_insert_err_cnt: 0x%" PRIx64, port_stats.sectag_insert_err_cnt); + + req.id = sess->secy_id; + req.dir = sess->dir; +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +index 5de2919047..89e00f8fc7 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c ++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c +@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo) + devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; + devinfo->max_mac_addrs = dev->max_mac_entries; + devinfo->max_vfs = pci_dev->max_vfs; +- devinfo->max_mtu = devinfo->max_rx_pktlen - +- (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); ++ devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD; + devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; + + devinfo->rx_offload_capa = dev->rx_offload_capa; +@@ -448,6 +447,13 @@ cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr) + roc_nix_npc_mac_addr_set(nix, dev->mac_addr); + goto exit; + } + -+ if (sh->config.dv_flow_en == 1) { -+ /* Query availability of metadata reg_c's. 
*/ -+ if (!priv->sh->metadata_regc_check_flag) { -+ err = mlx5_flow_discover_mreg_c(eth_dev); -+ if (err < 0) { -+ err = -err; -+ goto error; -+ } -+ } -+ if (!mlx5_flow_ext_mreg_supported(eth_dev)) { -+ DRV_LOG(DEBUG, -+ "port %u extensive metadata register is not supported", -+ eth_dev->data->port_id); -+ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { -+ DRV_LOG(ERR, "metadata mode %u is not supported " -+ "(no metadata registers available)", -+ sh->config.dv_xmeta_en); -+ err = ENOTSUP; -+ goto error; -+ } -+ } -+ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && -+ mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) { -+ sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, -+ MLX5_FLOW_MREG_HTABLE_SZ, -+ false, true, eth_dev, -+ flow_dv_mreg_create_cb, -+ flow_dv_mreg_match_cb, -+ flow_dv_mreg_remove_cb, -+ flow_dv_mreg_clone_cb, -+ flow_dv_mreg_clone_free_cb); -+ if (!sh->mreg_cp_tbl) { -+ err = ENOMEM; -+ goto error; -+ } ++ if (eth_dev->data->promiscuous) { ++ rc = roc_nix_mac_promisc_mode_enable(nix, true); ++ if (rc) ++ plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc, ++ roc_error_msg_get(rc)); + } -+ } - #endif - if (!sh->tunnel_hub && sh->config.dv_miss_info) - err = mlx5_alloc_tunnel_hub(sh); -@@ -662,6 +701,10 @@ error: - mlx5_list_destroy(sh->dest_array_list); - sh->dest_array_list = NULL; } -+ if (sh->mreg_cp_tbl) { -+ mlx5_hlist_destroy(sh->mreg_cp_tbl); -+ sh->mreg_cp_tbl = NULL; -+ } - return err; - } -@@ -759,6 +802,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv) - mlx5_list_destroy(sh->dest_array_list); - sh->dest_array_list = NULL; + /* Update mac address to cnxk ethernet device */ +@@ -522,7 +528,7 @@ cnxk_nix_sq_flush(struct rte_eth_dev *eth_dev) + /* Wait for sq entries to be flushed */ + rc = roc_nix_tm_sq_flush_spin(sq); + if (rc) { +- plt_err("Failed to drain sq, rc=%d\n", rc); ++ plt_err("Failed to drain sq, rc=%d", rc); + goto exit; + } + if (data->tx_queue_state[i] == RTE_ETH_QUEUE_STATE_STARTED) { +@@ -544,8 +550,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct roc_nix *nix = &dev->nix; ++ struct cnxk_eth_rxq_sp *rxq_sp; ++ uint32_t buffsz = 0; + int rc = -EINVAL; +- uint32_t buffsz; + + frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en; + +@@ -561,8 +568,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + goto exit; } -+ if (sh->mreg_cp_tbl) { -+ mlx5_hlist_destroy(sh->mreg_cp_tbl); -+ sh->mreg_cp_tbl = NULL; + +- buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; +- old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; ++ if (!eth_dev->data->nb_rx_queues) ++ goto skip_buffsz_check; ++ ++ /* Perform buff size check */ ++ if (data->min_rx_buf_size) { ++ buffsz = data->min_rx_buf_size; ++ } else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) { ++ rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]); ++ ++ if (rxq_sp->qconf.mp) ++ buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp); + } - } ++ ++ /* Skip validation if RQ's are not yet setup */ ++ if (!buffsz) ++ goto skip_buffsz_check; ++ ++ buffsz -= RTE_PKTMBUF_HEADROOM; - /** -@@ -1545,13 +1592,6 @@ err_secondary: + /* Refuse MTU that requires the support of scattered packets + * when this feature has not been enabled before. +@@ -580,6 +603,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + goto exit; } - /* Create context for virtual machine VLAN workaround. 
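/*
 * A minimal sketch of the MTU validation idea behind the cnxk_nix_mtu_set()
 * hunks nearby: derive the usable Rx buffer size (from the queue's mempool
 * when it is known), skip the check when no Rx queue is configured yet, and
 * refuse an MTU whose frame would need scattered Rx while scatter is off.
 * The headroom and L2 overhead constants are stand-ins, not the DPDK macros.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PKT_HEADROOM 128u
#define L2_OVERHEAD  26u   /* e.g. Ethernet header + CRC + VLAN tag */

/*
 * Returns true if the MTU is acceptable: nothing to validate against yet,
 * the frame fits in a single buffer, or scattered Rx is enabled.
 */
static bool
mtu_is_acceptable(uint32_t mtu, uint32_t rx_buf_size, bool scatter_enabled)
{
	uint32_t frame_size = mtu + L2_OVERHEAD;

	if (rx_buf_size == 0)
		return true;            /* Rx queues not configured yet */
	if (rx_buf_size <= PKT_HEADROOM)
		return false;           /* buffer cannot hold any data */
	if (frame_size <= rx_buf_size - PKT_HEADROOM)
		return true;            /* fits in one buffer */
	return scatter_enabled;         /* otherwise scatter must be on */
}

int
main(void)
{
	printf("%d\n", mtu_is_acceptable(9000, 2176, false)); /* 0: too big */
	printf("%d\n", mtu_is_acceptable(9000, 2176, true));  /* 1: scatter */
	printf("%d\n", mtu_is_acceptable(1500, 0, false));    /* 1: no queues */
	return 0;
}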
*/ - priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); -- if (sh->config.dv_flow_en) { -- err = mlx5_alloc_shared_dr(priv); -- if (err) -- goto error; -- if (mlx5_flex_item_port_init(eth_dev) < 0) -- goto error; + ++skip_buffsz_check: ++ old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; + /* if new MTU was smaller than old one, then flush all SQs before MTU change */ + if (old_frame_size > frame_size) { + if (data->dev_started) { +@@ -591,19 +616,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) + + frame_size -= RTE_ETHER_CRC_LEN; + +- /* Update mtu on Tx */ +- rc = roc_nix_mac_mtu_set(nix, frame_size); +- if (rc) { +- plt_err("Failed to set MTU, rc=%d", rc); +- goto exit; - } - if (mlx5_devx_obj_ops_en(sh)) { - priv->obj_ops = devx_obj_ops; - mlx5_queue_counter_id_prepare(eth_dev); -@@ -1602,6 +1642,13 @@ err_secondary: - goto error; - } - rte_rwlock_init(&priv->ind_tbls_lock); -+ if (sh->config.dv_flow_en) { -+ err = mlx5_alloc_shared_dr(eth_dev); -+ if (err) -+ goto error; -+ if (mlx5_flex_item_port_init(eth_dev) < 0) -+ goto error; -+ } - if (priv->sh->config.dv_flow_en == 2) { - #ifdef HAVE_MLX5_HWS_SUPPORT - if (priv->sh->config.dv_esw_en) { -@@ -1682,43 +1729,6 @@ err_secondary: - err = -err; - goto error; +- +- /* Sync same frame size on Rx */ ++ /* Set frame size on Rx */ + rc = roc_nix_mac_max_rx_len_set(nix, frame_size); + if (rc) { +- /* Rollback to older mtu */ +- roc_nix_mac_mtu_set(nix, +- old_frame_size - RTE_ETHER_CRC_LEN); + plt_err("Failed to max Rx frame length, rc=%d", rc); + goto exit; } -- /* Query availability of metadata reg_c's. */ -- if (!priv->sh->metadata_regc_check_flag) { -- err = mlx5_flow_discover_mreg_c(eth_dev); -- if (err < 0) { -- err = -err; -- goto error; -- } -- } -- if (!mlx5_flow_ext_mreg_supported(eth_dev)) { -- DRV_LOG(DEBUG, -- "port %u extensive metadata register is not supported", -- eth_dev->data->port_id); -- if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { -- DRV_LOG(ERR, "metadata mode %u is not supported " -- "(no metadata registers available)", -- sh->config.dv_xmeta_en); -- err = ENOTSUP; -- goto error; -- } -- } -- if (sh->config.dv_flow_en && -- sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && -- mlx5_flow_ext_mreg_supported(eth_dev) && -- priv->sh->dv_regc0_mask) { -- priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, -- MLX5_FLOW_MREG_HTABLE_SZ, -- false, true, eth_dev, -- flow_dv_mreg_create_cb, -- flow_dv_mreg_match_cb, -- flow_dv_mreg_remove_cb, -- flow_dv_mreg_clone_cb, -- flow_dv_mreg_clone_free_cb); -- if (!priv->mreg_cp_tbl) { -- err = ENOMEM; -- goto error; -- } -- } - rte_spinlock_init(&priv->shared_act_sl); - mlx5_flow_counter_mode_config(eth_dev); - mlx5_flow_drop_action_config(eth_dev); -@@ -1737,8 +1747,6 @@ error: - priv->sh->config.dv_esw_en) - flow_hw_destroy_vport_action(eth_dev); - #endif -- if (priv->mreg_cp_tbl) -- mlx5_hlist_destroy(priv->mreg_cp_tbl); - if (priv->sh) - mlx5_os_free_shared_dr(priv); - if (priv->nl_socket_route >= 0) -@@ -2429,8 +2437,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, - list[ns].info.master = 0; - list[ns].info.representor = 0; - } -- if (list[ns].info.port_name == bd) -- ns++; -+ ns++; - break; - case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: - /* Fallthrough */ -@@ -2993,9 +3000,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, +diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c +index b02dac4952..2cb2050faf 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c 
++++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_sec.c +@@ -135,8 +135,8 @@ cnxk_nix_inl_custom_meta_pool_cb(uintptr_t pmpool, uintptr_t *mpool, const char + return -EINVAL; + } - if (priv->sh) { - if (priv->q_counters != NULL && -- strcmp(ctr_name, "out_of_buffer") == 0) -+ strcmp(ctr_name, "out_of_buffer") == 0) { -+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { -+ DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process"); -+ rte_errno = ENOTSUP; -+ return 1; -+ } - return mlx5_devx_cmd_queue_counter_query - (priv->q_counters, 0, (uint32_t *)stat); -+ } - MKSTR(path, "%s/ports/%d/hw_counters/%s", - priv->sh->ibdev_path, - priv->dev_port, -diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index 3a182de248..8d4a0a3dda 100644 ---- a/dpdk/drivers/net/mlx5/mlx5.c -+++ b/dpdk/drivers/net/mlx5/mlx5.c -@@ -1689,7 +1689,8 @@ mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh) - } else { - DRV_LOG(DEBUG, "ASO register: NONE"); - } -- mlx5_init_hws_flow_tags_registers(sh); -+ if (sh->config.dv_flow_en == 2) -+ mlx5_init_hws_flow_tags_registers(sh); +- rte_mempool_free(hp); + plt_free(hp->pool_config); ++ rte_mempool_free(hp); + + *aura_handle = 0; + *mpool = 0; +diff --git a/dpdk/drivers/net/cnxk/cnxk_flow.c b/dpdk/drivers/net/cnxk/cnxk_flow.c +index 08ab75e2bb..be0330fa04 100644 +--- a/dpdk/drivers/net/cnxk/cnxk_flow.c ++++ b/dpdk/drivers/net/cnxk/cnxk_flow.c +@@ -102,15 +102,19 @@ npc_rss_action_validate(struct rte_eth_dev *eth_dev, } - /** -@@ -2267,6 +2268,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) - mlx5_indirect_list_handles_release(dev); - #ifdef HAVE_MLX5_HWS_SUPPORT - flow_hw_destroy_vport_action(dev); -+ /* dr context will be closed after mlx5_os_free_shared_dr. */ - flow_hw_resource_release(dev); - flow_hw_clear_port_info(dev); - #endif -@@ -2279,7 +2281,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) - mlx5_free(priv->rxq_privs); - priv->rxq_privs = NULL; - } -- if (priv->txqs != NULL) { -+ if (priv->txqs != NULL && dev->data->tx_queues != NULL) { - /* XXX race condition if mlx5_tx_burst() is still running. */ - rte_delay_us_sleep(1000); - for (i = 0; (i != priv->txqs_n); ++i) -@@ -2288,16 +2290,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) - priv->txqs = NULL; - } - mlx5_proc_priv_uninit(dev); -+ if (priv->drop_queue.hrxq) -+ mlx5_drop_action_destroy(dev); - if (priv->q_counters) { - mlx5_devx_cmd_destroy(priv->q_counters); - priv->q_counters = NULL; - } -- if (priv->drop_queue.hrxq) -- mlx5_drop_action_destroy(dev); -- if (priv->mreg_cp_tbl) -- mlx5_hlist_destroy(priv->mreg_cp_tbl); - mlx5_mprq_free_mp(dev); - mlx5_os_free_shared_dr(priv); -+#ifdef HAVE_MLX5_HWS_SUPPORT -+ if (priv->dr_ctx) { -+ claim_zero(mlx5dr_context_close(priv->dr_ctx)); -+ priv->dr_ctx = NULL; -+ } -+#endif - if (priv->rss_conf.rss_key != NULL) - mlx5_free(priv->rss_conf.rss_key); - if (priv->reta_idx != NULL) -diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 263ebead7f..0c81bcab9f 100644 ---- a/dpdk/drivers/net/mlx5/mlx5.h -+++ b/dpdk/drivers/net/mlx5/mlx5.h -@@ -263,16 +263,29 @@ struct mlx5_counter_ctrl { - struct mlx5_xstats_ctrl { - /* Number of device stats. */ - uint16_t stats_n; -+ /* Number of device stats, for the 2nd port in bond. */ -+ uint16_t stats_n_2nd; - /* Number of device stats identified by PMD. */ -- uint16_t mlx5_stats_n; -+ uint16_t mlx5_stats_n; -+ /* First device counters index. */ -+ uint16_t dev_cnt_start; - /* Index in the device counters table. 
*/ - uint16_t dev_table_idx[MLX5_MAX_XSTATS]; -+ /* Index in the output table. */ -+ uint16_t xstats_o_idx[MLX5_MAX_XSTATS]; - uint64_t base[MLX5_MAX_XSTATS]; - uint64_t xstats[MLX5_MAX_XSTATS]; - uint64_t hw_stats[MLX5_MAX_XSTATS]; - struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; -+ /* Index in the device counters table, for the 2nd port in bond. */ -+ uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS]; -+ /* Index in the output table, for the 2nd port in bond. */ -+ uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS]; - }; + static void +-npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, +- const struct roc_npc_action *rss_action, +- uint32_t *flowkey_cfg) ++npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action, ++ uint32_t *flowkey_cfg, uint64_t default_rss_types) + { + const struct roc_npc_action_rss *rss; ++ uint64_t rss_types; -+/* xstats array size. */ -+extern const unsigned int xstats_n; + rss = (const struct roc_npc_action_rss *)rss_action->conf; ++ rss_types = rss->types; ++ /* If no RSS types are specified, use default one */ ++ if (rss_types == 0) ++ rss_types = default_rss_types; + +- *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level); ++ *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level); + } + + static int +@@ -204,7 +208,8 @@ cnxk_map_actions(struct rte_eth_dev *eth_dev, const struct rte_flow_attr *attr, + goto err_exit; + in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; + in_actions[i].conf = actions->conf; +- npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg); ++ npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, ++ eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + break; + + case RTE_FLOW_ACTION_TYPE_SECURITY: +@@ -503,6 +508,9 @@ cnxk_flow_get_aged_flows(struct rte_eth_dev *eth_dev, void **context, + + flow_age = &roc_npc->flow_age; + ++ if (!flow_age->age_flow_refcnt) ++ return 0; + - struct mlx5_stats_ctrl { - /* Base for imissed counter. */ - uint64_t imissed_base; -@@ -1473,6 +1486,8 @@ struct mlx5_dev_ctx_shared { - struct mlx5_hlist *flow_tbls; /* SWS flow table. */ - struct mlx5_hlist *groups; /* HWS flow group. */ - }; -+ struct mlx5_hlist *mreg_cp_tbl; -+ /* Hash table of Rx metadata register copy table. */ - struct mlx5_flow_tunnel_hub *tunnel_hub; - /* Direct Rules tables for FDB, NIC TX+RX */ - void *dr_drop_action; /* Pointer to DR drop action, any domain. */ -@@ -1862,11 +1877,7 @@ struct mlx5_priv { - rte_spinlock_t hw_ctrl_lock; - LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; - LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; -- struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; -- struct rte_flow_template_table *hw_esw_sq_miss_tbl; -- struct rte_flow_template_table *hw_esw_zero_tbl; -- struct rte_flow_template_table *hw_tx_meta_cpy_tbl; -- struct rte_flow_template_table *hw_lacp_rx_tbl; -+ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; - struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; - struct rte_flow_actions_template *hw_tx_repr_tagging_at; - struct rte_flow_template_table *hw_tx_repr_tagging_tbl; -@@ -1900,8 +1911,6 @@ struct mlx5_priv { - int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ - int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ - struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */ -- struct mlx5_hlist *mreg_cp_tbl; -- /* Hash table of Rx metadata register copy table. */ - struct mlx5_mtr_config mtr_config; /* Meter configuration */ - uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. 
*/ - struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */ -@@ -1989,6 +1998,30 @@ enum dr_dump_rec_type { - DR_DUMP_REC_TYPE_PMD_COUNTER = 4430, + do { + sn = plt_seqcount_read_begin(&flow_age->seq_cnt); + +diff --git a/dpdk/drivers/net/cnxk/version.map b/dpdk/drivers/net/cnxk/version.map +index 77f574bb16..078456a9ed 100644 +--- a/dpdk/drivers/net/cnxk/version.map ++++ b/dpdk/drivers/net/cnxk/version.map +@@ -16,4 +16,5 @@ EXPERIMENTAL { + INTERNAL { + global: + cnxk_nix_inb_mode_set; ++ cnxk_ethdev_rx_offload_cb_register; }; +diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c +index 8a4e1419b4..56a9a85345 100644 +--- a/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c ++++ b/dpdk/drivers/net/cpfl/cpfl_flow_engine_fxp.c +@@ -95,7 +95,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev, -+#if defined(HAVE_MLX5_HWS_SUPPORT) -+static __rte_always_inline struct mlx5_hw_q_job * -+flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue) -+{ -+ MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size); -+ return priv->hw_q[queue].job_idx ? -+ priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL; -+} -+ -+static __rte_always_inline void -+flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue) -+{ -+ MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size); -+ priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job; -+} -+ -+struct mlx5_hw_q_job * -+mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue, -+ const struct rte_flow_action_handle *handle, -+ void *user_data, void *query_data, -+ enum mlx5_hw_job_type type, -+ struct rte_flow_error *error); -+#endif -+ - /** - * Indicates whether HW objects operations can be created by DevX. - * -@@ -2131,8 +2164,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, - struct rte_dev_eeprom_info *info); - int mlx5_os_read_dev_stat(struct mlx5_priv *priv, - const char *ctr_name, uint64_t *stat); --int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats); --int mlx5_os_get_stats_n(struct rte_eth_dev *dev); -+int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats); -+int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, -+ uint16_t *n_stats, uint16_t *n_stats_sec); - void mlx5_os_stats_init(struct rte_eth_dev *dev); - int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); + ret = cpfl_rule_process(itf, ad->ctlqp[cpq_id], ad->ctlqp[cpq_id + 1], + rim->rules, rim->rule_num, true); +- if (ret < 0) { ++ if (ret != 0) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "cpfl filter create flow fail"); + rte_free(rim); +@@ -292,6 +292,12 @@ cpfl_fxp_parse_action(struct cpfl_itf *itf, + + is_vsi = (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR || + dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR); ++ /* Added checks to throw an error for the invalid action types. 
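/*
 * A minimal sketch of the pattern used by flow_hw_job_get()/flow_hw_job_put()
 * above: the per-queue job array is treated as a LIFO stack of free entries,
 * indexed by a single counter.  The generic pointer pool below shows the same
 * idea with simplified names; the asserts mirror the bounds checks.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct ptr_pool {
	void **slots;   /* array of currently free entries */
	size_t idx;     /* number of free entries */
	size_t size;    /* capacity of the array */
};

static void *
pool_get(struct ptr_pool *p)
{
	assert(p->idx <= p->size);
	return p->idx ? p->slots[--p->idx] : NULL; /* NULL when exhausted */
}

static void
pool_put(struct ptr_pool *p, void *e)
{
	assert(p->idx < p->size);
	p->slots[p->idx++] = e;
}

int
main(void)
{
	int a = 1, b = 2;
	void *slots[2] = { &a, &b };
	struct ptr_pool p = { slots, 2, 2 };

	void *x = pool_get(&p);                 /* &b: last in, first out */
	void *y = pool_get(&p);                 /* &a */
	printf("%d %d %d\n", *(int *)x, *(int *)y, pool_get(&p) == NULL);
	pool_put(&p, y);                        /* return one entry */
	return 0;
}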
*/ ++ if (action_type == RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR && ++ dst_itf->type == CPFL_ITF_TYPE_REPRESENTOR) { ++ PMD_DRV_LOG(ERR, "Cannot use port_representor action for the represented_port"); ++ goto err; ++ } + if (is_vsi) + dev_id = cpfl_get_vsi_id(dst_itf); + else +diff --git a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c +index a8f0488f21..a67c773d18 100644 +--- a/dpdk/drivers/net/cpfl/cpfl_flow_parser.c ++++ b/dpdk/drivers/net/cpfl/cpfl_flow_parser.c +@@ -198,6 +198,8 @@ cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields, + for (i = 0; i < len; i++) { + json_t *object; + const char *name, *mask; ++ uint32_t mask_32b = 0; ++ int ret; -@@ -2394,11 +2428,12 @@ int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh); - int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh); - void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh, - enum mlx5_access_aso_opc_mod aso_opc_mod); --int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, -- struct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk, -- void *user_data, bool push); --int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue, -- struct mlx5_aso_mtr *mtr); -+int mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue, -+ struct mlx5_aso_mtr *mtr, -+ struct mlx5_mtr_bulk *bulk, -+ struct mlx5_hw_q_job *job, bool push); -+int mlx5_aso_mtr_wait(struct mlx5_priv *priv, -+ struct mlx5_aso_mtr *mtr, bool is_tmpl_api); - int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, - struct mlx5_aso_ct_action *ct, - const struct rte_flow_action_conntrack *profile, -diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c -index 9fa400fc48..4f08ddf899 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_devx.c -+++ b/dpdk/drivers/net/mlx5/mlx5_devx.c -@@ -592,7 +592,8 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq) - DRV_LOG(ERR, "Failed to create CQ."); - goto error; - } -- rxq_data->delay_drop = priv->config.std_delay_drop; -+ if (!rxq_data->shared || !rxq_ctrl->started) -+ rxq_data->delay_drop = priv->config.std_delay_drop; - /* Create RQ using DevX API. */ - ret = mlx5_rxq_create_devx_rq_resources(rxq); - if (ret) { -diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -index ab30e2c215..ec4bdd8af1 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c -+++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -@@ -146,6 +146,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev) - ret = mlx5_proc_priv_init(dev); - if (ret) - return ret; -+ ret = mlx5_dev_set_mtu(dev, dev->data->mtu); -+ if (ret) { -+ DRV_LOG(ERR, "port %u failed to set MTU to %u", dev->data->port_id, -+ dev->data->mtu); -+ return ret; -+ } + object = json_array_get(ob_fields, i); + name = cpfl_json_t_to_string(object, "name"); +@@ -213,20 +215,28 @@ cpfl_flow_js_pattern_key_proto_field(json_t *ob_fields, + + if (js_field->type == RTE_FLOW_ITEM_TYPE_ETH || + js_field->type == RTE_FLOW_ITEM_TYPE_IPV4) { +- mask = cpfl_json_t_to_string(object, "mask"); +- if (!mask) { +- PMD_DRV_LOG(ERR, "Can not parse string 'mask'."); +- goto err; +- } +- if (strlen(mask) > CPFL_JS_STR_SIZE - 1) { +- PMD_DRV_LOG(ERR, "The 'mask' is too long."); +- goto err; ++ /* Added a check for parsing mask value of the next_proto_id field. 
*/ ++ if (strcmp(name, "next_proto_id") == 0) { ++ ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b); ++ if (ret < 0) { ++ PMD_DRV_LOG(ERR, "Cannot parse uint32 'mask'."); ++ goto err; ++ } ++ js_field->fields[i].mask_32b = mask_32b; ++ } else { ++ mask = cpfl_json_t_to_string(object, "mask"); ++ if (!mask) { ++ PMD_DRV_LOG(ERR, "Can not parse string 'mask'."); ++ goto err; ++ } ++ if (rte_strscpy(js_field->fields[i].mask, ++ mask, CPFL_JS_STR_SIZE) < 0) { ++ PMD_DRV_LOG(ERR, "The 'mask' is too long."); ++ goto err; ++ } + } +- strncpy(js_field->fields[i].mask, mask, CPFL_JS_STR_SIZE - 1); +- } else { +- uint32_t mask_32b; +- int ret; + ++ } else { + ret = cpfl_json_t_to_uint32(object, "mask", &mask_32b); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Can not parse uint32 'mask'."); +@@ -737,7 +747,6 @@ cpfl_flow_js_mr_layout(json_t *ob_layouts, struct cpfl_flow_js_mr_action_mod *js return 0; + + err: +- rte_free(js_mod->layout); + return -EINVAL; } -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index 85e8c77c81..fdc7c3ea54 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow.c -+++ b/dpdk/drivers/net/mlx5/mlx5_flow.c -@@ -1953,18 +1953,20 @@ mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev) - if (rxq == NULL || rxq->ctrl == NULL) +@@ -1696,7 +1705,7 @@ cpfl_parse_check_prog_action(struct cpfl_flow_js_mr_key_action *key_act, + bool check_name; + + check_name = key_act->prog.has_name ? strcmp(prog->name, key_act->prog.name) == 0 +- : atol(prog->name) == key_act->prog.id; ++ : (uint32_t)atol(prog->name) == key_act->prog.id; + if (!check_name) { + PMD_DRV_LOG(ERR, "Not support this prog type: %s.", prog->name); + return -EINVAL; +@@ -2020,7 +2029,7 @@ cpfl_metadata_write_port_id(struct cpfl_itf *itf) + + dev_id = cpfl_get_port_id(itf); + if (dev_id == CPFL_INVALID_HW_ID) { +- PMD_DRV_LOG(ERR, "fail to get hw ID\n"); ++ PMD_DRV_LOG(ERR, "fail to get hw ID"); + return false; + } + cpfl_metadata_write16(&itf->adapter->meta, type, offset, dev_id << 3); +diff --git a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c +index 0e710a007b..42553c9641 100644 +--- a/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c ++++ b/dpdk/drivers/net/cpfl/cpfl_fxp_rule.c +@@ -77,7 +77,7 @@ cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_m + + if (ret && ret != CPFL_ERR_CTLQ_NO_WORK && ret != CPFL_ERR_CTLQ_ERROR && + ret != CPFL_ERR_CTLQ_EMPTY) { +- PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret); ++ PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. 
err: 0x%4x", ret); + retries++; continue; - data = &rxq->ctrl->rxq; -- if (!rte_flow_dynf_metadata_avail()) { -- data->dynf_meta = 0; -- data->flow_meta_mask = 0; -- data->flow_meta_offset = -1; -- data->flow_meta_port_mask = 0; -- } else { -- data->dynf_meta = 1; -- data->flow_meta_mask = rte_flow_dynf_metadata_mask; -- data->flow_meta_offset = rte_flow_dynf_metadata_offs; -- data->flow_meta_port_mask = priv->sh->dv_meta_mask; -+ if (!data->shared || !rxq->ctrl->started) { -+ if (!rte_flow_dynf_metadata_avail()) { -+ data->dynf_meta = 0; -+ data->flow_meta_mask = 0; -+ data->flow_meta_offset = -1; -+ data->flow_meta_port_mask = 0; -+ } else { -+ data->dynf_meta = 1; -+ data->flow_meta_mask = rte_flow_dynf_metadata_mask; -+ data->flow_meta_offset = rte_flow_dynf_metadata_offs; -+ data->flow_meta_port_mask = priv->sh->dv_meta_mask; + } +@@ -92,6 +92,14 @@ cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_m + + /* TODO - process rx controlq message */ + for (i = 0; i < num_q_msg; i++) { ++ ret = q_msg[i].status; ++ if (ret != CPFL_CFG_PKT_ERR_OK && ++ q_msg[i].opcode != cpfl_ctlq_sem_query_del_rule_hash_addr) { ++ PMD_INIT_LOG(ERR, "Failed to process rx_ctrlq msg: %s", ++ cpfl_cfg_pkt_errormsg[ret]); ++ return ret; + } -+ data->mark_flag = mark_flag; ++ + if (q_msg[i].data_len > 0) + dma = q_msg[i].ctx.indirect.payload; + else +@@ -100,7 +108,7 @@ cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_m + buff_cnt = dma ? 1 : 0; + ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma); + if (ret) +- PMD_INIT_LOG(WARNING, "could not posted recv bufs\n"); ++ PMD_INIT_LOG(WARNING, "could not posted recv bufs"); } -- data->mark_flag = mark_flag; + break; } - } +@@ -123,7 +131,7 @@ cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, -@@ -2504,7 +2506,7 @@ int - flow_validate_modify_field_level(const struct rte_flow_action_modify_data *data, - struct rte_flow_error *error) - { -- if (data->level == 0) -+ if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM) - return 0; - if (data->field != RTE_FLOW_FIELD_TAG && - data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) -@@ -5228,8 +5230,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, - }; + /* prepare rule blob */ + if (!dma->va) { +- PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); ++ PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; +@@ -168,7 +176,7 @@ cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma, + uint16_t cfg_ctrl; - /* Check if already registered. 
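/*
 * A minimal sketch of the error-reporting scheme the cpfl hunks above add:
 * a designated-initializer table maps firmware status codes to readable
 * strings, and each completion's status is checked before the message is
 * used.  The enum values and names here are reduced stand-ins for the
 * cpfl_cfg_pkt_error_code set, not the real definitions.
 */
#include <stdio.h>

enum cfg_pkt_err {
	CFG_PKT_OK = 0,
	CFG_PKT_ERR_ESRCH = 1,
	CFG_PKT_ERR_EEXIST = 2,
	CFG_PKT_ERR_ENOSPC = 4,
	CFG_PKT_ERR_MAX
};

static const char *const cfg_pkt_errormsg[CFG_PKT_ERR_MAX] = {
	[CFG_PKT_ERR_ESRCH] = "Bad opcode",
	[CFG_PKT_ERR_EEXIST] = "The rule conflicts with an existing one",
	[CFG_PKT_ERR_ENOSPC] = "No space left in the table",
};

/* Return a printable message for a status code, tolerating gaps. */
static const char *
cfg_pkt_strerror(int status)
{
	if (status < 0 || status >= CFG_PKT_ERR_MAX || !cfg_pkt_errormsg[status])
		return "Unknown error";
	return cfg_pkt_errormsg[status];
}

int
main(void)
{
	int status = CFG_PKT_ERR_EEXIST; /* as if read from a completion */

	if (status != CFG_PKT_OK)
		printf("rule programming failed: %s\n", cfg_pkt_strerror(status));
	return 0;
}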
*/ -- MLX5_ASSERT(priv->mreg_cp_tbl); -- entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx); -+ MLX5_ASSERT(priv->sh->mreg_cp_tbl); -+ entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx); - if (!entry) - return NULL; - return container_of(entry, struct mlx5_flow_mreg_copy_resource, -@@ -5268,10 +5270,10 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev, - return; - mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], - flow->rix_mreg_copy); -- if (!mcp_res || !priv->mreg_cp_tbl) -+ if (!mcp_res || !priv->sh->mreg_cp_tbl) - return; - MLX5_ASSERT(mcp_res->rix_flow); -- mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent); -+ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent); - flow->rix_mreg_copy = 0; - } + if (!dma->va) { +- PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__); ++ PMD_INIT_LOG(ERR, "dma mem passed to %s is null", __func__); + return -1; + } + blob = (union cpfl_rule_cfg_pkt_record *)dma->va; +diff --git a/dpdk/drivers/net/cpfl/cpfl_rules.h b/dpdk/drivers/net/cpfl/cpfl_rules.h +index d23eae8e91..10569b1fdc 100644 +--- a/dpdk/drivers/net/cpfl/cpfl_rules.h ++++ b/dpdk/drivers/net/cpfl/cpfl_rules.h +@@ -62,6 +62,17 @@ enum cpfl_cfg_pkt_error_code { + CPFL_CFG_PKT_ERR_EMAXCOL = 9 /* Max Hash Collision */ + }; -@@ -5293,14 +5295,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) - uint32_t mark_id; ++static const char * const cpfl_cfg_pkt_errormsg[] = { ++ [CPFL_CFG_PKT_ERR_ESRCH] = "Bad opcode", ++ [CPFL_CFG_PKT_ERR_EEXIST] = "The rule conflicts with already existed one", ++ [CPFL_CFG_PKT_ERR_ENOSPC] = "No space left in the table", ++ [CPFL_CFG_PKT_ERR_ERANGE] = "Parameter out of range", ++ [CPFL_CFG_PKT_ERR_ESBCOMP] = "Completion error", ++ [CPFL_CFG_PKT_ERR_ENOPIN] = "Entry cannot be pinned in cache", ++ [CPFL_CFG_PKT_ERR_ENOTFND] = "Entry does not exist", ++ [CPFL_CFG_PKT_ERR_EMAXCOL] = "Maximum Hash Collisions reached", ++}; ++ + /* macros for creating context for rule descriptor */ + #define MEV_RULE_VSI_ID_S 0 + #define MEV_RULE_VSI_ID_M \ +diff --git a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +index 8cc3d9f257..781f48cfac 100644 +--- a/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c ++++ b/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c +@@ -211,9 +211,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + unsigned int i, work_done, budget = 32; + struct link_config *lc = &pi->link_cfg; + struct adapter *adapter = pi->adapter; +- struct rte_eth_link new_link = { 0 }; + u8 old_link = pi->link_cfg.link_ok; + struct sge *s = &adapter->sge; ++ struct rte_eth_link new_link; - /* Check if default flow is registered. 
*/ -- if (!priv->mreg_cp_tbl) -+ if (!priv->sh->mreg_cp_tbl) - return; - mark_id = MLX5_DEFAULT_COPY_ID; - ctx.data = &mark_id; -- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx); -+ entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx); - if (!entry) - return; -- mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); -+ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry); - } + for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) { + if (!s->fw_evtq.desc) +@@ -232,6 +232,7 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, + rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS); + } - /** -@@ -5338,7 +5340,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, - */ - mark_id = MLX5_DEFAULT_COPY_ID; - ctx.data = &mark_id; -- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx)) -+ if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx)) - return 0; - mcp_res = flow_mreg_add_copy_action(dev, mark_id, error); - if (!mcp_res) -@@ -5492,6 +5494,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, - } - break; - case RTE_FLOW_ACTION_TYPE_COUNT: -+ case RTE_FLOW_ACTION_TYPE_AGE: - if (encap) { - rte_memcpy(actions_tx, actions, - sizeof(struct rte_flow_action)); -@@ -5817,8 +5820,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - struct mlx5_rte_flow_item_tag *tag_item_spec; - struct mlx5_rte_flow_item_tag *tag_item_mask; - uint32_t tag_id = 0; -- struct rte_flow_item *vlan_item_dst = NULL; -- const struct rte_flow_item *vlan_item_src = NULL; -+ bool vlan_actions; -+ struct rte_flow_item *orig_sfx_items = sfx_items; - const struct rte_flow_item *orig_items = items; - struct rte_flow_action *hw_mtr_action; - struct rte_flow_action *action_pre_head = NULL; -@@ -5835,6 +5838,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, ++ memset(&new_link, 0, sizeof(new_link)); + new_link.link_status = cxgbe_force_linkup(adapter) ? + RTE_ETH_LINK_UP : pi->link_cfg.link_ok; + new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0; +diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +index ef4c06db6a..6fdbe80334 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c ++++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include - /* Prepare the suffix subflow items. */ - tag_item = sfx_items++; -+ tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - int item_type = items->type; + #include + #include +@@ -165,9 +166,15 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE; + uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; ++ struct fman_if *fif = dev->process_private; -@@ -5857,10 +5861,13 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - sfx_items++; - break; - case RTE_FLOW_ITEM_TYPE_VLAN: -- /* Determine if copy vlan item below. */ -- vlan_item_src = items; -- vlan_item_dst = sfx_items++; -- vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; -+ /* -+ * Copy VLAN items in case VLAN actions are performed. -+ * If there are no VLAN actions, these items will be VOID. -+ */ -+ memcpy(sfx_items, items, sizeof(*sfx_items)); -+ sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; -+ sfx_items++; - break; - default: - break; -@@ -5877,6 +5884,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - tag_action = actions_pre++; - } - /* Prepare the actions for prefix and suffix flow. 
*/ -+ vlan_actions = false; - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - struct rte_flow_action *action_cur = NULL; + PMD_INIT_FUNC_TRACE(); -@@ -5907,16 +5915,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - break; - case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: -- if (vlan_item_dst && vlan_item_src) { -- memcpy(vlan_item_dst, vlan_item_src, -- sizeof(*vlan_item_dst)); -- /* -- * Convert to internal match item, it is used -- * for vlan push and set vid. -- */ -- vlan_item_dst->type = (enum rte_flow_item_type) -- MLX5_RTE_FLOW_ITEM_TYPE_VLAN; -- } -+ vlan_actions = true; - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - if (fm->def_policy) -@@ -5931,6 +5930,14 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - actions_sfx++ : actions_pre++; - memcpy(action_cur, actions, sizeof(struct rte_flow_action)); - } -+ /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ -+ if (!vlan_actions) { -+ struct rte_flow_item *it = orig_sfx_items; -+ -+ for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) -+ if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) -+ it->type = RTE_FLOW_ITEM_TYPE_VOID; ++ if (fif->is_shared_mac) { ++ DPAA_PMD_ERR("Cannot configure mtu from DPDK in VSP mode."); ++ return -ENOTSUP; + } - /* Add end action to the actions. */ - actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; - if (priv->sh->meter_aso_en) { -@@ -6020,8 +6027,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev, - tag_action->type = (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_TAG; - tag_action->conf = set_tag; -- tag_item->type = (enum rte_flow_item_type) -- MLX5_RTE_FLOW_ITEM_TYPE_TAG; - tag_item->spec = tag_item_spec; - tag_item->last = NULL; - tag_item->mask = tag_item_mask; -@@ -6849,6 +6854,19 @@ flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, - &drop_split_info, error); - } ++ + /* + * Refuse mtu that requires the support of scattered packets + * when this feature has not been enabled before. +@@ -206,7 +213,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + struct rte_intr_handle *intr_handle; + uint32_t max_rx_pktlen; + int speed, duplex; +- int ret, rx_status; ++ int ret, rx_status, socket_fd; ++ struct ifreq ifr; -+static int -+flow_count_vlan_items(const struct rte_flow_item items[]) -+{ -+ int items_n = 0; + PMD_INIT_FUNC_TRACE(); + +@@ -222,6 +230,26 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + dpaa_intf->name); + return -EHOSTDOWN; + } + -+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { -+ if (items->type == RTE_FLOW_ITEM_TYPE_VLAN || -+ items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) -+ items_n++; -+ } -+ return items_n; -+} ++ socket_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); ++ if (socket_fd == -1) { ++ DPAA_PMD_ERR("Cannot open IF socket"); ++ return -errno; ++ } + - /** - * The splitting for meter feature. - * -@@ -6904,6 +6922,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, - size_t act_size; - size_t item_size; - int actions_n = 0; -+ int vlan_items_n = 0; - int ret = 0; - - if (priv->mtr_en) -@@ -6963,9 +6982,11 @@ flow_create_split_meter(struct rte_eth_dev *dev, - act_size = (sizeof(struct rte_flow_action) * - (actions_n + METER_PREFIX_ACTION)) + - sizeof(struct mlx5_rte_flow_action_set_tag); -- /* Suffix items: tag, vlan, port id, end. */ --#define METER_SUFFIX_ITEM 4 -- item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + -+ /* Flow can have multiple VLAN items. 
Account for them in suffix items. */ -+ vlan_items_n = flow_count_vlan_items(items); -+ /* Suffix items: tag, [vlans], port id, end. */ -+#define METER_SUFFIX_ITEM 3 -+ item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) + - sizeof(struct mlx5_rte_flow_item_tag) * 2; - sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), - 0, SOCKET_ID_ANY); -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 6dde9de688..bde7dc43a8 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow.h -+++ b/dpdk/drivers/net/mlx5/mlx5_flow.h -@@ -77,7 +77,7 @@ enum mlx5_indirect_type { - /* Now, the maximal ports will be supported is 16, action number is 32M. */ - #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 - --#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22 -+#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25 - #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) - - /* 29-31: type, 25-28: owner port, 0-24: index */ -@@ -1759,6 +1759,28 @@ flow_hw_get_reg_id_from_ctx(void *dr_ctx, - return REG_NON; - } - -+static __rte_always_inline int -+flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val) -+{ -+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) -+ uint32_t port; -+ -+ MLX5_ETH_FOREACH_DEV(port, NULL) { -+ struct mlx5_priv *priv; -+ priv = rte_eth_devices[port].data->dev_private; ++ strncpy(ifr.ifr_name, dpaa_intf->name, IFNAMSIZ - 1); + -+ if (priv->dr_ctx == dr_ctx) { -+ *port_val = port; -+ return 0; ++ if (ioctl(socket_fd, SIOCGIFMTU, &ifr) < 0) { ++ DPAA_PMD_ERR("Cannot get interface mtu"); ++ close(socket_fd); ++ return -errno; + } -+ } -+#else -+ RTE_SET_USED(dr_ctx); -+ RTE_SET_USED(port_val); -+#endif -+ return -EINVAL; -+} + - void flow_hw_set_port_info(struct rte_eth_dev *dev); - void flow_hw_clear_port_info(struct rte_eth_dev *dev); - int flow_hw_create_vport_action(struct rte_eth_dev *dev); -@@ -2446,6 +2468,25 @@ struct mlx5_flow_hw_ctrl_rx { - [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; - }; - -+/* Contains all templates required for control flow rules in FDB with HWS. 
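/*
 * A minimal sketch of the sizing idea behind flow_count_vlan_items() above:
 * walk the END-terminated pattern once, count the VLAN entries, and use that
 * count to size the suffix item array instead of assuming a single VLAN.
 * The enum and item struct below are trimmed stand-ins for the rte_flow
 * types, not the real definitions.
 */
#include <stdio.h>

enum item_type { ITEM_END = 0, ITEM_ETH, ITEM_VLAN, ITEM_IPV4 };

struct flow_item {
	enum item_type type;
};

/* Count VLAN items in an END-terminated pattern. */
static int
count_vlan_items(const struct flow_item items[])
{
	int n = 0;

	for (; items->type != ITEM_END; items++)
		if (items->type == ITEM_VLAN)
			n++;
	return n;
}

int
main(void)
{
	const struct flow_item pattern[] = {
		{ ITEM_ETH }, { ITEM_VLAN }, { ITEM_VLAN },
		{ ITEM_IPV4 }, { ITEM_END },
	};

	/* A suffix array would reserve base slots plus one per VLAN item. */
	printf("vlan items: %d\n", count_vlan_items(pattern));
	return 0;
}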
*/ -+struct mlx5_flow_hw_ctrl_fdb { -+ struct rte_flow_pattern_template *esw_mgr_items_tmpl; -+ struct rte_flow_actions_template *regc_jump_actions_tmpl; -+ struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; -+ struct rte_flow_pattern_template *regc_sq_items_tmpl; -+ struct rte_flow_actions_template *port_actions_tmpl; -+ struct rte_flow_template_table *hw_esw_sq_miss_tbl; -+ struct rte_flow_pattern_template *port_items_tmpl; -+ struct rte_flow_actions_template *jump_one_actions_tmpl; -+ struct rte_flow_template_table *hw_esw_zero_tbl; -+ struct rte_flow_pattern_template *tx_meta_items_tmpl; -+ struct rte_flow_actions_template *tx_meta_actions_tmpl; -+ struct rte_flow_template_table *hw_tx_meta_cpy_tbl; -+ struct rte_flow_pattern_template *lacp_rx_items_tmpl; -+ struct rte_flow_actions_template *lacp_rx_actions_tmpl; -+ struct rte_flow_template_table *hw_lacp_rx_tbl; -+}; ++ close(socket_fd); ++ DPAA_PMD_INFO("Using kernel configured mtu size(%u)", ++ ifr.ifr_mtu); + - #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) - #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) - #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c -index f311443472..ab9eb21e01 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c -+++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c -@@ -792,7 +792,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, - struct mlx5_aso_mtr *aso_mtr, - struct mlx5_mtr_bulk *bulk, - bool need_lock, -- void *user_data, -+ struct mlx5_hw_q_job *job, - bool push) - { - volatile struct mlx5_aso_wqe *wqe = NULL; -@@ -819,7 +819,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, - rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]); - /* Fill next WQE. */ - fm = &aso_mtr->fm; -- sq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr; -+ sq->elts[sq->head & mask].user_data = job ? 
job : (void *)aso_mtr; - if (aso_mtr->type == ASO_METER_INDIRECT) { - if (likely(sh->config.dv_flow_en == 2)) - pool = aso_mtr->pool; -@@ -897,24 +897,6 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, - return 1; - } ++ eth_conf->rxmode.mtu = ifr.ifr_mtu; + } --static void --mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums) --{ -- uint16_t size = 1 << sq->log_desc_n; -- uint16_t mask = size - 1; -- uint16_t i; -- struct mlx5_aso_mtr *aso_mtr = NULL; -- uint8_t exp_state = ASO_METER_WAIT; -- -- for (i = 0; i < aso_mtrs_nums; ++i) { -- aso_mtr = sq->elts[(sq->tail + i) & mask].mtr; -- MLX5_ASSERT(aso_mtr); -- (void)__atomic_compare_exchange_n(&aso_mtr->state, -- &exp_state, ASO_METER_READY, -- false, __ATOMIC_RELAXED, __ATOMIC_RELAXED); -- } --} -- - static void - mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) - { -@@ -925,7 +907,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) - uint32_t idx; - uint32_t next_idx = cq->cq_ci & mask; - uint16_t max; -- uint16_t n = 0; -+ uint16_t i, n = 0; + /* Rx offloads which are enabled by default */ +@@ -249,7 +277,8 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev) + max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; + } + +- fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); ++ if (!fif->is_shared_mac) ++ fman_if_set_maxfrm(dev->process_private, max_rx_pktlen); + + if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { + DPAA_PMD_DEBUG("enabling scatter mode"); +@@ -363,7 +392,8 @@ dpaa_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, +- RTE_PTYPE_TUNNEL_ESP ++ RTE_PTYPE_TUNNEL_ESP, ++ RTE_PTYPE_UNKNOWN + }; + + PMD_INIT_FUNC_TRACE(); +@@ -941,7 +971,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + struct fman_if *fif = dev->process_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; + struct qm_mcc_initfq opts = {0}; +- u32 flags = 0; ++ u32 ch_id, flags = 0; int ret; + u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + uint32_t max_rx_pktlen; +@@ -1065,7 +1095,9 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + DPAA_IF_RX_CONTEXT_STASH; + + /*Create a channel and associate given queue with the channel*/ +- qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); ++ qman_alloc_pool_range(&ch_id, 1, 1, 0); ++ rxq->ch_id = (u16)ch_id; ++ + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = rxq->ch_id; + opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; +diff --git a/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/dpdk/drivers/net/dpaa/dpaa_rxtx.c +index ce4f3d6c85..018d55bbdc 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_rxtx.c ++++ b/dpdk/drivers/net/dpaa/dpaa_rxtx.c +@@ -1034,7 +1034,7 @@ reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) + /* Copy the data */ + data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); + +- rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf, ++ rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf, + void *, offset1), bytes_to_copy); + + /* Set new offsets and the temp buffers */ +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +index 8e610b6bba..873121524f 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +@@ -728,7 +728,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, + + total_nb_rx_desc += nb_rx_desc; + if (total_nb_rx_desc > MAX_NB_RX_DESC) { +- DPAA2_PMD_WARN("\nTotal 
nb_rx_desc exceeds %d limit. Please use Normal buffers", ++ DPAA2_PMD_WARN("Total nb_rx_desc exceeds %d limit. Please use Normal buffers", + MAX_NB_RX_DESC); + DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running dynamic_dpl.sh script"); + } +@@ -1063,7 +1063,7 @@ dpaa2_dev_rx_queue_count(void *rx_queue) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return -EINVAL; + } +@@ -1390,8 +1390,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev) + for (i = 0; i < MAX_TCS; i++) + rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); - if (need_lock) -@@ -957,7 +939,19 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) - cq->cq_ci++; - } while (1); - if (likely(n)) { -- mlx5_aso_mtrs_status_update(sq, n); -+ uint8_t exp_state = ASO_METER_WAIT; -+ struct mlx5_aso_mtr *aso_mtr; -+ __rte_unused bool verdict; -+ -+ for (i = 0; i < n; ++i) { -+ aso_mtr = sq->elts[(sq->tail + i) & mask].mtr; -+ MLX5_ASSERT(aso_mtr); -+ verdict = __atomic_compare_exchange_n(&aso_mtr->state, -+ &exp_state, ASO_METER_READY, -+ false, __ATOMIC_RELAXED, -+ __ATOMIC_RELAXED); -+ MLX5_ASSERT(verdict); -+ } - sq->tail += n; - rte_io_wmb(); - cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci); -@@ -966,6 +960,82 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) - rte_spinlock_unlock(&sq->sqsl); - } +- if (priv->extract.qos_extract_param) +- rte_free((void *)(size_t)priv->extract.qos_extract_param); ++ rte_free((void *)(size_t)priv->extract.qos_extract_param); -+static __rte_always_inline struct mlx5_aso_sq * -+mlx5_aso_mtr_select_sq(struct mlx5_dev_ctx_shared *sh, uint32_t queue, -+ struct mlx5_aso_mtr *mtr, bool *need_lock) -+{ -+ struct mlx5_aso_sq *sq; -+ -+ if (likely(sh->config.dv_flow_en == 2) && -+ mtr->type == ASO_METER_INDIRECT) { -+ if (queue == MLX5_HW_INV_QUEUE) { -+ sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; -+ *need_lock = true; -+ } else { -+ sq = &mtr->pool->sq[queue]; -+ *need_lock = false; -+ } -+ } else { -+ sq = &sh->mtrmng->pools_mng.sq; -+ *need_lock = true; -+ } -+ return sq; -+} -+ -+#if defined(HAVE_MLX5_HWS_SUPPORT) -+static void -+mlx5_aso_poll_cq_mtr_hws(struct mlx5_priv *priv, struct mlx5_aso_sq *sq) -+{ -+#define MLX5_HWS_MTR_CMPL_NUM 4 -+ -+ int i, ret; -+ struct mlx5_aso_mtr *mtr; -+ uint8_t exp_state = ASO_METER_WAIT; -+ struct rte_flow_op_result res[MLX5_HWS_MTR_CMPL_NUM]; -+ __rte_unused bool verdict; -+ -+ rte_spinlock_lock(&sq->sqsl); -+repeat: -+ ret = mlx5_aso_pull_completion(sq, res, MLX5_HWS_MTR_CMPL_NUM); -+ if (ret) { -+ for (i = 0; i < ret; i++) { -+ struct mlx5_hw_q_job *job = res[i].user_data; -+ -+ MLX5_ASSERT(job); -+ mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, -+ MLX5_INDIRECT_ACTION_IDX_GET(job->action)); -+ MLX5_ASSERT(mtr); -+ verdict = __atomic_compare_exchange_n(&mtr->state, -+ &exp_state, ASO_METER_READY, -+ false, __ATOMIC_RELAXED, -+ __ATOMIC_RELAXED); -+ MLX5_ASSERT(verdict); -+ flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv)); -+ } -+ if (ret == MLX5_HWS_MTR_CMPL_NUM) -+ goto repeat; -+ } -+ rte_spinlock_unlock(&sq->sqsl); -+ -+#undef MLX5_HWS_MTR_CMPL_NUM -+} -+#else -+static void -+mlx5_aso_poll_cq_mtr_hws(__rte_unused struct mlx5_priv *priv, __rte_unused struct mlx5_aso_sq *sq) -+{ -+ MLX5_ASSERT(false); -+} -+#endif -+ -+static void -+mlx5_aso_poll_cq_mtr_sws(__rte_unused struct mlx5_priv *priv, -+ struct mlx5_aso_sq *sq) -+{ -+ mlx5_aso_mtr_completion_handle(sq, 
true); -+} -+ -+typedef void (*poll_cq_t)(struct mlx5_priv *, struct mlx5_aso_sq *); -+ - /** - * Update meter parameter by send WQE. - * -@@ -980,39 +1050,29 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ - int --mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, -- struct mlx5_aso_mtr *mtr, -- struct mlx5_mtr_bulk *bulk, -- void *user_data, -- bool push) -+mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue, -+ struct mlx5_aso_mtr *mtr, -+ struct mlx5_mtr_bulk *bulk, -+ struct mlx5_hw_q_job *job, bool push) - { -- struct mlx5_aso_sq *sq; -- uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; - bool need_lock; -+ struct mlx5_dev_ctx_shared *sh = priv->sh; -+ struct mlx5_aso_sq *sq = -+ mlx5_aso_mtr_select_sq(sh, queue, mtr, &need_lock); -+ uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; -+ poll_cq_t poll_mtr_cq = -+ job ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws; - int ret; + DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); + return 0; +@@ -1933,7 +1932,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); + else +- DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, ++ DPAA2_PMD_INFO("Port %d Link is %s", dev->data->port_id, + link.link_status ? "Up" : "Down"); -- if (likely(sh->config.dv_flow_en == 2) && -- mtr->type == ASO_METER_INDIRECT) { -- if (queue == MLX5_HW_INV_QUEUE) { -- sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; -- need_lock = true; -- } else { -- sq = &mtr->pool->sq[queue]; -- need_lock = false; -- } -- } else { -- sq = &sh->mtrmng->pools_mng.sq; -- need_lock = true; -- } - if (queue != MLX5_HW_INV_QUEUE) { - ret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk, -- need_lock, user_data, push); -+ need_lock, job, push); - return ret > 0 ? 
0 : -1; + return ret; +@@ -2307,7 +2306,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + dpaa2_ethq->tc_index, flow_id, + OPR_OPT_CREATE, &ocfg, 0); + if (ret) { +- DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); ++ DPAA2_PMD_ERR("Error setting opr: ret: %d", ret); + return ret; + } + +@@ -2423,7 +2422,7 @@ rte_pmd_dpaa2_thread_init(void) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return; + } +@@ -2838,7 +2837,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) + WRIOP_SS_INITIALIZER(priv); + ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); + if (ret < 0) { +- DPAA2_PMD_ERR(" Error(%d) in loading softparser\n", ++ DPAA2_PMD_ERR(" Error(%d) in loading softparser", + ret); + return ret; + } +@@ -2846,7 +2845,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) + ret = dpaa2_eth_enable_wriop_soft_parser(priv, + DPNI_SS_INGRESS); + if (ret < 0) { +- DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n", ++ DPAA2_PMD_ERR(" Error(%d) in enabling softparser", + ret); + return ret; + } +@@ -2929,7 +2928,7 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, + DPAA2_MAX_SGS * sizeof(struct qbman_sge), + rte_socket_id()); + if (dpaa2_tx_sg_pool == NULL) { +- DPAA2_PMD_ERR("SG pool creation failed\n"); ++ DPAA2_PMD_ERR("SG pool creation failed"); + return -ENOMEM; + } + } +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_flow.c b/dpdk/drivers/net/dpaa2/dpaa2_flow.c +index eec7e60650..e590f6f748 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_flow.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_flow.c +@@ -3360,7 +3360,7 @@ dpaa2_flow_verify_action( + rxq = priv->rx_vq[rss_conf->queue[i]]; + if (rxq->tc_index != attr->group) { + DPAA2_PMD_ERR( +- "Queue/Group combination are not supported\n"); ++ "Queue/Group combination are not supported"); + return -ENOTSUP; + } + } +@@ -3601,7 +3601,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( +- "RSS QoS table can not be configured(%d)\n", ++ "RSS QoS table can not be configured(%d)", + ret); + return -1; + } +@@ -3718,14 +3718,14 @@ dpaa2_generic_flow_set(struct rte_flow *flow, + &priv->extract.tc_key_extract[flow->tc_id].dpkg); + if (ret < 0) { + DPAA2_PMD_ERR( +- "unable to set flow distribution.please check queue config\n"); ++ "unable to set flow distribution.please check queue config"); + return ret; + } + + /* Allocate DMA'ble memory to write the rules */ + param = (size_t)rte_malloc(NULL, 256, 64); + if (!param) { +- DPAA2_PMD_ERR("Memory allocation failure\n"); ++ DPAA2_PMD_ERR("Memory allocation failure"); + return -1; + } + +@@ -3747,7 +3747,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow, + priv->token, &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( +- "RSS TC table cannot be configured: %d\n", ++ "RSS TC table cannot be configured: %d", + ret); + rte_free((void *)param); + return -1; +@@ -3772,7 +3772,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( +- "RSS QoS dist can't be configured-%d\n", ++ "RSS QoS dist can't be configured-%d", + ret); + return -1; + } +@@ -3841,20 +3841,20 @@ dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr, + int ret = 0; + + if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) { +- DPAA2_PMD_ERR("Priority group is out of range\n"); ++ DPAA2_PMD_ERR("Priority group is out of range"); + ret = -ENOTSUP; + } + if (unlikely(attr->priority >= dpni_attr->fs_entries)) { +- 
DPAA2_PMD_ERR("Priority within the group is out of range\n"); ++ DPAA2_PMD_ERR("Priority within the group is out of range"); + ret = -ENOTSUP; + } + if (unlikely(attr->egress)) { + DPAA2_PMD_ERR( +- "Flow configuration is not supported on egress side\n"); ++ "Flow configuration is not supported on egress side"); + ret = -ENOTSUP; + } + if (unlikely(!attr->ingress)) { +- DPAA2_PMD_ERR("Ingress flag must be configured\n"); ++ DPAA2_PMD_ERR("Ingress flag must be configured"); + ret = -EINVAL; + } + return ret; +@@ -3933,7 +3933,7 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev, + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr); + if (ret < 0) { + DPAA2_PMD_ERR( +- "Failure to get dpni@%p attribute, err code %d\n", ++ "Failure to get dpni@%p attribute, err code %d", + dpni, ret); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, +@@ -3945,7 +3945,7 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev, + ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr); + if (ret < 0) { + DPAA2_PMD_ERR( +- "Invalid attributes are given\n"); ++ "Invalid attributes are given"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); +@@ -3955,7 +3955,7 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev, + ret = dpaa2_dev_verify_patterns(pattern); + if (ret < 0) { + DPAA2_PMD_ERR( +- "Invalid pattern list is given\n"); ++ "Invalid pattern list is given"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "invalid"); +@@ -3965,7 +3965,7 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev, + ret = dpaa2_dev_verify_actions(actions); + if (ret < 0) { + DPAA2_PMD_ERR( +- "Invalid action list is given\n"); ++ "Invalid action list is given"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "invalid"); +@@ -4012,13 +4012,13 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev, + key_iova = (size_t)rte_zmalloc(NULL, 256, 64); + if (!key_iova) { + DPAA2_PMD_ERR( +- "Memory allocation failure for rule configuration\n"); ++ "Memory allocation failure for rule configuration"); + goto mem_failure; + } + mask_iova = (size_t)rte_zmalloc(NULL, 256, 64); + if (!mask_iova) { + DPAA2_PMD_ERR( +- "Memory allocation failure for rule configuration\n"); ++ "Memory allocation failure for rule configuration"); + goto mem_failure; + } + +@@ -4029,13 +4029,13 @@ struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev, + key_iova = (size_t)rte_zmalloc(NULL, 256, 64); + if (!key_iova) { + DPAA2_PMD_ERR( +- "Memory allocation failure for rule configuration\n"); ++ "Memory allocation failure for rule configuration"); + goto mem_failure; + } + mask_iova = (size_t)rte_zmalloc(NULL, 256, 64); + if (!mask_iova) { + DPAA2_PMD_ERR( +- "Memory allocation failure for rule configuration\n"); ++ "Memory allocation failure for rule configuration"); + goto mem_failure; + } + +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_mux.c b/dpdk/drivers/net/dpaa2/dpaa2_mux.c +index 2ff1a98fda..7dd5a60966 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_mux.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_mux.c +@@ -88,7 +88,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, + (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE); + if (!flow) { + DPAA2_PMD_ERR( +- "Memory allocation failure for rule configuration\n"); ++ "Memory allocation failure for rule configuration"); + goto creation_error; + } + key_iova = (void *)((size_t)flow + sizeof(struct rte_flow)); +@@ -211,7 +211,7 @@ rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, + + vf_conf = (const struct 
rte_flow_action_vf *)(actions[0]->conf); + if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) { +- DPAA2_PMD_ERR("Invalid destination id\n"); ++ DPAA2_PMD_ERR("Invalid destination id"); + goto creation_error; + } + dpdmux_action.dest_if = vf_conf->id; +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_recycle.c b/dpdk/drivers/net/dpaa2/dpaa2_recycle.c +index fbfdf360d1..4fde9b95a0 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_recycle.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_recycle.c +@@ -423,7 +423,7 @@ ls_mac_serdes_lpbk_support(uint16_t mac_id, + + sd_idx = ls_serdes_cfg_to_idx(sd_cfg, sd_id); + if (sd_idx < 0) { +- DPAA2_PMD_ERR("Serdes protocol(0x%02x) does not exist\n", ++ DPAA2_PMD_ERR("Serdes protocol(0x%02x) does not exist", + sd_cfg); + return false; + } +@@ -552,7 +552,7 @@ ls_serdes_eth_lpbk(uint16_t mac_id, int en) + (serdes_id - LSX_SERDES_1) * 0x10000, + sizeof(struct ccsr_ls_serdes) / 64 * 64 + 64); + if (!serdes_base) { +- DPAA2_PMD_ERR("Serdes register map failed\n"); ++ DPAA2_PMD_ERR("Serdes register map failed"); + return -ENOMEM; } - do { -- mlx5_aso_mtr_completion_handle(sq, need_lock); -+ poll_mtr_cq(priv, sq); - if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk, -- need_lock, NULL, true)) -+ need_lock, job, true)) - return 0; - /* Waiting for wqe resource. */ - rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY); -@@ -1036,32 +1096,22 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, - * 0 on success, a negative errno value otherwise and rte_errno is set. - */ - int --mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue, -- struct mlx5_aso_mtr *mtr) -+mlx5_aso_mtr_wait(struct mlx5_priv *priv, -+ struct mlx5_aso_mtr *mtr, bool is_tmpl_api) - { -+ bool need_lock; - struct mlx5_aso_sq *sq; -+ struct mlx5_dev_ctx_shared *sh = priv->sh; - uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; -- uint8_t state; -- bool need_lock; -+ uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED); -+ poll_cq_t poll_mtr_cq = -+ is_tmpl_api ? 
mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws; -- if (likely(sh->config.dv_flow_en == 2) && -- mtr->type == ASO_METER_INDIRECT) { -- if (queue == MLX5_HW_INV_QUEUE) { -- sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; -- need_lock = true; -- } else { -- sq = &mtr->pool->sq[queue]; -- need_lock = false; -- } -- } else { -- sq = &sh->mtrmng->pools_mng.sq; -- need_lock = true; -- } -- state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED); - if (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC) - return 0; -+ sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock); - do { -- mlx5_aso_mtr_completion_handle(sq, need_lock); -+ poll_mtr_cq(priv, sq); - if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) == - ASO_METER_READY) +@@ -587,7 +587,7 @@ lx_serdes_eth_lpbk(uint16_t mac_id, int en) + (serdes_id - LSX_SERDES_1) * 0x10000, + sizeof(struct ccsr_lx_serdes) / 64 * 64 + 64); + if (!serdes_base) { +- DPAA2_PMD_ERR("Serdes register map failed\n"); ++ DPAA2_PMD_ERR("Serdes register map failed"); + return -ENOMEM; + } + +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +index 23f7c4132d..b64232b88f 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c +@@ -640,7 +640,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { +- DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n", ++ DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d", + rte_gettid()); + return; + } +@@ -691,7 +691,7 @@ dump_err_pkts(struct dpaa2_queue *dpaa2_q) + hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE); + fas = hw_annot_addr; + +- DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:" ++ DPAA2_PMD_ERR("[%d] error packet on port[%d]:" + " fd_off: %d, fd_err: %x, fas_status: %x", + rte_lcore_id(), eth_data->port_id, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd), +@@ -976,7 +976,7 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); return 0; -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 115d730317..863737ceba 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -+++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -@@ -267,21 +267,41 @@ struct field_modify_info modify_tcp[] = { - {0, 0, 0}, - }; + } +@@ -1107,7 +1107,7 @@ uint16_t dpaa2_dev_tx_conf(void *queue) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1256,7 +1256,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1573,7 +1573,7 @@ dpaa2_dev_tx_multi_txq_ordered(void **queue, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -1747,7 +1747,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", 
+ rte_gettid()); + return 0; + } +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_sparser.c b/dpdk/drivers/net/dpaa2/dpaa2_sparser.c +index 63463c4fbf..eb649fb063 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_sparser.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_sparser.c +@@ -165,7 +165,7 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, + + addr = rte_malloc(NULL, sp_param.size, 64); + if (!addr) { +- DPAA2_PMD_ERR("Memory unavailable for soft parser param\n"); ++ DPAA2_PMD_ERR("Memory unavailable for soft parser param"); + return -1; + } --static void -+enum mlx5_l3_tunnel_detection { -+ l3_tunnel_none, -+ l3_tunnel_outer, -+ l3_tunnel_inner -+}; -+ -+static enum mlx5_l3_tunnel_detection - mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, -- uint8_t next_protocol, uint64_t *item_flags, -- int *tunnel) -+ uint8_t next_protocol, uint64_t item_flags, -+ uint64_t *l3_tunnel_flag) - { -+ enum mlx5_l3_tunnel_detection td = l3_tunnel_none; -+ - MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || - item->type == RTE_FLOW_ITEM_TYPE_IPV6); -- if (next_protocol == IPPROTO_IPIP) { -- *item_flags |= MLX5_FLOW_LAYER_IPIP; -- *tunnel = 1; -- } -- if (next_protocol == IPPROTO_IPV6) { -- *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; -- *tunnel = 1; -+ if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) { -+ switch (next_protocol) { -+ case IPPROTO_IPIP: -+ td = l3_tunnel_outer; -+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP; -+ break; -+ case IPPROTO_IPV6: -+ td = l3_tunnel_outer; -+ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP; -+ break; -+ default: -+ break; -+ } -+ } else { -+ td = l3_tunnel_inner; -+ *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ? -+ MLX5_FLOW_LAYER_IPIP : -+ MLX5_FLOW_LAYER_IPV6_ENCAP; +@@ -174,7 +174,7 @@ int dpaa2_eth_load_wriop_soft_parser(struct dpaa2_dev_priv *priv, + + ret = dpni_load_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) { +- DPAA2_PMD_ERR("dpni_load_sw_sequence failed\n"); ++ DPAA2_PMD_ERR("dpni_load_sw_sequence failed"); + rte_free(addr); + return ret; } -+ return td; - } +@@ -214,7 +214,7 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, + if (cfg.param_size) { + param_addr = rte_malloc(NULL, cfg.param_size, 64); + if (!param_addr) { +- DPAA2_PMD_ERR("Memory unavailable for soft parser param\n"); ++ DPAA2_PMD_ERR("Memory unavailable for soft parser param"); + return -1; + } - static inline struct mlx5_hlist * -@@ -1925,7 +1945,7 @@ mlx5_flow_field_id_to_modify_info - if (priv->sh->config.dv_flow_en == 2) - reg = flow_hw_get_reg_id(dev, - RTE_FLOW_ITEM_TYPE_TAG, -- data->level); -+ tag_index); - else - reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, - tag_index, error); -@@ -5484,13 +5504,6 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, - &grp_info, error); - if (ret) +@@ -227,7 +227,7 @@ int dpaa2_eth_enable_wriop_soft_parser(struct dpaa2_dev_priv *priv, + + ret = dpni_enable_sw_sequence(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) { +- DPAA2_PMD_ERR("dpni_enable_sw_sequence failed for dpni%d\n", ++ DPAA2_PMD_ERR("dpni_enable_sw_sequence failed for dpni%d", + priv->hw_id); + rte_free(param_addr); return ret; -- if (attributes->group == target_group && -- !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | -- MLX5_FLOW_ACTION_TUNNEL_MATCH))) -- return rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_ACTION, NULL, -- "target group must be other than" -- " the current flow group"); - if (table == 0) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, -@@ -5952,7 
+5965,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx) - "cannot allocate resource memory"); - return NULL; +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_tm.c b/dpdk/drivers/net/dpaa2/dpaa2_tm.c +index 8fe5bfa013..c4efdf0af8 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_tm.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_tm.c +@@ -584,7 +584,7 @@ dpaa2_tm_configure_queue(struct rte_eth_dev *dev, struct dpaa2_tm_node *node) + return -1; } -- rte_memcpy(&entry->ft_type, -+ rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)), - RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), - key_len + data_len); - if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) -@@ -7062,11 +7075,13 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, - } - static int --validate_integrity_bits(const struct rte_flow_item_integrity *mask, -+validate_integrity_bits(const void *arg, - int64_t pattern_flags, uint64_t l3_flags, - uint64_t l4_flags, uint64_t ip4_flag, - struct rte_flow_error *error) - { -+ const struct rte_flow_item_integrity *mask = arg; +- DPAA2_PMD_DEBUG("tc_id = %d, channel = %d\n\n", tc_id, ++ DPAA2_PMD_DEBUG("tc_id = %d, channel = %d", tc_id, + node->parent->channel_id); + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, + ((node->parent->channel_id << 8) | tc_id), +@@ -653,7 +653,7 @@ dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev, + int i; + + if (n == 1) { +- DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n", ++ DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d", + nodes[n - 1]->id, nodes[n - 1]->priority, + n - 1); + dpaa2_tm_configure_queue(dev, nodes[n - 1]); +@@ -669,7 +669,7 @@ dpaa2_tm_sort_and_configure(struct rte_eth_dev *dev, + } + dpaa2_tm_sort_and_configure(dev, nodes, n - 1); + +- DPAA2_PMD_DEBUG("node id = %d\n, priority = %d, index = %d\n", ++ DPAA2_PMD_DEBUG("node id = %d, priority = %d, index = %d", + nodes[n - 1]->id, nodes[n - 1]->priority, + n - 1); + dpaa2_tm_configure_queue(dev, nodes[n - 1]); +@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret, t; ++ bool conf_schedule = false; + + /* Populate TCs */ + LIST_FOREACH(channel_node, &priv->nodes, next) { +@@ -709,7 +710,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + } + } + if (i > 0) { +- DPAA2_PMD_DEBUG("Configure queues\n"); ++ DPAA2_PMD_DEBUG("Configure queues"); + dpaa2_tm_sort_and_configure(dev, nodes, i); + } + } +@@ -733,13 +734,13 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + node->profile->params.peak.rate / (1024 * 1024); + /* root node */ + if (node->parent == NULL) { +- DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u\n", ++ DPAA2_PMD_DEBUG("LNI S.rate = %u, burst =%u", + tx_cr_shaper.rate_limit, + tx_cr_shaper.max_burst_size); + param = 0x2; + param |= node->profile->params.pkt_length_adjust << 16; + } else { +- DPAA2_PMD_DEBUG("Channel = %d S.rate = %u\n", ++ DPAA2_PMD_DEBUG("Channel = %d S.rate = %u", + node->channel_id, + tx_cr_shaper.rate_limit); + param = (node->channel_id << 8); +@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + } + + LIST_FOREACH(channel_node, &priv->nodes, next) { +- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC]; ++ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues]; + struct dpni_tx_priorities_cfg prio_cfg; + + memset(&prio_cfg, 0, sizeof(prio_cfg)); +@@ -767,6 
+768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + if (channel_node->level_id != CHANNEL_LEVEL) + continue; + ++ conf_schedule = false; + LIST_FOREACH(leaf_node, &priv->nodes, next) { + struct dpaa2_queue *leaf_dpaa2_q; + uint8_t leaf_tc_id; +@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + if (leaf_node->parent != channel_node) + continue; + ++ conf_schedule = true; + leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id]; + leaf_tc_id = leaf_dpaa2_q->tc_index; + /* Process sibling leaf nodes */ +@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + goto out; + } + is_wfq_grp = 1; +- conf[temp_leaf_node->id] = 1; + } ++ conf[temp_leaf_node->id] = 1; + } + if (is_wfq_grp) { + if (wfq_grp == 0) { +@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + } + conf[leaf_node->id] = 1; + } ++ if (!conf_schedule) ++ continue; + - if (mask->l3_ok && !(pattern_flags & l3_flags)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, -@@ -7255,6 +7270,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev, + if (wfq_grp > 1) { + prio_cfg.separate_groups = 1; + if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) { +@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + + prio_cfg.prio_group_A = 1; + prio_cfg.channel_idx = channel_node->channel_id; ++ DPAA2_PMD_DEBUG("########################################"); ++ DPAA2_PMD_DEBUG("Channel idx = %d", prio_cfg.channel_idx); ++ for (t = 0; t < DPNI_MAX_TC; t++) ++ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d", t, ++ prio_cfg.tc_sched[t].mode, ++ prio_cfg.tc_sched[t].delta_bandwidth); ++ ++ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps" ++ " = %d", prio_cfg.prio_group_A, ++ prio_cfg.prio_group_B, prio_cfg.separate_groups); + ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg); + if (ret) { + ret = -rte_tm_error_set(error, EINVAL, +@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + "Scheduling Failed\n"); + goto out; + } +- DPAA2_PMD_DEBUG("########################################\n"); +- DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx); +- for (t = 0; t < DPNI_MAX_TC; t++) { +- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode); +- DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth); +- } +- DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A); +- DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B); +- DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups); + } return 0; + +diff --git a/dpdk/drivers/net/e1000/base/e1000_82575.c b/dpdk/drivers/net/e1000/base/e1000_82575.c +index 7c78649393..53900cf8f1 100644 +--- a/dpdk/drivers/net/e1000/base/e1000_82575.c ++++ b/dpdk/drivers/net/e1000/base/e1000_82575.c +@@ -1722,6 +1722,7 @@ STATIC s32 e1000_get_media_type_82575(struct e1000_hw *hw) + break; + } + /* Fall through for I2C based SGMII */ ++ /* Fall through */ + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + /* read media type from SFP EEPROM */ + ret_val = e1000_set_sfp_media_type_82575(hw); +diff --git a/dpdk/drivers/net/e1000/base/e1000_api.c b/dpdk/drivers/net/e1000/base/e1000_api.c +index 0f6e5afa3b..6697b4b64f 100644 +--- a/dpdk/drivers/net/e1000/base/e1000_api.c ++++ b/dpdk/drivers/net/e1000/base/e1000_api.c +@@ -295,6 +295,7 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) + case E1000_DEV_ID_PCH_RPL_I219_LM23: + 
case E1000_DEV_ID_PCH_RPL_I219_V23: + mac->type = e1000_pch_tgp; ++ break; + case E1000_DEV_ID_PCH_ADL_I219_LM17: + case E1000_DEV_ID_PCH_ADL_I219_V17: + case E1000_DEV_ID_PCH_RPL_I219_LM22: +diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c +index ab73e1e59e..3ec32e7240 100644 +--- a/dpdk/drivers/net/e1000/base/e1000_base.c ++++ b/dpdk/drivers/net/e1000/base/e1000_base.c +@@ -107,7 +107,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw) + return; + + /* If the management interface is not enabled, then power down */ +- if (phy->ops.check_reset_block(hw)) ++ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); } -+static __rte_always_inline uint8_t -+mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item, -+ enum MLX5_SET_MATCHER key_type) -+{ -+#define MLX5_L3_NEXT_PROTOCOL(i, ms) \ -+ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \ -+ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \ -+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \ -+ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \ -+ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \ -+ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\ -+ 0xff) -+ -+ uint8_t next_protocol; +diff --git a/dpdk/drivers/net/e1000/base/meson.build b/dpdk/drivers/net/e1000/base/meson.build +index 528a33f958..5a7a87f8a7 100644 +--- a/dpdk/drivers/net/e1000/base/meson.build ++++ b/dpdk/drivers/net/e1000/base/meson.build +@@ -23,8 +23,7 @@ sources = [ + ] + + error_cflags = ['-Wno-uninitialized', '-Wno-unused-parameter', +- '-Wno-unused-variable', '-Wno-misleading-indentation', +- '-Wno-implicit-fallthrough'] ++ '-Wno-unused-variable', '-Wno-misleading-indentation'] + c_args = cflags + foreach flag: error_cflags + if cc.has_argument(flag) +diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c +index c5a4dec693..f6875b0762 100644 +--- a/dpdk/drivers/net/e1000/em_ethdev.c ++++ b/dpdk/drivers/net/e1000/em_ethdev.c +@@ -1136,6 +1136,9 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) + struct rte_eth_link link; + int link_up, count; + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return -1; + -+ if (l3_item->mask != NULL && l3_item->spec != NULL) { -+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); -+ if (next_protocol) -+ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec); -+ else -+ next_protocol = 0xff; -+ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) { -+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); -+ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) { -+ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec); -+ } else { -+ /* Reset for inner layer. 
*/ -+ next_protocol = 0xff; -+ } -+ return next_protocol; + link_up = 0; + hw->mac.get_link_status = 1; + +diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c +index 8858f975f8..222e359ed9 100644 +--- a/dpdk/drivers/net/e1000/igb_ethdev.c ++++ b/dpdk/drivers/net/e1000/igb_ethdev.c +@@ -3857,11 +3857,11 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev, + + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); +- rte_free(filter); + + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); ++ rte_free(filter); + return 0; + } + +@@ -4298,7 +4298,6 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + + filter_info->fivetuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); +- rte_free(filter); + + E1000_WRITE_REG(hw, E1000_FTQF(filter->index), + E1000_FTQF_VF_BP | E1000_FTQF_MASK); +@@ -4307,6 +4306,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); ++ rte_free(filter); + return 0; + } + +@@ -5053,7 +5053,7 @@ eth_igb_get_module_info(struct rte_eth_dev *dev, + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. Please report the module " +- "type to the driver maintainers.\n"); ++ "type to the driver maintainers."); + page_swap = true; + } + +diff --git a/dpdk/drivers/net/ena/base/ena_com.c b/dpdk/drivers/net/ena/base/ena_com.c +index 6953a1fa33..2f438597e6 100644 +--- a/dpdk/drivers/net/ena/base/ena_com.c ++++ b/dpdk/drivers/net/ena/base/ena_com.c +@@ -34,6 +34,8 @@ + + #define ENA_REGS_ADMIN_INTR_MASK 1 + ++#define ENA_MAX_BACKOFF_DELAY_EXP 16U + -+#undef MLX5_L3_NEXT_PROTOCOL -+} + #define ENA_MIN_ADMIN_POLL_US 100 + + #define ENA_MAX_ADMIN_POLL_US 5000 +@@ -177,6 +179,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, + static void comp_ctxt_release(struct ena_com_admin_queue *queue, + struct ena_comp_ctx *comp_ctx) + { ++ comp_ctx->user_cqe = NULL; + comp_ctx->occupied = false; + ATOMIC32_DEC(&queue->outstanding_cmds); + } +@@ -470,6 +473,9 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a + return; + } + ++ if (!comp_ctx->occupied) ++ return; + - /** - * Validate IB BTH item. 
- * -@@ -7451,6 +7500,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - return ret; - is_root = (uint64_t)ret; - for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { -+ enum mlx5_l3_tunnel_detection l3_tunnel_detection; -+ uint64_t l3_tunnel_flag; - int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); - int type = items->type; + comp_ctx->status = ENA_CMD_COMPLETED; + comp_ctx->comp_status = cqe->acq_common_descriptor.status; -@@ -7528,8 +7579,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - vlan_m = items->mask; - break; - case RTE_FLOW_ITEM_TYPE_IPV4: -- mlx5_flow_tunnel_ip_check(items, next_protocol, -- &item_flags, &tunnel); -+ next_protocol = mlx5_flow_l3_next_protocol -+ (items, (enum MLX5_SET_MATCHER)-1); -+ l3_tunnel_detection = -+ mlx5_flow_tunnel_ip_check(items, next_protocol, -+ item_flags, -+ &l3_tunnel_flag); -+ if (l3_tunnel_detection == l3_tunnel_inner) { -+ item_flags |= l3_tunnel_flag; -+ tunnel = 1; -+ } - ret = flow_dv_validate_item_ipv4(dev, items, item_flags, - last_item, ether_type, - error); -@@ -7537,23 +7596,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - return ret; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; -- if (items->mask != NULL && -- ((const struct rte_flow_item_ipv4 *) -- items->mask)->hdr.next_proto_id) { -- next_protocol = -- ((const struct rte_flow_item_ipv4 *) -- (items->spec))->hdr.next_proto_id; -- next_protocol &= -- ((const struct rte_flow_item_ipv4 *) -- (items->mask))->hdr.next_proto_id; -- } else { -- /* Reset for inner layer. */ -- next_protocol = 0xff; -- } -+ if (l3_tunnel_detection == l3_tunnel_outer) -+ item_flags |= l3_tunnel_flag; - break; - case RTE_FLOW_ITEM_TYPE_IPV6: -- mlx5_flow_tunnel_ip_check(items, next_protocol, -- &item_flags, &tunnel); -+ next_protocol = mlx5_flow_l3_next_protocol -+ (items, (enum MLX5_SET_MATCHER)-1); -+ l3_tunnel_detection = -+ mlx5_flow_tunnel_ip_check(items, next_protocol, -+ item_flags, -+ &l3_tunnel_flag); -+ if (l3_tunnel_detection == l3_tunnel_inner) { -+ item_flags |= l3_tunnel_flag; -+ tunnel = 1; -+ } - ret = mlx5_flow_validate_item_ipv6(items, item_flags, - last_item, - ether_type, -@@ -7563,22 +7619,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - return ret; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; -- if (items->mask != NULL && -- ((const struct rte_flow_item_ipv6 *) -- items->mask)->hdr.proto) { -- item_ipv6_proto = -- ((const struct rte_flow_item_ipv6 *) -- items->spec)->hdr.proto; -- next_protocol = -- ((const struct rte_flow_item_ipv6 *) -- items->spec)->hdr.proto; -- next_protocol &= -- ((const struct rte_flow_item_ipv6 *) -- items->mask)->hdr.proto; -- } else { -- /* Reset for inner layer. */ -- next_protocol = 0xff; -- } -+ if (l3_tunnel_detection == l3_tunnel_outer) -+ item_flags |= l3_tunnel_flag; - break; - case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: - ret = flow_dv_validate_item_ipv6_frag_ext(items, -@@ -7589,19 +7631,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, - last_item = tunnel ? 
- MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : - MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; -- if (items->mask != NULL && -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->mask)->hdr.next_header) { -- next_protocol = -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->spec)->hdr.next_header; -- next_protocol &= -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->mask)->hdr.next_header; -- } else { -- /* Reset for inner layer. */ -- next_protocol = 0xff; -- } -+ next_protocol = mlx5_flow_l3_next_protocol -+ (items, (enum MLX5_SET_MATCHER)-1); - break; - case RTE_FLOW_ITEM_TYPE_TCP: - ret = mlx5_flow_validate_item_tcp -@@ -9985,14 +10016,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, +@@ -545,8 +551,9 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, + + static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) { - const struct rte_flow_item_geneve_opt *geneve_opt_m; - const struct rte_flow_item_geneve_opt *geneve_opt_v; -- const struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec; -- void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); -+ const struct rte_flow_item_geneve_opt *orig_spec = item->spec; - void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); - rte_be32_t opt_data_key = 0, opt_data_mask = 0; -- uint32_t *data; -+ size_t option_byte_len; - int ret = 0; ++ exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp); + delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); +- delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); ++ delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp)); + ENA_USLEEP(delay_us); + } -- if (MLX5_ITEM_VALID(item, key_type)) -+ if (MLX5_ITEM_VALID(item, key_type) || !orig_spec) - return -1; - MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m, - &rte_flow_item_geneve_opt_mask); -@@ -10005,36 +10035,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, - return ret; - } - } -- /* -- * Set the option length in GENEVE header if not requested. -- * The GENEVE TLV option length is expressed by the option length field -- * in the GENEVE header. -- * If the option length was not requested but the GENEVE TLV option item -- * is present we set the option length field implicitly. -- */ -- if (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) { -- if (key_type & MLX5_SET_MATCHER_M) -- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, -- MLX5_GENEVE_OPTLEN_MASK); -- else -- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, -- geneve_opt_v->option_len + 1); -- } -- /* Set the data. */ -- if (key_type == MLX5_SET_MATCHER_SW_V) -- data = geneve_opt_vv->data; -- else -- data = geneve_opt_v->data; -- if (data) { -- memcpy(&opt_data_key, data, -- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), -- sizeof(opt_data_key))); -- memcpy(&opt_data_mask, geneve_opt_m->data, -- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), -- sizeof(opt_data_mask))); -+ /* Convert the option length from DW to bytes for using memcpy. 
*/ -+ option_byte_len = RTE_MIN((size_t)(orig_spec->option_len * 4), -+ sizeof(rte_be32_t)); -+ if (geneve_opt_v->data) { -+ memcpy(&opt_data_key, geneve_opt_v->data, option_byte_len); -+ memcpy(&opt_data_mask, geneve_opt_m->data, option_byte_len); - MLX5_SET(fte_match_set_misc3, misc3_v, -- geneve_tlv_option_0_data, -- rte_be_to_cpu_32(opt_data_key & opt_data_mask)); -+ geneve_tlv_option_0_data, -+ rte_be_to_cpu_32(opt_data_key & opt_data_mask)); - } - return ret; - } -@@ -13658,6 +13667,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "Connection is not supported"); -+ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { -+ rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "CT supports port indexes up to " -+ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); -+ return 0; +@@ -3134,16 +3141,18 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + int ena_com_allocate_customer_metrics_buffer(struct ena_com_dev *ena_dev) + { + struct ena_customer_metrics *customer_metrics = &ena_dev->customer_metrics; ++ customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; ++ customer_metrics->buffer_virt_addr = NULL; + + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + customer_metrics->buffer_len, + customer_metrics->buffer_virt_addr, + customer_metrics->buffer_dma_addr, + customer_metrics->buffer_dma_handle); +- if (unlikely(customer_metrics->buffer_virt_addr == NULL)) ++ if (unlikely(customer_metrics->buffer_virt_addr == NULL)) { ++ customer_metrics->buffer_len = 0; + return ENA_COM_NO_MEM; +- +- customer_metrics->buffer_len = ENA_CUSTOMER_METRICS_BUFFER_SIZE; + } - idx = flow_dv_aso_ct_alloc(dev, error); - if (!idx) - return rte_flow_error_set(error, rte_errno, -@@ -13707,6 +13723,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev, - int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL); - int item_type = items->type; - uint64_t last_item = wks->last_item; -+ enum mlx5_l3_tunnel_detection l3_tunnel_detection; -+ uint64_t l3_tunnel_flag; - int ret; - switch (item_type) { -@@ -13750,94 +13768,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev, - MLX5_FLOW_LAYER_OUTER_VLAN); - break; - case RTE_FLOW_ITEM_TYPE_IPV4: -- mlx5_flow_tunnel_ip_check(items, next_protocol, -- &wks->item_flags, &tunnel); -+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); -+ l3_tunnel_detection = -+ mlx5_flow_tunnel_ip_check(items, next_protocol, -+ wks->item_flags, -+ &l3_tunnel_flag); -+ if (l3_tunnel_detection == l3_tunnel_inner) { -+ wks->item_flags |= l3_tunnel_flag; -+ tunnel = 1; -+ } - flow_dv_translate_item_ipv4(key, items, tunnel, - wks->group, key_type); - wks->priority = MLX5_PRIORITY_MAP_L3; - last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; -- if (items->mask != NULL && -- items->spec != NULL && -- ((const struct rte_flow_item_ipv4 *) -- items->mask)->hdr.next_proto_id) { -- next_protocol = -- ((const struct rte_flow_item_ipv4 *) -- (items->spec))->hdr.next_proto_id; -- next_protocol &= -- ((const struct rte_flow_item_ipv4 *) -- (items->mask))->hdr.next_proto_id; -- } else if (key_type == MLX5_SET_MATCHER_HS_M && -- items->mask != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv4 *) -- (items->mask))->hdr.next_proto_id; -- } else if (key_type == MLX5_SET_MATCHER_HS_V && -- items->spec != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv4 *) -- (items->spec))->hdr.next_proto_id; -- } else { -- /* Reset for inner layer. */ -- next_protocol = 0xff; -- } -+ if (l3_tunnel_detection == l3_tunnel_outer) -+ wks->item_flags |= l3_tunnel_flag; - break; - case RTE_FLOW_ITEM_TYPE_IPV6: -- mlx5_flow_tunnel_ip_check(items, next_protocol, -- &wks->item_flags, &tunnel); -+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); -+ l3_tunnel_detection = -+ mlx5_flow_tunnel_ip_check(items, next_protocol, -+ wks->item_flags, -+ &l3_tunnel_flag); -+ if (l3_tunnel_detection == l3_tunnel_inner) { -+ wks->item_flags |= l3_tunnel_flag; -+ tunnel = 1; -+ } - flow_dv_translate_item_ipv6(key, items, tunnel, - wks->group, key_type); - wks->priority = MLX5_PRIORITY_MAP_L3; - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; -- if (items->mask != NULL && -- items->spec != NULL && -- ((const struct rte_flow_item_ipv6 *) -- items->mask)->hdr.proto) { -- next_protocol = -- ((const struct rte_flow_item_ipv6 *) -- items->spec)->hdr.proto; -- next_protocol &= -- ((const struct rte_flow_item_ipv6 *) -- items->mask)->hdr.proto; -- } else if (key_type == MLX5_SET_MATCHER_HS_M && -- items->mask != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv6 *) -- (items->mask))->hdr.proto; -- } else if (key_type == MLX5_SET_MATCHER_HS_V && -- items->spec != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv6 *) -- (items->spec))->hdr.proto; -- } else { -- /* Reset for inner layer. */ -- next_protocol = 0xff; -- } -+ if (l3_tunnel_detection == l3_tunnel_outer) -+ wks->item_flags |= l3_tunnel_flag; - break; - case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: - flow_dv_translate_item_ipv6_frag_ext - (key, items, tunnel, key_type); - last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : - MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; -- if (items->mask != NULL && -- items->spec != NULL && -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->mask)->hdr.next_header) { -- next_protocol = -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->spec)->hdr.next_header; -- next_protocol &= -- ((const struct rte_flow_item_ipv6_frag_ext *) -- items->mask)->hdr.next_header; -- } else if (key_type == MLX5_SET_MATCHER_HS_M && -- items->mask != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) -- (items->mask))->hdr.next_header; -- } else if (key_type == MLX5_SET_MATCHER_HS_V && -- items->spec != NULL) { -- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) -- (items->spec))->hdr.next_header; -- } else { -- /* Reset for inner layer. 
*/ -- next_protocol = 0xff; -- } -+ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); - break; - case RTE_FLOW_ITEM_TYPE_TCP: - flow_dv_translate_item_tcp(key, items, tunnel, key_type); -@@ -14280,7 +14251,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, - * Avoid be overwritten by other sub mlx5_flows. - */ - if (wks.geneve_tlv_option) -- dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; -+ dev_flow->flow->geneve_tlv_option += wks.geneve_tlv_option; return 0; } +diff --git a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +index 665ac2f0cc..ba4a525898 100644 +--- a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h ++++ b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +@@ -26,7 +26,6 @@ + #include -@@ -15420,7 +15391,8 @@ error: - SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, - handle_idx, dh, next) { - /* hrxq is union, don't clear it if the flag is not set. */ -- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { -+ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq && -+ !dh->dvh.rix_sample && !dh->dvh.rix_dest_array) { - mlx5_hrxq_release(dev, dh->rix_hrxq); - dh->rix_hrxq = 0; - } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { -@@ -15884,9 +15856,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) - flow_dv_aso_ct_release(dev, flow->ct, NULL); - else if (flow->age) - flow_dv_aso_age_release(dev, flow->age); -- if (flow->geneve_tlv_option) { -+ while (flow->geneve_tlv_option) { - flow_dev_geneve_tlv_option_resource_release(priv->sh); -- flow->geneve_tlv_option = 0; -+ flow->geneve_tlv_option--; - } - while (flow->dev_handles) { - uint32_t tmp_idx = flow->dev_handles; -@@ -16350,6 +16322,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, - case RTE_FLOW_ACTION_TYPE_CONNTRACK: - ret = flow_dv_translate_create_conntrack(dev, action->conf, - err); -+ if (!ret) -+ break; - idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); - break; - default: -@@ -17675,9 +17649,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, - } - } - tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl); -- if (priority < RTE_COLOR_RED) -- flow_dv_match_meta_reg(matcher.mask.buf, -- (enum modify_reg)color_reg_c_idx, color_mask, color_mask); -+ flow_dv_match_meta_reg(matcher.mask.buf, -+ (enum modify_reg)color_reg_c_idx, color_mask, color_mask); - matcher.priority = priority; - matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, - matcher.mask.size); -@@ -17711,7 +17684,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, - static int - __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, - struct mlx5_flow_meter_sub_policy *sub_policy, -- uint8_t egress, uint8_t transfer, bool match_src_port, -+ uint8_t egress, uint8_t transfer, bool *match_src_port, - struct mlx5_meter_policy_acts acts[RTE_COLORS]) - { - struct mlx5_priv *priv = dev->data->dev_private; -@@ -17726,9 +17699,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, - .reserved = 0, - }; - int i; -+ uint16_t priority; - int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err); - struct mlx5_sub_policy_color_rule *color_rule; -- bool svport_match; - struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL}; + #include +-#include - if (ret < 0) -@@ -17761,13 +17734,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, - TAILQ_INSERT_TAIL(&sub_policy->color_rules[i], - color_rule, next_port); - color_rule->src_port = 
priv->representor_id; -- /* No use. */ -- attr.priority = i; -+ priority = (match_src_port[i] == match_src_port[RTE_COLOR_GREEN]) ? -+ MLX5_MTR_POLICY_MATCHER_PRIO : (MLX5_MTR_POLICY_MATCHER_PRIO + 1); - /* Create matchers for colors. */ -- svport_match = (i != RTE_COLOR_RED) ? match_src_port : false; - if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx, -- MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy, -- &attr, svport_match, NULL, -+ priority, sub_policy, -+ &attr, match_src_port[i], NULL, - &color_rule->matcher, &flow_err)) { - DRV_LOG(ERR, "Failed to create color%u matcher.", i); - goto err_exit; -@@ -17777,7 +17749,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, - color_reg_c_idx, (enum rte_color)i, - color_rule->matcher, - acts[i].actions_n, acts[i].dv_actions, -- svport_match, NULL, &color_rule->rule, -+ match_src_port[i], NULL, &color_rule->rule, - &attr)) { - DRV_LOG(ERR, "Failed to create color%u rule.", i); - goto err_exit; -@@ -17825,7 +17797,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, - uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; - uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0; - bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX); -- bool match_src_port = false; -+ bool match_src_port[RTE_COLORS] = {false}; - int i; + typedef uint64_t u64; + typedef uint32_t u32; +@@ -70,14 +69,7 @@ typedef uint64_t dma_addr_t; + #define ENA_UDELAY(x) rte_delay_us_block(x) - /* If RSS or Queue, no previous actions / rules is created. */ -@@ -17896,7 +17868,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, - acts[i].dv_actions[acts[i].actions_n] = - port_action->action; - acts[i].actions_n++; -- match_src_port = true; -+ match_src_port[i] = true; - break; - case MLX5_FLOW_FATE_DROP: - case MLX5_FLOW_FATE_JUMP: -@@ -17948,7 +17920,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, - acts[i].dv_actions[acts[i].actions_n++] = - tbl_data->jump.action; - if (mtr_policy->act_cnt[i].modify_hdr) -- match_src_port = !!transfer; -+ match_src_port[i] = !!transfer; - break; - default: - /*Queue action do nothing*/ -@@ -17962,9 +17934,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, - "Failed to create policy rules per domain."); - goto err_exit; - } -- if (match_src_port) { -- mtr_policy->match_port = match_src_port; -- mtr_policy->hierarchy_match_port = match_src_port; -+ if (match_src_port[RTE_COLOR_GREEN] || match_src_port[RTE_COLOR_YELLOW]) { -+ mtr_policy->match_port = 1; -+ mtr_policy->hierarchy_match_port = 1; + #define ENA_TOUCH(x) ((void)(x)) +-/* Redefine memcpy with caution: rte_memcpy can be simply aliased to memcpy, so +- * make the redefinition only if it's safe (and beneficial) to do so. +- */ +-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64_MEMCPY) || \ +- defined(RTE_ARCH_ARM_NEON_MEMCPY) +-#undef memcpy +-#define memcpy rte_memcpy +-#endif ++ + #define wmb rte_wmb + #define rmb rte_rmb + #define mb rte_mb +diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c +index dc846d2e84..f3962aa76e 100644 +--- a/dpdk/drivers/net/ena/ena_ethdev.c ++++ b/dpdk/drivers/net/ena/ena_ethdev.c +@@ -37,10 +37,10 @@ + #define ENA_MIN_RING_DESC 128 + + /* +- * We should try to keep ENA_CLEANUP_BUF_SIZE lower than ++ * We should try to keep ENA_CLEANUP_BUF_THRESH lower than + * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache. 
+ */ +-#define ENA_CLEANUP_BUF_SIZE 256 ++#define ENA_CLEANUP_BUF_THRESH 256 + + #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) + +@@ -648,18 +648,13 @@ static inline void ena_rx_mbuf_prepare(struct ena_ring *rx_ring, + packet_type |= RTE_PTYPE_L3_IPV6; } + +- if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag) { ++ if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag || ++ !(packet_type & (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP))) { + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; + } else { + if (unlikely(ena_rx_ctx->l4_csum_err)) { + ++rx_stats->l4_csum_bad; +- /* +- * For the L4 Rx checksum offload the HW may indicate +- * bad checksum although it's valid. Because of that, +- * we're setting the UNKNOWN flag to let the app +- * re-verify the checksum. +- */ +- ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN; ++ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; + } else { + ++rx_stats->l4_csum_good; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; +@@ -797,7 +792,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -841,7 +836,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) + + rc = ena_com_set_host_attributes(&adapter->ena_dev); + if (rc) { +- if (rc == -ENA_COM_UNSUPPORTED) ++ if (rc == ENA_COM_UNSUPPORTED) + PMD_DRV_LOG(WARNING, "Cannot set host attributes\n"); + else + PMD_DRV_LOG(ERR, "Cannot set host attributes\n"); +@@ -3105,33 +3100,12 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf) return 0; - err_exit: -@@ -18026,6 +17998,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) - uint8_t egress, transfer; - struct rte_flow_error error; - struct mlx5_meter_policy_acts acts[RTE_COLORS]; -+ bool match_src_port[RTE_COLORS] = {false}; - int ret; + } - egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; -@@ -18101,7 +18074,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) - /* Create default policy rules. 
*/ - ret = __flow_dv_create_domain_policy_rules(dev, - &def_policy->sub_policy, -- egress, transfer, false, acts); -+ egress, transfer, match_src_port, acts); - if (ret) { - DRV_LOG(ERR, "Failed to create default policy rules."); - goto def_policy_error; -@@ -18660,7 +18633,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, - struct { - struct mlx5_flow_meter_policy *fm_policy; - struct mlx5_flow_meter_info *next_fm; -- struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS]; -+ struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS]; - } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} }; - uint32_t fm_cnt = 0; - uint32_t i, j; -@@ -18694,14 +18667,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, - mtr_policy = fm_info[i].fm_policy; - rte_spinlock_lock(&mtr_policy->sl); - sub_policy = mtr_policy->sub_policys[domain][0]; -- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { -+ for (j = 0; j < RTE_COLORS; j++) { - uint8_t act_n = 0; -- struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; -+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL; - struct mlx5_flow_dv_port_id_action_resource *port_action; -+ uint8_t fate_action; +-static __rte_always_inline size_t +-ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, +- struct rte_mbuf *mbuf, +- size_t mbuf_cnt, +- size_t buf_size) +-{ +- struct rte_mbuf *m_next; +- +- while (mbuf != NULL) { +- m_next = mbuf->next; +- mbufs_to_clean[mbuf_cnt++] = mbuf; +- if (mbuf_cnt == buf_size) { +- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean, +- (unsigned int)mbuf_cnt); +- mbuf_cnt = 0; +- } +- mbuf = m_next; +- } +- +- return mbuf_cnt; +-} +- + static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) + { +- struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; ++ struct rte_mbuf *pkts_to_clean[ENA_CLEANUP_BUF_THRESH]; + struct ena_ring *tx_ring = (struct ena_ring *)txp; + size_t mbuf_cnt = 0; ++ size_t pkt_cnt = 0; + unsigned int total_tx_descs = 0; + unsigned int total_tx_pkts = 0; + uint16_t cleanup_budget; +@@ -3162,8 +3136,13 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) -- if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR && -- mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID) -- continue; -+ if (j == RTE_COLOR_RED) { -+ fate_action = MLX5_FLOW_FATE_DROP; -+ } else { -+ fate_action = mtr_policy->act_cnt[j].fate_action; -+ modify_hdr = mtr_policy->act_cnt[j].modify_hdr; -+ if (fate_action != MLX5_FLOW_FATE_MTR && -+ fate_action != MLX5_FLOW_FATE_PORT_ID && -+ fate_action != MLX5_FLOW_FATE_DROP) -+ continue; + mbuf = tx_info->mbuf; + if (fast_free) { +- mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, +- ENA_CLEANUP_BUF_SIZE); ++ pkts_to_clean[pkt_cnt++] = mbuf; ++ mbuf_cnt += mbuf->nb_segs; ++ if (mbuf_cnt >= ENA_CLEANUP_BUF_THRESH) { ++ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); ++ mbuf_cnt = 0; ++ pkt_cnt = 0; + } - color_rule = mlx5_malloc(MLX5_MEM_ZERO, - sizeof(struct mlx5_sub_policy_color_rule), - 0, SOCKET_ID_ANY); -@@ -18713,9 +18694,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, - goto err_exit; - } - color_rule->src_port = src_port; -- modify_hdr = mtr_policy->act_cnt[j].modify_hdr; - /* Prepare to create color rule. 
*/ -- if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) { -+ if (fate_action == MLX5_FLOW_FATE_MTR) { - next_fm = fm_info[i].next_fm; - if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) { - mlx5_free(color_rule); -@@ -18742,7 +18722,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, - } - acts.dv_actions[act_n++] = tbl_data->jump.action; - acts.actions_n = act_n; -- } else { -+ } else if (fate_action == MLX5_FLOW_FATE_PORT_ID) { - port_action = - mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], - mtr_policy->act_cnt[j].rix_port_id_action); -@@ -18755,6 +18735,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, - acts.dv_actions[act_n++] = modify_hdr->action; - acts.dv_actions[act_n++] = port_action->action; - acts.actions_n = act_n; -+ } else { -+ acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain]; -+ acts.actions_n = act_n; - } - fm_info[i].tag_rule[j] = color_rule; - TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port); -@@ -18786,7 +18769,7 @@ err_exit: - mtr_policy = fm_info[i].fm_policy; - rte_spinlock_lock(&mtr_policy->sl); - sub_policy = mtr_policy->sub_policys[domain][0]; -- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { -+ for (j = 0; j < RTE_COLORS; j++) { - color_rule = fm_info[i].tag_rule[j]; - if (!color_rule) - continue; -@@ -19116,8 +19099,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, - LIST_FOREACH(act, &age_info->aged_aso, next) { - nb_flows++; - if (nb_contexts) { -- context[nb_flows - 1] = -- act->age_params.context; -+ context[nb_flows - 1] = act->age_params.context; - if (!(--nb_contexts)) - break; - } -@@ -19675,11 +19657,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, + } else { + rte_pktmbuf_free(mbuf); } +@@ -3186,8 +3165,7 @@ static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) } - if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) { -- if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] & -- MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) -+ uint64_t hierarchy_type_flag = -+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | MLX5_FLOW_ACTION_JUMP; -+ if (!(action_flags[RTE_COLOR_GREEN] & hierarchy_type_flag) || -+ !(action_flags[RTE_COLOR_YELLOW] & hierarchy_type_flag)) - return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, - NULL, -- "Meter hierarchy supports meter action only."); -+ "Unsupported action in meter hierarchy."); - } - /* If both colors have RSS, the attributes should be the same. 
*/ - if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN], -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -index da873ae2e2..af4df13b2f 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -+++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -@@ -104,12 +104,40 @@ struct mlx5_tbl_multi_pattern_ctx { - #define MLX5_EMPTY_MULTI_PATTERN_CTX {{{0,}},} + if (mbuf_cnt != 0) +- rte_mempool_put_bulk(mbufs_to_clean[0]->pool, +- (void **)mbufs_to_clean, mbuf_cnt); ++ rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); + + /* Notify completion handler that full cleanup was performed */ + if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) +diff --git a/dpdk/drivers/net/enetc/enetc_ethdev.c b/dpdk/drivers/net/enetc/enetc_ethdev.c +index c9352f0746..d8c30ef150 100644 +--- a/dpdk/drivers/net/enetc/enetc_ethdev.c ++++ b/dpdk/drivers/net/enetc/enetc_ethdev.c +@@ -150,7 +150,7 @@ print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr) + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); +- ENETC_PMD_NOTICE("%s%s\n", name, buf); ++ ENETC_PMD_NOTICE("%s%s", name, buf); + } -+static __rte_always_inline struct mlx5_hw_q_job * -+flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, -+ const struct rte_flow_action_handle *handle, -+ void *user_data, void *query_data, -+ enum mlx5_hw_job_type type, -+ enum mlx5_hw_indirect_type indirect_type, -+ struct rte_flow_error *error); -+static void -+flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow, -+ struct rte_flow_error *error); -+ static int - mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev, - struct rte_flow_template_table *tbl, - struct mlx5_tbl_multi_pattern_ctx *mpat, - struct rte_flow_error *error); +@@ -197,7 +197,7 @@ enetc_hardware_init(struct enetc_eth_hw *hw) + char *first_byte; + + ENETC_PMD_NOTICE("MAC is not available for this SI, " +- "set random MAC\n"); ++ "set random MAC"); + mac = (uint32_t *)hw->mac.addr; + *mac = (uint32_t)rte_rand(); + first_byte = (char *)mac; +diff --git a/dpdk/drivers/net/enetfec/enet_ethdev.c b/dpdk/drivers/net/enetfec/enet_ethdev.c +index 898aad1c37..8c7067fbb5 100644 +--- a/dpdk/drivers/net/enetfec/enet_ethdev.c ++++ b/dpdk/drivers/net/enetfec/enet_ethdev.c +@@ -253,7 +253,7 @@ enetfec_eth_link_update(struct rte_eth_dev *dev, + link.link_status = lstatus; + link.link_speed = RTE_ETH_SPEED_NUM_1G; + +- ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, ++ ENETFEC_PMD_INFO("Port (%d) link is %s", dev->data->port_id, + "Up"); -+static __rte_always_inline enum mlx5_indirect_list_type -+flow_hw_inlist_type_get(const struct rte_flow_action *actions); -+ -+static bool -+mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error) -+{ -+ const struct mlx5_priv *priv = dev->data->dev_private; -+ -+ if (!priv->dr_ctx) { -+ rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "non-template flow engine was not configured"); -+ return false; -+ } -+ return true; -+} -+ - static __rte_always_inline int - mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type) - { -@@ -274,21 +302,6 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = { - .hdr.ether_type = 0, - }; + return rte_eth_linkstatus_set(dev, &link); +@@ -462,7 +462,7 @@ enetfec_rx_queue_setup(struct rte_eth_dev *dev, + } --static __rte_always_inline struct mlx5_hw_q_job * --flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue) --{ -- 
MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size); -- return priv->hw_q[queue].job_idx ? -- priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL; --} -- --static __rte_always_inline void --flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue) --{ -- MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size); -- priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job; --} -- - static inline enum mlx5dr_matcher_insert_mode - flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type) - { -@@ -1010,15 +1023,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, - if (!shared_rss || __flow_hw_act_data_shared_rss_append - (priv, acts, - (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS, -- action_src, action_dst, idx, shared_rss)) -+ action_src, action_dst, idx, shared_rss)) { -+ DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx); - return -1; -+ } - break; - case MLX5_INDIRECT_ACTION_TYPE_COUNT: - if (__flow_hw_act_data_shared_cnt_append(priv, acts, - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_COUNT, -- action_src, action_dst, act_idx)) -+ action_src, action_dst, act_idx)) { -+ DRV_LOG(WARNING, "Indirect count action translate failed"); - return -1; -+ } - break; - case MLX5_INDIRECT_ACTION_TYPE_AGE: - /* Not supported, prevent by validate function. */ -@@ -1026,15 +1043,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, - break; - case MLX5_INDIRECT_ACTION_TYPE_CT: - if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, -- idx, &acts->rule_acts[action_dst])) -+ idx, &acts->rule_acts[action_dst])) { -+ DRV_LOG(WARNING, "Indirect CT action translate failed"); - return -1; -+ } - break; - case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: - if (__flow_hw_act_data_shared_mtr_append(priv, acts, - (enum rte_flow_action_type) - MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK, -- action_src, action_dst, idx)) -+ action_src, action_dst, idx)) { -+ DRV_LOG(WARNING, "Indirect meter mark action translate failed"); - return -1; -+ } - break; - case MLX5_INDIRECT_ACTION_TYPE_QUOTA: - flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx); -@@ -1455,7 +1476,7 @@ flow_hw_meter_compile(struct rte_eth_dev *dev, - acts->rule_acts[jump_pos].action = (!!group) ? 
- acts->jump->hws_action : - acts->jump->root_action; -- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) -+ if (mlx5_aso_mtr_wait(priv, aso_mtr, true)) - return -ENOMEM; - return 0; - } -@@ -1532,7 +1553,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions) - static __rte_always_inline struct mlx5_aso_mtr * - flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, - const struct rte_flow_action *action, -- void *user_data, bool push) -+ struct mlx5_hw_q_job *job, bool push) - { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; -@@ -1540,6 +1561,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, - struct mlx5_aso_mtr *aso_mtr; - struct mlx5_flow_meter_info *fm; - uint32_t mtr_id; -+ uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK << -+ MLX5_INDIRECT_ACTION_TYPE_OFFSET; + if (queue_idx >= ENETFEC_MAX_Q) { +- ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d\n", ++ ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d", + queue_idx, ENETFEC_MAX_Q); + return -EINVAL; + } +diff --git a/dpdk/drivers/net/enetfec/enet_uio.c b/dpdk/drivers/net/enetfec/enet_uio.c +index 6539cbb354..9f4e896985 100644 +--- a/dpdk/drivers/net/enetfec/enet_uio.c ++++ b/dpdk/drivers/net/enetfec/enet_uio.c +@@ -177,7 +177,7 @@ config_enetfec_uio(struct enetfec_private *fep) - if (meter_mark->profile == NULL) - return NULL; -@@ -1558,15 +1581,16 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, - ASO_METER_WAIT : ASO_METER_WAIT_ASYNC; - aso_mtr->offset = mtr_id - 1; - aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN; -+ job->action = (void *)(handle | mtr_id); - /* Update ASO flow meter by wqe. */ -- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr, -- &priv->mtr_bulk, user_data, push)) { -+ if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr, -+ &priv->mtr_bulk, job, push)) { - mlx5_ipool_free(pool->idx_pool, mtr_id); - return NULL; + /* Mapping is done only one time */ + if (enetfec_count > 0) { +- ENETFEC_PMD_INFO("Mapped!\n"); ++ ENETFEC_PMD_INFO("Mapped!"); + return 0; } - /* Wait for ASO object completion. 
*/ - if (queue == MLX5_HW_INV_QUEUE && -- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { -+ mlx5_aso_mtr_wait(priv, aso_mtr, true)) { - mlx5_ipool_free(pool->idx_pool, mtr_id); - return NULL; + +@@ -191,7 +191,7 @@ config_enetfec_uio(struct enetfec_private *fep) + /* Open device file */ + uio_job->uio_fd = open(uio_device_file_name, O_RDWR); + if (uio_job->uio_fd < 0) { +- ENETFEC_PMD_WARN("Unable to open ENETFEC_UIO file\n"); ++ ENETFEC_PMD_WARN("Unable to open ENETFEC_UIO file"); + return -1; } -@@ -1584,10 +1608,18 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev, - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; - struct mlx5_aso_mtr *aso_mtr; -+ struct mlx5_hw_q_job *job = -+ flow_hw_action_job_init(priv, queue, NULL, NULL, NULL, -+ MLX5_HW_Q_JOB_TYPE_CREATE, -+ MLX5_HW_INDIRECT_TYPE_LEGACY, NULL); -- aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true); -- if (!aso_mtr) -+ if (!job) +@@ -230,7 +230,7 @@ enetfec_configure(void) + + d = opendir(FEC_UIO_DEVICE_SYS_ATTR_PATH); + if (d == NULL) { +- ENETFEC_PMD_ERR("\nError opening directory '%s': %s\n", ++ ENETFEC_PMD_ERR("Error opening directory '%s': %s", + FEC_UIO_DEVICE_SYS_ATTR_PATH, strerror(errno)); return -1; -+ aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, true); -+ if (!aso_mtr) { -+ flow_hw_job_put(priv, job, queue); -+ return -1; -+ } + } +@@ -249,7 +249,7 @@ enetfec_configure(void) + ret = sscanf(dir->d_name + strlen("uio"), "%d", + &uio_minor_number); + if (ret < 0) +- ENETFEC_PMD_ERR("Error: not find minor number\n"); ++ ENETFEC_PMD_ERR("Error: not find minor number"); + /* + * Open file uioX/name and read first line which + * contains the name for the device. Based on the +@@ -259,7 +259,7 @@ enetfec_configure(void) + ret = file_read_first_line(FEC_UIO_DEVICE_SYS_ATTR_PATH, + dir->d_name, "name", uio_name); + if (ret != 0) { +- ENETFEC_PMD_INFO("file_read_first_line failed\n"); ++ ENETFEC_PMD_INFO("file_read_first_line failed"); + closedir(d); + return -1; + } +diff --git a/dpdk/drivers/net/enic/enic_ethdev.c b/dpdk/drivers/net/enic/enic_ethdev.c +index b04b6c9aa1..1121874346 100644 +--- a/dpdk/drivers/net/enic/enic_ethdev.c ++++ b/dpdk/drivers/net/enic/enic_ethdev.c +@@ -670,7 +670,7 @@ static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add) + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; - /* Compile METER_MARK action */ - acts[aso_mtr_pos].action = pool->action; -@@ -1722,15 +1754,9 @@ flow_hw_translate_indirect_meter(struct rte_eth_dev *dev, - const struct rte_flow_indirect_update_flow_meter_mark **flow_conf = - (typeof(flow_conf))action_conf->conf; + rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); +- ENICPMD_LOG(DEBUG, " %s address %s\n", ++ ENICPMD_LOG(DEBUG, " %s address %s", + add ? "add" : "remove", mac_str); + } -- /* -- * Masked indirect handle set dr5 action during template table -- * translation. 
-- */ -- if (!dr_rule->action) { -- ret = flow_dr_set_meter(priv, dr_rule, action_conf); -- if (ret) -- return ret; -- } -+ ret = flow_dr_set_meter(priv, dr_rule, action_conf); -+ if (ret) -+ return ret; - if (!act_data->shared_meter.conf_masked) { - if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS) - flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color); -@@ -2512,6 +2538,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, +@@ -693,7 +693,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, + rte_is_broadcast_ether_addr(addr)) { + rte_ether_format_addr(mac_str, + RTE_ETHER_ADDR_FMT_SIZE, addr); +- ENICPMD_LOG(ERR, " invalid multicast address %s\n", ++ ENICPMD_LOG(ERR, " invalid multicast address %s", + mac_str); + return -EINVAL; + } +@@ -701,7 +701,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, + + /* Flush all if requested */ + if (nb_mc_addr == 0 || mc_addr_set == NULL) { +- ENICPMD_LOG(DEBUG, " flush multicast addresses\n"); ++ ENICPMD_LOG(DEBUG, " flush multicast addresses"); + for (i = 0; i < enic->mc_count; i++) { + addr = &enic->mc_addrs[i]; + debug_log_add_del_addr(addr, false); +@@ -714,7 +714,7 @@ static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, + } + + if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) { +- ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n", ++ ENICPMD_LOG(ERR, " too many multicast addresses: max=%d", + ENIC_MULTICAST_PERFECT_FILTERS); + return -ENOSPC; + } +@@ -980,7 +980,7 @@ static int udp_tunnel_common_check(struct enic *enic, + tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE) + return -ENOTSUP; + if (!enic->overlay_offload) { +- ENICPMD_LOG(DEBUG, " overlay offload is not supported\n"); ++ ENICPMD_LOG(DEBUG, " overlay offload is not supported"); + return -ENOTSUP; } return 0; - err: -+ /* If rte_errno was not initialized and reached error state. */ -+ if (!rte_errno) -+ rte_errno = EINVAL; - err = rte_errno; - __flow_hw_action_template_destroy(dev, acts); - return rte_flow_error_set(error, err, -@@ -2865,6 +2894,30 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job, - return 0; +@@ -993,10 +993,10 @@ static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan) + cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE : + OVERLAY_CFG_GENEVE_PORT_UPDATE; + if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) { +- ENICPMD_LOG(DEBUG, " failed to update tunnel port\n"); ++ ENICPMD_LOG(DEBUG, " failed to update tunnel port"); + return -EINVAL; + } +- ENICPMD_LOG(DEBUG, " updated %s port to %u\n", ++ ENICPMD_LOG(DEBUG, " updated %s port to %u", + vxlan ? "vxlan" : "geneve", port); + if (vxlan) + enic->vxlan_port = port; +@@ -1027,7 +1027,7 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, + * "Adding" a new port number replaces it. + */ + if (tnl->udp_port == port || tnl->udp_port == 0) { +- ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n", ++ ENICPMD_LOG(DEBUG, " %u is already configured or invalid", + tnl->udp_port); + return -EINVAL; + } +@@ -1059,7 +1059,7 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev, + * which is tied to inner RSS and TSO. 
+ */ + if (tnl->udp_port != port) { +- ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n", ++ ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port", + tnl->udp_port); + return -EINVAL; + } +@@ -1323,7 +1323,7 @@ static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + } + if (eth_da.nb_representor_ports > 0 && + eth_da.type != RTE_ETH_REPRESENTOR_VF) { +- ENICPMD_LOG(ERR, "unsupported representor type: %s\n", ++ ENICPMD_LOG(ERR, "unsupported representor type: %s", + pci_dev->device.devargs->args); + return -ENOTSUP; + } +diff --git a/dpdk/drivers/net/enic/enic_flow.c b/dpdk/drivers/net/enic/enic_flow.c +index e6c9ad442a..758000ea21 100644 +--- a/dpdk/drivers/net/enic/enic_flow.c ++++ b/dpdk/drivers/net/enic/enic_flow.c +@@ -1351,14 +1351,14 @@ static void + enic_dump_actions(const struct filter_action_v2 *ea) + { + if (ea->type == FILTER_ACTION_RQ_STEERING) { +- ENICPMD_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx); ++ ENICPMD_LOG(INFO, "Action(V1), queue: %u", ea->rq_idx); + } else if (ea->type == FILTER_ACTION_V2) { +- ENICPMD_LOG(INFO, "Actions(V2)\n"); ++ ENICPMD_LOG(INFO, "Actions(V2)"); + if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG) +- ENICPMD_LOG(INFO, "\tqueue: %u\n", ++ ENICPMD_LOG(INFO, "\tqueue: %u", + enic_sop_rq_idx_to_rte_idx(ea->rq_idx)); + if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG) +- ENICPMD_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id); ++ ENICPMD_LOG(INFO, "\tfilter_id: %u", ea->filter_id); + } + } + +@@ -1374,13 +1374,13 @@ enic_dump_filter(const struct filter_v2 *filt) + + switch (filt->type) { + case FILTER_IPV4_5TUPLE: +- ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE\n"); ++ ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE"); + break; + case FILTER_USNIC_IP: + case FILTER_DPDK_1: + /* FIXME: this should be a loop */ + gp = &filt->u.generic_1; +- ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n", ++ ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x", + gp->val_vlan, gp->mask_vlan); + + if (gp->mask_flags & FILTER_GENERIC_1_IPV4) +@@ -1438,7 +1438,7 @@ enic_dump_filter(const struct filter_v2 *filt) + ? "ipfrag(y)" : "ipfrag(n)"); + else + sprintf(ipfrag, "%s ", "ipfrag(x)"); +- ENICPMD_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp, ++ ENICPMD_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s", ip4, ip6, udp, + tcp, tcpudp, ip4csum, l4csum, ipfrag); + + for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) { +@@ -1455,7 +1455,7 @@ enic_dump_filter(const struct filter_v2 *filt) + bp += 2; + } + *bp = '\0'; +- ENICPMD_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf); ++ ENICPMD_LOG(INFO, "\tL%u mask: %s", i + 2, buf); + bp = buf; + for (j = 0; j <= mbyte; j++) { + sprintf(bp, "%02x", +@@ -1463,11 +1463,11 @@ enic_dump_filter(const struct filter_v2 *filt) + bp += 2; + } + *bp = '\0'; +- ENICPMD_LOG(INFO, "\tL%u val: %s\n", i + 2, buf); ++ ENICPMD_LOG(INFO, "\tL%u val: %s", i + 2, buf); + } + break; + default: +- ENICPMD_LOG(INFO, "FILTER UNKNOWN\n"); ++ ENICPMD_LOG(INFO, "FILTER UNKNOWN"); + break; + } } - -+/** -+ * Release any actions allocated for the flow rule during actions construction. -+ * -+ * @param[in] flow -+ * Pointer to flow structure. 
-+ */ -+static void -+flow_hw_release_actions(struct rte_eth_dev *dev, -+ uint32_t queue, -+ struct rte_flow_hw *flow) -+{ -+ struct mlx5_priv *priv = dev->data->dev_private; -+ struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; +diff --git a/dpdk/drivers/net/enic/enic_vf_representor.c b/dpdk/drivers/net/enic/enic_vf_representor.c +index 5d8d29135c..8469e06de9 100644 +--- a/dpdk/drivers/net/enic/enic_vf_representor.c ++++ b/dpdk/drivers/net/enic/enic_vf_representor.c +@@ -64,7 +64,7 @@ static int enic_vf_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, + /* Pass vf not pf because of cq index calculation. See enic_alloc_wq */ + err = enic_alloc_wq(&vf->enic, queue_idx, socket_id, nb_desc); + if (err) { +- ENICPMD_LOG(ERR, "error in allocating wq\n"); ++ ENICPMD_LOG(ERR, "error in allocating wq"); + return err; + } + return 0; +@@ -104,7 +104,7 @@ static int enic_vf_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, + ret = enic_alloc_rq(&vf->enic, queue_idx, socket_id, mp, nb_desc, + rx_conf->rx_free_thresh); + if (ret) { +- ENICPMD_LOG(ERR, "error in allocating rq\n"); ++ ENICPMD_LOG(ERR, "error in allocating rq"); + return ret; + } + return 0; +@@ -230,14 +230,14 @@ static int enic_vf_dev_start(struct rte_eth_dev *eth_dev) + /* enic_enable */ + ret = enic_alloc_rx_queue_mbufs(pf, &pf->rq[index]); + if (ret) { +- ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs\n"); ++ ENICPMD_LOG(ERR, "Failed to alloc sop RX queue mbufs"); + return ret; + } + ret = enic_alloc_rx_queue_mbufs(pf, data_rq); + if (ret) { + /* Release the allocated mbufs for the sop rq*/ + enic_rxmbuf_queue_release(pf, &pf->rq[index]); +- ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs\n"); ++ ENICPMD_LOG(ERR, "Failed to alloc data RX queue mbufs"); + return ret; + } + enic_start_rq(pf, vf->pf_rq_sop_idx); +@@ -430,7 +430,7 @@ static int enic_vf_stats_get(struct rte_eth_dev *eth_dev, + /* Get VF stats via PF */ + err = vnic_dev_stats_dump(vf->enic.vdev, &vs); + if (err) { +- ENICPMD_LOG(ERR, "error in getting stats\n"); ++ ENICPMD_LOG(ERR, "error in getting stats"); + return err; + } + stats->ipackets = vs->rx.rx_frames_ok; +@@ -453,7 +453,7 @@ static int enic_vf_stats_reset(struct rte_eth_dev *eth_dev) + /* Ask PF to clear VF stats */ + err = vnic_dev_stats_clear(vf->enic.vdev); + if (err) +- ENICPMD_LOG(ERR, "error in clearing stats\n"); ++ ENICPMD_LOG(ERR, "error in clearing stats"); + return err; + } + +@@ -581,7 +581,7 @@ static int get_vf_config(struct enic_vf_representor *vf) + /* VF MAC */ + err = vnic_dev_get_mac_addr(vf->enic.vdev, vf->mac_addr.addr_bytes); + if (err) { +- ENICPMD_LOG(ERR, "error in getting MAC address\n"); ++ ENICPMD_LOG(ERR, "error in getting MAC address"); + return err; + } + rte_ether_addr_copy(&vf->mac_addr, vf->eth_dev->data->mac_addrs); +@@ -591,7 +591,7 @@ static int get_vf_config(struct enic_vf_representor *vf) + offsetof(struct vnic_enet_config, mtu), + sizeof(c->mtu), &c->mtu); + if (err) { +- ENICPMD_LOG(ERR, "error in getting MTU\n"); ++ ENICPMD_LOG(ERR, "error in getting MTU"); + return err; + } + /* +diff --git a/dpdk/drivers/net/failsafe/failsafe_args.c b/dpdk/drivers/net/failsafe/failsafe_args.c +index b203e02d9a..1b8f1d3050 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_args.c ++++ b/dpdk/drivers/net/failsafe/failsafe_args.c +@@ -248,7 +248,7 @@ fs_parse_device_param(struct rte_eth_dev *dev, const char *param, + goto free_args; + } else { + ERROR("Unrecognized device type: %.*s", (int)b, param); +- return -EINVAL; ++ ret = -EINVAL; + } + free_args: + free(args); +@@ -406,7 
+406,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params) + kvlist = rte_kvargs_parse(mut_params, + pmd_failsafe_init_parameters); + if (kvlist == NULL) { +- ERROR("Error parsing parameters, usage:\n" ++ ERROR("Error parsing parameters, usage:" + PMD_FAILSAFE_PARAM_STRING); + return -1; + } +diff --git a/dpdk/drivers/net/failsafe/failsafe_eal.c b/dpdk/drivers/net/failsafe/failsafe_eal.c +index d71b512f81..e79d3b4120 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_eal.c ++++ b/dpdk/drivers/net/failsafe/failsafe_eal.c +@@ -16,7 +16,7 @@ fs_ethdev_portid_get(const char *name, uint16_t *port_id) + size_t len; + + if (name == NULL) { +- DEBUG("Null pointer is specified\n"); ++ DEBUG("Null pointer is specified"); + return -EINVAL; + } + len = strlen(name); +diff --git a/dpdk/drivers/net/failsafe/failsafe_ether.c b/dpdk/drivers/net/failsafe/failsafe_ether.c +index 031f3eb13f..dc4aba6e30 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_ether.c ++++ b/dpdk/drivers/net/failsafe/failsafe_ether.c +@@ -38,7 +38,7 @@ fs_flow_complain(struct rte_flow_error *error) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; +- ERROR("Caught error type %d (%s): %s%s\n", ++ ERROR("Caught error type %d (%s): %s%s", + error->type, errstr, + error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", + error->cause), buf) : "", +@@ -640,7 +640,7 @@ failsafe_eth_new_event_callback(uint16_t port_id, + if (sdev->state >= DEV_PROBED) + continue; + if (dev->device == NULL) { +- WARN("Trying to probe malformed device %s.\n", ++ WARN("Trying to probe malformed device %s.", + sdev->devargs.name); + continue; + } +diff --git a/dpdk/drivers/net/failsafe/failsafe_intr.c b/dpdk/drivers/net/failsafe/failsafe_intr.c +index 969ded6ced..68b7310b85 100644 +--- a/dpdk/drivers/net/failsafe/failsafe_intr.c ++++ b/dpdk/drivers/net/failsafe/failsafe_intr.c +@@ -173,17 +173,17 @@ fs_rx_event_proxy_service_install(struct fs_priv *priv) + /* run the service */ + ret = rte_service_component_runstate_set(priv->rxp.sid, 1); + if (ret < 0) { +- ERROR("Failed Setting component runstate\n"); ++ ERROR("Failed Setting component runstate"); + return ret; + } + ret = rte_service_set_stats_enable(priv->rxp.sid, 1); + if (ret < 0) { +- ERROR("Failed enabling stats\n"); ++ ERROR("Failed enabling stats"); + return ret; + } + ret = rte_service_runstate_set(priv->rxp.sid, 1); + if (ret < 0) { +- ERROR("Failed to run service\n"); ++ ERROR("Failed to run service"); + return ret; + } + priv->rxp.sstate = SS_READY; +diff --git a/dpdk/drivers/net/fm10k/fm10k_ethdev.c b/dpdk/drivers/net/fm10k/fm10k_ethdev.c +index 4d3c4c10cf..cc2012786d 100644 +--- a/dpdk/drivers/net/fm10k/fm10k_ethdev.c ++++ b/dpdk/drivers/net/fm10k/fm10k_ethdev.c +@@ -3057,7 +3057,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = pdev->intr_handle; +- int diag, i; ++ int diag, i, ret; + struct fm10k_macvlan_filter_info *macvlan; + + PMD_INIT_FUNC_TRACE(); +@@ -3146,21 +3146,24 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + diag = fm10k_stats_reset(dev); + if (diag != 0) { + PMD_INIT_LOG(ERR, "Stats reset failed: %d", diag); +- return diag; ++ ret = diag; ++ goto err_stat; + } + + /* Reset the hw */ + diag = fm10k_reset_hw(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_reset_hw; + } + + /* Setup mailbox 
service */ + diag = fm10k_setup_mbx_service(hw); + if (diag != FM10K_SUCCESS) { + PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_mbx; + } + + /*PF/VF has different interrupt handling mechanism */ +@@ -3199,7 +3202,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (switch_ready == false) { + PMD_INIT_LOG(ERR, "switch is not ready"); +- return -1; ++ ret = -1; ++ goto err_switch_ready; + } + } + +@@ -3234,7 +3238,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); +- return -1; ++ ret = -1; ++ goto err_vid; + } + } + +@@ -3243,6 +3248,28 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) + MAIN_VSI_POOL_NUMBER); + + return 0; + -+ if (flow->fate_type == MLX5_FLOW_FATE_JUMP) -+ flow_hw_jump_release(dev, flow->jump); -+ else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE) -+ mlx5_hrxq_obj_release(dev, flow->hrxq); -+ if (mlx5_hws_cnt_id_valid(flow->cnt_id)) -+ flow_hw_age_count_release(priv, queue, flow, NULL); -+ if (flow->mtr_id) -+ mlx5_ipool_free(pool->idx_pool, flow->mtr_id); -+} ++err_vid: ++err_switch_ready: ++ rte_intr_disable(intr_handle); + - /** - * Construct flow action array. - * -@@ -2980,7 +3033,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - (dev, queue, action, table, it_idx, - at->action_flags, job->flow, - &rule_acts[act_data->action_dst])) -- return -1; -+ goto error; - break; - case RTE_FLOW_ACTION_TYPE_VOID: - break; -@@ -3000,7 +3053,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - jump = flow_hw_jump_action_register - (dev, &table->cfg, jump_group, NULL); - if (!jump) -- return -1; -+ goto error; - rule_acts[act_data->action_dst].action = - (!!attr.group) ? jump->hws_action : jump->root_action; - job->flow->jump = jump; -@@ -3012,7 +3065,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - ft_flag, - action); - if (!hrxq) -- return -1; -+ goto error; - rule_acts[act_data->action_dst].action = hrxq->action; - job->flow->hrxq = hrxq; - job->flow->fate_type = MLX5_FLOW_FATE_QUEUE; -@@ -3022,19 +3075,19 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - if (flow_hw_shared_action_get - (dev, act_data, item_flags, - &rule_acts[act_data->action_dst])) -- return -1; -+ goto error; ++ if (hw->mac.type == fm10k_mac_pf) { ++ fm10k_dev_disable_intr_pf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_pf, (void *)dev); ++ } else { ++ fm10k_dev_disable_intr_vf(dev); ++ rte_intr_callback_unregister(intr_handle, ++ fm10k_dev_interrupt_handler_vf, (void *)dev); ++ } ++ ++err_mbx: ++err_reset_hw: ++err_stat: ++ rte_free(dev->data->mac_addrs); ++ dev->data->mac_addrs = NULL; ++ ++ return ret; + } + + static int +diff --git a/dpdk/drivers/net/gve/base/gve_adminq.c b/dpdk/drivers/net/gve/base/gve_adminq.c +index 343bd13d67..438c0c5441 100644 +--- a/dpdk/drivers/net/gve/base/gve_adminq.c ++++ b/dpdk/drivers/net/gve/base/gve_adminq.c +@@ -11,7 +11,7 @@ + #define GVE_ADMINQ_SLEEP_LEN 20 + #define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100 + +-#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n Expected: length=%d, feature_mask=%x.\n Actual: length=%d, feature_mask=%x." ++#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error: Expected: length=%d, feature_mask=%x. Actual: length=%d, feature_mask=%x." + + #define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver." 
+ +diff --git a/dpdk/drivers/net/gve/base/gve_osdep.h b/dpdk/drivers/net/gve/base/gve_osdep.h +index a3702f4b8c..a6eb52306f 100644 +--- a/dpdk/drivers/net/gve/base/gve_osdep.h ++++ b/dpdk/drivers/net/gve/base/gve_osdep.h +@@ -29,22 +29,46 @@ + #include + #endif + +-typedef uint8_t u8; +-typedef uint16_t u16; +-typedef uint32_t u32; +-typedef uint64_t u64; ++#ifndef u8 ++#define u8 uint8_t ++#endif ++#ifndef u16 ++#define u16 uint16_t ++#endif ++#ifndef u32 ++#define u32 uint32_t ++#endif ++#ifndef u64 ++#define u64 uint64_t ++#endif + +-typedef rte_be16_t __sum16; ++#ifndef __sum16 ++#define __sum16 rte_be16_t ++#endif + +-typedef rte_be16_t __be16; +-typedef rte_be32_t __be32; +-typedef rte_be64_t __be64; ++#ifndef __be16 ++#define __be16 rte_be16_t ++#endif ++#ifndef __be32 ++#define __be32 rte_be32_t ++#endif ++#ifndef __be64 ++#define __be64 rte_be64_t ++#endif + +-typedef rte_le16_t __le16; +-typedef rte_le32_t __le32; +-typedef rte_le64_t __le64; ++#ifndef __le16 ++#define __le16 rte_le16_t ++#endif ++#ifndef __le32 ++#define __le32 rte_le32_t ++#endif ++#ifndef __le64 ++#define __le64 rte_le64_t ++#endif + +-typedef rte_iova_t dma_addr_t; ++#ifndef dma_addr_t ++#define dma_addr_t rte_iova_t ++#endif + + #define ETH_MIN_MTU RTE_ETHER_MIN_MTU + #define ETH_ALEN RTE_ETHER_ADDR_LEN +diff --git a/dpdk/drivers/net/gve/gve_ethdev.c b/dpdk/drivers/net/gve/gve_ethdev.c +index ecd37ff37f..bd683a64d7 100644 +--- a/dpdk/drivers/net/gve/gve_ethdev.c ++++ b/dpdk/drivers/net/gve/gve_ethdev.c +@@ -140,11 +140,16 @@ gve_start_queues(struct rte_eth_dev *dev) + PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues); + return ret; + } +- for (i = 0; i < num_queues; i++) +- if (gve_tx_queue_start(dev, i) != 0) { ++ for (i = 0; i < num_queues; i++) { ++ if (gve_is_gqi(priv)) ++ ret = gve_tx_queue_start(dev, i); ++ else ++ ret = gve_tx_queue_start_dqo(dev, i); ++ if (ret != 0) { + PMD_DRV_LOG(ERR, "Fail to start Tx queue %d", i); + goto err_tx; + } ++ } + + num_queues = dev->data->nb_rx_queues; + priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues; +@@ -167,9 +172,15 @@ gve_start_queues(struct rte_eth_dev *dev) + return 0; + + err_rx: +- gve_stop_rx_queues(dev); ++ if (gve_is_gqi(priv)) ++ gve_stop_rx_queues(dev); ++ else ++ gve_stop_rx_queues_dqo(dev); + err_tx: +- gve_stop_tx_queues(dev); ++ if (gve_is_gqi(priv)) ++ gve_stop_tx_queues(dev); ++ else ++ gve_stop_tx_queues_dqo(dev); + return ret; + } + +@@ -193,10 +204,16 @@ gve_dev_start(struct rte_eth_dev *dev) + static int + gve_dev_stop(struct rte_eth_dev *dev) + { ++ struct gve_priv *priv = dev->data->dev_private; + dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; + +- gve_stop_tx_queues(dev); +- gve_stop_rx_queues(dev); ++ if (gve_is_gqi(priv)) { ++ gve_stop_tx_queues(dev); ++ gve_stop_rx_queues(dev); ++ } else { ++ gve_stop_tx_queues_dqo(dev); ++ gve_stop_rx_queues_dqo(dev); ++ } + + dev->data->dev_started = 0; + +diff --git a/dpdk/drivers/net/gve/gve_ethdev.h b/dpdk/drivers/net/gve/gve_ethdev.h +index 58d8943e71..133860488c 100644 +--- a/dpdk/drivers/net/gve/gve_ethdev.h ++++ b/dpdk/drivers/net/gve/gve_ethdev.h +@@ -33,6 +33,8 @@ + RTE_MBUF_F_TX_L4_MASK | \ + RTE_MBUF_F_TX_TCP_SEG) + ++#define GVE_TX_CKSUM_OFFLOAD_MASK_DQO (GVE_TX_CKSUM_OFFLOAD_MASK | RTE_MBUF_F_TX_IP_CKSUM) ++ + /* A list of pages registered with the device during setup and used by a queue + * as buffers + */ +diff --git a/dpdk/drivers/net/gve/gve_rx_dqo.c b/dpdk/drivers/net/gve/gve_rx_dqo.c +index 7c7a8c48d0..3f694a4d9a 100644 +--- 
a/dpdk/drivers/net/gve/gve_rx_dqo.c ++++ b/dpdk/drivers/net/gve/gve_rx_dqo.c +@@ -10,66 +10,36 @@ + static inline void + gve_rx_refill_dqo(struct gve_rx_queue *rxq) + { +- volatile struct gve_rx_desc_dqo *rx_buf_ring; + volatile struct gve_rx_desc_dqo *rx_buf_desc; + struct rte_mbuf *nmb[rxq->nb_rx_hold]; + uint16_t nb_refill = rxq->nb_rx_hold; +- uint16_t nb_desc = rxq->nb_rx_desc; + uint16_t next_avail = rxq->bufq_tail; + struct rte_eth_dev *dev; + uint64_t dma_addr; +- uint16_t delta; + int i; + + if (rxq->nb_rx_hold < rxq->free_thresh) + return; + +- rx_buf_ring = rxq->rx_ring; +- delta = nb_desc - next_avail; +- if (unlikely(delta < nb_refill)) { +- if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, delta) == 0)) { +- for (i = 0; i < delta; i++) { +- rx_buf_desc = &rx_buf_ring[next_avail + i]; +- rxq->sw_ring[next_avail + i] = nmb[i]; +- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); +- rx_buf_desc->header_buf_addr = 0; +- rx_buf_desc->buf_addr = dma_addr; +- } +- nb_refill -= delta; +- next_avail = 0; +- rxq->nb_rx_hold -= delta; +- } else { +- rxq->stats.no_mbufs_bulk++; +- rxq->stats.no_mbufs += nb_desc - next_avail; +- dev = &rte_eth_devices[rxq->port_id]; +- dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; +- PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", +- rxq->port_id, rxq->queue_id); +- return; +- } ++ if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill))) { ++ rxq->stats.no_mbufs_bulk++; ++ rxq->stats.no_mbufs += nb_refill; ++ dev = &rte_eth_devices[rxq->port_id]; ++ dev->data->rx_mbuf_alloc_failed += nb_refill; ++ PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", ++ rxq->port_id, rxq->queue_id); ++ return; + } + +- if (nb_desc - next_avail >= nb_refill) { +- if (likely(rte_pktmbuf_alloc_bulk(rxq->mpool, nmb, nb_refill) == 0)) { +- for (i = 0; i < nb_refill; i++) { +- rx_buf_desc = &rx_buf_ring[next_avail + i]; +- rxq->sw_ring[next_avail + i] = nmb[i]; +- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); +- rx_buf_desc->header_buf_addr = 0; +- rx_buf_desc->buf_addr = dma_addr; +- } +- next_avail += nb_refill; +- rxq->nb_rx_hold -= nb_refill; +- } else { +- rxq->stats.no_mbufs_bulk++; +- rxq->stats.no_mbufs += nb_desc - next_avail; +- dev = &rte_eth_devices[rxq->port_id]; +- dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail; +- PMD_DRV_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u", +- rxq->port_id, rxq->queue_id); +- } ++ for (i = 0; i < nb_refill; i++) { ++ rx_buf_desc = &rxq->rx_ring[next_avail]; ++ rxq->sw_ring[next_avail] = nmb[i]; ++ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i])); ++ rx_buf_desc->header_buf_addr = 0; ++ rx_buf_desc->buf_addr = dma_addr; ++ next_avail = (next_avail + 1) & (rxq->nb_rx_desc - 1); + } +- ++ rxq->nb_rx_hold -= nb_refill; + rte_write32(next_avail, rxq->qrx_tail); + + rxq->bufq_tail = next_avail; +@@ -102,6 +72,8 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + if (rx_desc->generation != rxq->cur_gen_bit) break; - case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: - enc_item = ((const struct rte_flow_action_vxlan_encap *) - action->conf)->definition; - if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) -- return -1; -+ goto error; - break; - case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: - enc_item = ((const struct rte_flow_action_nvgre_encap *) - action->conf)->definition; - if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) -- return -1; -+ goto error; - break; - case 
RTE_FLOW_ACTION_TYPE_RAW_ENCAP: - raw_encap_data = -@@ -3063,12 +3116,12 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - hw_acts, - action); - if (ret) -- return -1; -+ goto error; - break; - case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: - port_action = action->conf; - if (!priv->hw_vport[port_action->port_id]) -- return -1; -+ goto error; - rule_acts[act_data->action_dst].action = - priv->hw_vport[port_action->port_id]; - break; -@@ -3088,7 +3141,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - jump = flow_hw_jump_action_register - (dev, &table->cfg, aso_mtr->fm.group, NULL); - if (!jump) -- return -1; -+ goto error; - MLX5_ASSERT - (!rule_acts[act_data->action_dst + 1].action); - rule_acts[act_data->action_dst + 1].action = -@@ -3096,8 +3149,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - jump->root_action; - job->flow->jump = jump; - job->flow->fate_type = MLX5_FLOW_FATE_JUMP; -- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) -- return -1; -+ if (mlx5_aso_mtr_wait(priv, aso_mtr, true)) -+ goto error; - break; - case RTE_FLOW_ACTION_TYPE_AGE: - age = action->conf; -@@ -3112,7 +3165,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - job->flow->res_idx, - error); - if (age_idx == 0) -- return -rte_errno; -+ goto error; - job->flow->age_idx = age_idx; - if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) - /* -@@ -3123,11 +3176,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + ++ rte_io_rmb(); ++ + if (unlikely(rx_desc->rx_error)) { + rxq->stats.errors++; + continue; +@@ -127,7 +99,7 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rxm->ol_flags = 0; + + rxm->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; +- rxm->hash.rss = rte_be_to_cpu_32(rx_desc->hash); ++ rxm->hash.rss = rte_le_to_cpu_32(rx_desc->hash); + + rx_pkts[nb_rx++] = rxm; + bytes += pkt_len; +@@ -135,14 +107,12 @@ gve_rx_burst_dqo(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + if (nb_rx > 0) { + rxq->rx_tail = rx_id; +- if (rx_id_bufq != rxq->next_avail) +- rxq->next_avail = rx_id_bufq; +- +- gve_rx_refill_dqo(rxq); ++ rxq->next_avail = rx_id_bufq; + + rxq->stats.packets += nb_rx; + rxq->stats.bytes += bytes; + } ++ gve_rx_refill_dqo(rxq); + + return nb_rx; + } +@@ -335,34 +305,36 @@ static int + gve_rxq_mbufs_alloc_dqo(struct gve_rx_queue *rxq) + { + struct rte_mbuf *nmb; ++ uint16_t rx_mask; + uint16_t i; + int diag; + +- diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc); ++ rx_mask = rxq->nb_rx_desc - 1; ++ diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], ++ rx_mask); + if (diag < 0) { + rxq->stats.no_mbufs_bulk++; +- for (i = 0; i < rxq->nb_rx_desc - 1; i++) { ++ for (i = 0; i < rx_mask; i++) { + nmb = rte_pktmbuf_alloc(rxq->mpool); + if (!nmb) break; - /* Fall-through. */ - case RTE_FLOW_ACTION_TYPE_COUNT: -- /* If the port is engaged in resource sharing, do not use queue cache. */ -- cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? 
NULL : &queue; -+ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); - ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx); - if (ret != 0) -- return ret; -+ goto error; - ret = mlx5_hws_cnt_pool_get_action_offset - (priv->hws_cpool, - cnt_id, -@@ -3135,7 +3187,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - &rule_acts[act_data->action_dst].counter.offset - ); - if (ret != 0) -- return ret; -+ goto error; - job->flow->cnt_id = cnt_id; - break; - case MLX5_RTE_FLOW_ACTION_TYPE_COUNT: -@@ -3146,7 +3198,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - &rule_acts[act_data->action_dst].counter.offset - ); - if (ret != 0) -- return ret; -+ goto error; - job->flow->cnt_id = act_data->shared_counter.id; - break; - case RTE_FLOW_ACTION_TYPE_CONNTRACK: -@@ -3154,7 +3206,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - ((uint32_t)(uintptr_t)action->conf); - if (flow_hw_ct_compile(dev, queue, ct_idx, - &rule_acts[act_data->action_dst])) -- return -1; -+ goto error; - break; - case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK: - mtr_id = act_data->shared_meter.id & -@@ -3162,7 +3214,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - /* Find ASO object. */ - aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id); - if (!aso_mtr) -- return -1; -+ goto error; - rule_acts[act_data->action_dst].action = - pool->action; - rule_acts[act_data->action_dst].aso_meter.offset = -@@ -3177,7 +3229,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - act_data->action_dst, action, - rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE); - if (ret != 0) -- return ret; -+ goto error; - break; - default: - break; -@@ -3215,6 +3267,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, - if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) - job->flow->cnt_id = hw_acts->cnt_id; + rxq->sw_ring[i] = nmb; + } + if (i < rxq->nb_rx_desc - 1) { +- rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i; ++ rxq->stats.no_mbufs += rx_mask - i; + return -ENOMEM; + } + } + +- for (i = 0; i < rxq->nb_rx_desc; i++) { +- if (i == rxq->nb_rx_desc - 1) +- break; ++ for (i = 0; i < rx_mask; i++) { + nmb = rxq->sw_ring[i]; + rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)); + rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i); + } ++ rxq->rx_ring[rx_mask].buf_id = rte_cpu_to_le_16(rx_mask); + + rxq->nb_rx_hold = 0; +- rxq->bufq_tail = rxq->nb_rx_desc - 1; ++ rxq->bufq_tail = rx_mask; + + rte_write32(rxq->bufq_tail, rxq->qrx_tail); + +diff --git a/dpdk/drivers/net/gve/gve_tx.c b/dpdk/drivers/net/gve/gve_tx.c +index 2e0d001109..bb21b90635 100644 +--- a/dpdk/drivers/net/gve/gve_tx.c ++++ b/dpdk/drivers/net/gve/gve_tx.c +@@ -681,7 +681,7 @@ gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) + + rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr); + +- dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + } +diff --git a/dpdk/drivers/net/gve/gve_tx_dqo.c b/dpdk/drivers/net/gve/gve_tx_dqo.c +index 16101de84f..ce3681b6c6 100644 +--- a/dpdk/drivers/net/gve/gve_tx_dqo.c ++++ b/dpdk/drivers/net/gve/gve_tx_dqo.c +@@ -13,7 +13,7 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq) + struct gve_tx_compl_desc *compl_desc; + struct gve_tx_queue *aim_txq; + uint16_t nb_desc_clean; +- struct rte_mbuf *txe; ++ struct rte_mbuf *txe, *txe_next; + uint16_t compl_tag; + uint16_t next; + +@@ -24,6 +24,8 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq) + if (compl_desc->generation != 
txq->cur_gen_bit) + return; + ++ rte_io_rmb(); ++ + compl_tag = rte_le_to_cpu_16(compl_desc->completion_tag); + + aim_txq = txq->txqs[compl_desc->id]; +@@ -43,10 +45,15 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq) + PMD_DRV_LOG(DEBUG, "GVE_COMPL_TYPE_DQO_REINJECTION !!!"); + /* FALLTHROUGH */ + case GVE_COMPL_TYPE_DQO_PKT: ++ /* free all segments. */ + txe = aim_txq->sw_ring[compl_tag]; +- if (txe != NULL) { ++ while (txe != NULL) { ++ txe_next = txe->next; + rte_pktmbuf_free_seg(txe); +- txe = NULL; ++ if (aim_txq->sw_ring[compl_tag] == txe) ++ aim_txq->sw_ring[compl_tag] = NULL; ++ txe = txe_next; ++ compl_tag = (compl_tag + 1) & (aim_txq->sw_size - 1); + } + break; + case GVE_COMPL_TYPE_DQO_MISS: +@@ -83,6 +90,8 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + uint16_t tx_id; + uint16_t sw_id; + uint64_t bytes; ++ uint16_t first_sw_id; ++ uint8_t csum; + + sw_ring = txq->sw_ring; + txr = txq->tx_ring; +@@ -107,32 +116,36 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + ol_flags = tx_pkt->ol_flags; + nb_used = tx_pkt->nb_segs; ++ first_sw_id = sw_id; ++ ++ csum = !!(ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO); + + do { +- txd = &txr[tx_id]; ++ if (sw_ring[sw_id] != NULL) ++ PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring"); + ++ txd = &txr[tx_id]; + sw_ring[sw_id] = tx_pkt; + + /* fill Tx descriptor */ + txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt)); + txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO; +- txd->pkt.compl_tag = rte_cpu_to_le_16(sw_id); ++ txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id); + txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO); ++ txd->pkt.end_of_packet = 0; ++ txd->pkt.checksum_offload_enable = csum; + + /* size of desc_ring and sw_ring could be different */ + tx_id = (tx_id + 1) & mask; + sw_id = (sw_id + 1) & sw_mask; + +- bytes += tx_pkt->pkt_len; ++ bytes += tx_pkt->data_len; + tx_pkt = tx_pkt->next; + } while (tx_pkt); + + /* fill the last descriptor with End of Packet (EOP) bit */ + txd->pkt.end_of_packet = 1; + +- if (ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK) +- txd->pkt.checksum_offload_enable = 1; +- + txq->nb_free -= nb_used; + txq->nb_used += nb_used; + } +@@ -384,7 +397,7 @@ gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id) + + rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr); + +- dev->data->rx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; ++ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; + } +diff --git a/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c b/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c +index fecb653401..f0e1139a98 100644 +--- a/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c ++++ b/dpdk/drivers/net/hinic/base/hinic_pmd_eqs.c +@@ -471,7 +471,7 @@ int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev) + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs < HINIC_MIN_AEQS) { +- PMD_DRV_LOG(ERR, "PMD need %d AEQs, Chip has %d\n", ++ PMD_DRV_LOG(ERR, "PMD need %d AEQs, Chip has %d", + HINIC_MIN_AEQS, num_aeqs); + return -EINVAL; + } +diff --git a/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c b/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c +index 92a7cc1a11..a75a6953ad 100644 +--- a/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c ++++ b/dpdk/drivers/net/hinic/base/hinic_pmd_mbox.c +@@ -310,7 +310,7 @@ static int mbox_msg_ack_aeqn(struct hinic_hwdev *hwdev) + /* This is used for ovs */ + msg_ack_aeqn = HINIC_AEQN_1; + } else { +- PMD_DRV_LOG(ERR, "Warning: Invalid aeq 
num: %d\n", aeq_num); ++ PMD_DRV_LOG(ERR, "Warning: Invalid aeq num: %d", aeq_num); + msg_ack_aeqn = -1; + } + +@@ -372,13 +372,13 @@ static int init_mbox_info(struct hinic_recv_mbox *mbox_info) + + mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->mbox) { +- PMD_DRV_LOG(ERR, "Alloc mbox buf_in mem failed\n"); ++ PMD_DRV_LOG(ERR, "Alloc mbox buf_in mem failed"); + return -ENOMEM; + } + + mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!mbox_info->buf_out) { +- PMD_DRV_LOG(ERR, "Alloc mbox buf_out mem failed\n"); ++ PMD_DRV_LOG(ERR, "Alloc mbox buf_out mem failed"); + err = -ENOMEM; + goto alloc_buf_out_err; + } +diff --git a/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c b/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c +index 8c08d63286..a08020313f 100644 +--- a/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c ++++ b/dpdk/drivers/net/hinic/base/hinic_pmd_niccfg.c +@@ -683,7 +683,7 @@ int hinic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err || !out_size || pause_info.mgmt_msg_head.status) { +- PMD_DRV_LOG(ERR, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x\n", ++ PMD_DRV_LOG(ERR, "Failed to get pause info, err: %d, status: 0x%x, out size: 0x%x", + err, pause_info.mgmt_msg_head.status, out_size); + return -EIO; + } +@@ -1332,7 +1332,7 @@ int hinic_get_mgmt_version(void *hwdev, char *fw) + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size); + if (err || !out_size || fw_ver.mgmt_msg_head.status) { +- PMD_DRV_LOG(ERR, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x\n", ++ PMD_DRV_LOG(ERR, "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x", + err, fw_ver.mgmt_msg_head.status, out_size); + return -EIO; + } +@@ -1767,7 +1767,7 @@ int hinic_set_fdir_filter(void *hwdev, u8 filter_type, u8 qid, u8 type_enable, + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set port Q filter failed, err: %d, status: 0x%x, out size: 0x%x, type: 0x%x," +- " enable: 0x%x, qid: 0x%x, filter_type_enable: 0x%x\n", ++ " enable: 0x%x, qid: 0x%x, filter_type_enable: 0x%x", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + filter_type, enable, qid, type_enable); + return -EIO; +@@ -1819,7 +1819,7 @@ int hinic_set_normal_filter(void *hwdev, u8 qid, u8 normal_type_enable, + &port_filer_cmd, &out_size); + if (err || !out_size || port_filer_cmd.mgmt_msg_head.status) { + PMD_DRV_LOG(ERR, "Set normal filter failed, err: %d, status: 0x%x, out size: 0x%x, fdir_flag: 0x%x," +- " enable: 0x%x, qid: 0x%x, normal_type_enable: 0x%x, key:0x%x\n", ++ " enable: 0x%x, qid: 0x%x, normal_type_enable: 0x%x, key:0x%x", + err, port_filer_cmd.mgmt_msg_head.status, out_size, + flag, enable, qid, normal_type_enable, key); + return -EIO; +diff --git a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +index d4978e0649..cb5c013b21 100644 +--- a/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c ++++ b/dpdk/drivers/net/hinic/hinic_pmd_ethdev.c +@@ -1914,7 +1914,7 @@ static int hinic_flow_ctrl_set(struct rte_eth_dev *dev, + nic_dev->nic_pause.rx_pause = nic_pause.rx_pause; + nic_dev->nic_pause.tx_pause = nic_pause.tx_pause; + +- PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s\n", ++ PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s", + nic_pause.tx_pause ? "on" : "off", + nic_pause.rx_pause ? "on" : "off", + nic_pause.auto_neg ? 
"on" : "off"); +@@ -2559,7 +2559,7 @@ static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id) + + valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap; + if (!valid_cos_bitmap) { +- PMD_DRV_LOG(ERR, "PF has none cos to support\n"); ++ PMD_DRV_LOG(ERR, "PF has none cos to support"); + return -EFAULT; + } + +diff --git a/dpdk/drivers/net/hns3/hns3_common.c b/dpdk/drivers/net/hns3/hns3_common.c +index 8f224aa00c..7a36673c95 100644 +--- a/dpdk/drivers/net/hns3/hns3_common.c ++++ b/dpdk/drivers/net/hns3/hns3_common.c +@@ -85,7 +85,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info) + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | + RTE_ETH_TX_OFFLOAD_VLAN_INSERT); + +- if (!hw->port_base_vlan_cfg.state) ++ if (!hns->is_vf && !hw->port_base_vlan_cfg.state) + info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; + + if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) +@@ -224,7 +224,7 @@ hns3_parse_dev_caps_mask(const char *key, const char *value, void *extra_args) + static int + hns3_parse_mbx_time_limit(const char *key, const char *value, void *extra_args) + { +- uint32_t val; ++ uint64_t val; + + RTE_SET_USED(key); + +@@ -308,7 +308,7 @@ hns3_parse_devargs(struct rte_eth_dev *dev) + &hns3_parse_mbx_time_limit, &mbx_time_limit_ms); + if (!hns->is_vf) + (void)rte_kvargs_process(kvlist, +- HNS3_DEVARG_FDIR_VALN_MATCH_MODE, ++ HNS3_DEVARG_FDIR_VLAN_MATCH_MODE, + &hns3_parse_vlan_match_mode, + &hns->pf.fdir.vlan_match_mode); + +diff --git a/dpdk/drivers/net/hns3/hns3_common.h b/dpdk/drivers/net/hns3/hns3_common.h +index cf9593bd0c..166852026f 100644 +--- a/dpdk/drivers/net/hns3/hns3_common.h ++++ b/dpdk/drivers/net/hns3/hns3_common.h +@@ -27,7 +27,7 @@ enum { + + #define HNS3_DEVARG_MBX_TIME_LIMIT_MS "mbx_time_limit_ms" + +-#define HNS3_DEVARG_FDIR_VALN_MATCH_MODE "fdir_vlan_match_mode" ++#define HNS3_DEVARG_FDIR_VLAN_MATCH_MODE "fdir_vlan_match_mode" + + #define MSEC_PER_SEC 1000L + #define USEC_PER_MSEC 1000L +diff --git a/dpdk/drivers/net/hns3/hns3_dcb.c b/dpdk/drivers/net/hns3/hns3_dcb.c +index 2831d3dc62..915e4eb768 100644 +--- a/dpdk/drivers/net/hns3/hns3_dcb.c ++++ b/dpdk/drivers/net/hns3/hns3_dcb.c +@@ -1499,7 +1499,6 @@ hns3_dcb_info_update(struct hns3_adapter *hns, uint8_t num_tc) + static int + hns3_dcb_hw_configure(struct hns3_adapter *hns) + { +- struct rte_eth_dcb_rx_conf *dcb_rx_conf; + struct hns3_pf *pf = &hns->pf; + struct hns3_hw *hw = &hns->hw; + enum hns3_fc_status fc_status = hw->current_fc_status; +@@ -1519,12 +1518,8 @@ hns3_dcb_hw_configure(struct hns3_adapter *hns) + } + + if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { +- dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; +- if (dcb_rx_conf->nb_tcs == 0) +- hw->dcb_info.pfc_en = 1; /* tc0 only */ +- else +- hw->dcb_info.pfc_en = +- RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); ++ hw->dcb_info.pfc_en = ++ RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t); + + hw->dcb_info.hw_pfc_map = + hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); +diff --git a/dpdk/drivers/net/hns3/hns3_dump.c b/dpdk/drivers/net/hns3/hns3_dump.c +index cb369be5be..a3b58e0a8f 100644 +--- a/dpdk/drivers/net/hns3/hns3_dump.c ++++ b/dpdk/drivers/net/hns3/hns3_dump.c +@@ -242,7 +242,7 @@ hns3_get_rx_queue(struct rte_eth_dev *dev) + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) { + rx_queues = dev->data->rx_queues; + if (rx_queues == NULL || rx_queues[queue_id] == NULL) { +- hns3_err(hw, "detect rx_queues is NULL!\n"); ++ hns3_err(hw, "detect rx_queues 
is NULL!"); + return NULL; + } + +@@ -267,7 +267,7 @@ hns3_get_tx_queue(struct rte_eth_dev *dev) + for (queue_id = 0; queue_id < dev->data->nb_tx_queues; queue_id++) { + tx_queues = dev->data->tx_queues; + if (tx_queues == NULL || tx_queues[queue_id] == NULL) { +- hns3_err(hw, "detect tx_queues is NULL!\n"); ++ hns3_err(hw, "detect tx_queues is NULL!"); + return NULL; + } + +@@ -297,7 +297,7 @@ hns3_get_rxtx_fake_queue_info(FILE *file, struct rte_eth_dev *dev) + if (dev->data->nb_rx_queues < dev->data->nb_tx_queues) { + rx_queues = hw->fkq_data.rx_queues; + if (rx_queues == NULL || rx_queues[queue_id] == NULL) { +- hns3_err(hw, "detect rx_queues is NULL!\n"); ++ hns3_err(hw, "detect rx_queues is NULL!"); + return; + } + rxq = (struct hns3_rx_queue *)rx_queues[queue_id]; +@@ -311,7 +311,7 @@ hns3_get_rxtx_fake_queue_info(FILE *file, struct rte_eth_dev *dev) + queue_id = 0; + + if (tx_queues == NULL || tx_queues[queue_id] == NULL) { +- hns3_err(hw, "detect tx_queues is NULL!\n"); ++ hns3_err(hw, "detect tx_queues is NULL!"); + return; + } + txq = (struct hns3_tx_queue *)tx_queues[queue_id]; +@@ -961,7 +961,7 @@ hns3_rx_descriptor_dump(const struct rte_eth_dev *dev, uint16_t queue_id, + return -EINVAL; + + if (num > rxq->nb_rx_desc) { +- hns3_err(hw, "Invalid BD num=%u\n", num); ++ hns3_err(hw, "Invalid BD num=%u", num); + return -EINVAL; + } + +@@ -1003,7 +1003,7 @@ hns3_tx_descriptor_dump(const struct rte_eth_dev *dev, uint16_t queue_id, + return -EINVAL; + + if (num > txq->nb_tx_desc) { +- hns3_err(hw, "Invalid BD num=%u\n", num); ++ hns3_err(hw, "Invalid BD num=%u", num); + return -EINVAL; + } + +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c +index ae81368f68..dde27715c0 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.c +@@ -380,7 +380,7 @@ hns3_interrupt_handler(void *param) + hns3_warn(hw, "received reset interrupt"); + hns3_schedule_reset(hns); + } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { +- hns3_dev_handle_mbx_msg(hw); ++ hns3pf_handle_mbx_msg(hw); + } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { + hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " + "ras_int_stat:0x%x cmdq_int_stat:0x%x", +@@ -2738,6 +2738,7 @@ hns3_get_capability(struct hns3_hw *hw) + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE; + pf->support_multi_tc_pause = false; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; + return 0; + } + +@@ -2758,6 +2759,7 @@ hns3_get_capability(struct hns3_hw *hw) + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; + pf->support_multi_tc_pause = true; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -5545,28 +5547,14 @@ is_pf_reset_done(struct hns3_hw *hw) + static enum hns3_reset_level + hns3_detect_reset_event(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + enum hns3_reset_level new_req = HNS3_NONE_RESET; +- enum hns3_reset_level last_req; + uint32_t vector0_intr_state; + +- last_req = hns3_get_reset_level(hns, &hw->reset.pending); + vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); +- if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { +- __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) + new_req = HNS3_IMP_RESET; +- } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { +- 
__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) + new_req = HNS3_GLOBAL_RESET; +- } +- +- if (new_req == HNS3_NONE_RESET) +- return HNS3_NONE_RESET; +- +- if (last_req == HNS3_NONE_RESET || last_req < new_req) { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "High level reset detected, delay do reset"); +- } + + return new_req; + } +@@ -5586,10 +5574,14 @@ hns3_is_reset_pending(struct hns3_adapter *hns) + return false; + + new_req = hns3_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; ++ + last_req = hns3_get_reset_level(hns, &hw->reset.pending); +- if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && +- new_req < last_req) { +- hns3_warn(hw, "High level reset %d is pending", last_req); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); + return true; + } + last_req = hns3_get_reset_level(hns, &hw->reset.request); +@@ -6054,7 +6046,7 @@ hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa) + { + struct hns3_sfp_info_cmd *resp; + uint32_t tmp_fec_capa; +- uint8_t auto_state; ++ uint8_t auto_state = 0; + struct hns3_cmd_desc desc; + int ret; + +@@ -6675,7 +6667,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_hns3, + HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common " + HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> " +- HNS3_DEVARG_MBX_TIME_LIMIT_MS "= "); ++ HNS3_DEVARG_MBX_TIME_LIMIT_MS "= " ++ HNS3_DEVARG_FDIR_VLAN_MATCH_MODE "=strict|nostrict "); + RTE_LOG_REGISTER_SUFFIX(hns3_logtype_init, init, NOTICE); + RTE_LOG_REGISTER_SUFFIX(hns3_logtype_driver, driver, NOTICE); + #ifdef RTE_ETHDEV_DEBUG_RX +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h +index 12d8299def..00d226d71c 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev.h ++++ b/dpdk/drivers/net/hns3/hns3_ethdev.h +@@ -485,6 +485,9 @@ struct hns3_queue_intr { + #define HNS3_PKTS_DROP_STATS_MODE1 0 + #define HNS3_PKTS_DROP_STATS_MODE2 1 + ++#define HNS3_RX_DMA_ADDR_ALIGN_128 128 ++#define HNS3_RX_DMA_ADDR_ALIGN_64 64 ++ + struct hns3_hw { + struct rte_eth_dev_data *data; + void *io_base; +@@ -552,6 +555,11 @@ struct hns3_hw { + * direction. + */ + uint8_t min_tx_pkt_len; ++ /* ++ * The required alignment of the DMA address of the RX buffer. ++ * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values. 
++ */ ++ uint16_t rx_dma_addr_align; + + struct hns3_queue_intr intr; + /* +diff --git a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +index 916cc0fb1b..d4d691ad86 100644 +--- a/dpdk/drivers/net/hns3/hns3_ethdev_vf.c ++++ b/dpdk/drivers/net/hns3/hns3_ethdev_vf.c +@@ -91,11 +91,13 @@ hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, +- RTE_ETHER_ADDR_LEN, false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -110,12 +112,13 @@ hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr) + { + /* mac address was checked by upper level interface */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_REMOVE); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -134,6 +137,7 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *old_addr; + uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + + /* +@@ -146,9 +150,10 @@ hns3vf_set_default_mac_addr(struct rte_eth_dev *dev, + memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, + RTE_ETHER_ADDR_LEN); + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, +- HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, +- HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, ++ HNS3_MBX_MAC_VLAN_UC_MODIFY); ++ memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) { + /* + * The hns3 VF PMD depends on the hns3 PF kernel ethdev +@@ -185,12 +190,13 @@ hns3vf_add_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_ADD, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_ADD); ++ memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -206,12 +212,13 @@ hns3vf_remove_mc_mac_addr(struct hns3_hw *hw, + struct rte_ether_addr *mac_addr) + { + char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, +- HNS3_MBX_MAC_VLAN_MC_REMOVE, +- mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, ++ HNS3_MBX_MAC_VLAN_MC_REMOVE); ++ 
memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, + mac_addr); +@@ -254,11 +261,12 @@ hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc, + * the packets with vlan tag in promiscuous mode. + */ + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); +- req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; +- req->msg[1] = en_bc_pmc ? 1 : 0; +- req->msg[2] = en_uc_pmc ? 1 : 0; +- req->msg[3] = en_mc_pmc ? 1 : 0; +- req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; ++ req->msg.code = HNS3_MBX_SET_PROMISC_MODE; ++ req->msg.en_bc = en_bc_pmc ? 1 : 0; ++ req->msg.en_uc = en_uc_pmc ? 1 : 0; ++ req->msg.en_mc = en_mc_pmc ? 1 : 0; ++ req->msg.en_limit_promisc = ++ hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +@@ -347,30 +355,26 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id, + bool mmap, enum hns3_ring_type queue_type, + uint16_t queue_id) + { +- struct hns3_vf_bind_vector_msg bind_msg; ++ struct hns3_vf_to_pf_msg req = {0}; + const char *op_str; +- uint16_t code; + int ret; + +- memset(&bind_msg, 0, sizeof(bind_msg)); +- code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : ++ req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : + HNS3_MBX_UNMAP_RING_TO_VECTOR; +- bind_msg.vector_id = (uint8_t)vector_id; ++ req.vector_id = (uint8_t)vector_id; ++ req.ring_num = 1; + + if (queue_type == HNS3_RING_TYPE_RX) +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_RX; + else +- bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; +- +- bind_msg.param[0].ring_type = queue_type; +- bind_msg.ring_num = 1; +- bind_msg.param[0].tqp_index = queue_id; ++ req.ring_param[0].int_gl_index = HNS3_RING_GL_TX; ++ req.ring_param[0].ring_type = queue_type; ++ req.ring_param[0].tqp_index = queue_id; + op_str = mmap ? 
"Map" : "Unmap"; +- ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, +- sizeof(bind_msg), false, NULL, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) +- hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", +- op_str, queue_id, bind_msg.vector_id, ret); ++ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.", ++ op_str, queue_id, req.vector_id, ret); + + return ret; + } +@@ -453,10 +457,12 @@ cfg_err: + static int + hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) + { ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, +- sizeof(mtu), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0); ++ memcpy(req.data, &mtu, sizeof(mtu)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); + +@@ -563,13 +569,8 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + val = hns3_read_dev(hw, HNS3_VF_RST_ING); + hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); + val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); +- if (clearval) { +- hw->reset.stats.global_cnt++; +- hns3_warn(hw, "Global reset detected, clear reset status"); +- } else { +- hns3_schedule_delayed_reset(hns); +- hns3_warn(hw, "Global reset detected, don't clear reset status"); +- } ++ hw->reset.stats.global_cnt++; ++ hns3_warn(hw, "Global reset detected, clear reset status"); + + ret = HNS3VF_VECTOR0_EVENT_RST; + goto out; +@@ -584,9 +585,9 @@ hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval) + + val = 0; + ret = HNS3VF_VECTOR0_EVENT_OTHER; ++ + out: +- if (clearval) +- *clearval = val; ++ *clearval = val; + return ret; + } + +@@ -612,7 +613,7 @@ hns3vf_interrupt_handler(void *param) + hns3_schedule_reset(hns); + break; + case HNS3VF_VECTOR0_EVENT_MBX: +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + break; + default: + break; +@@ -647,12 +648,13 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; + uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + + __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, + __ATOMIC_RELEASE); + +- (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); ++ (void)hns3vf_mbx_send(hw, &req, false, NULL, 0); + + while (remain_ms > 0) { + rte_delay_ms(HNS3_POLL_RESPONE_MS); +@@ -663,7 +665,7 @@ hns3vf_get_push_lsc_cap(struct hns3_hw *hw) + * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE + * mailbox from PF driver to get this capability. 
+ */ +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != + HNS3_PF_PUSH_LSC_CAP_UNKNOWN) + break; +@@ -705,6 +707,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN; + hw->rss_info.ipv6_sctp_offload_supported = false; + hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64; + return 0; + } + +@@ -722,6 +725,7 @@ hns3vf_get_capability(struct hns3_hw *hw) + hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2; + hw->rss_info.ipv6_sctp_offload_supported = true; + hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; ++ hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128; + + return 0; + } +@@ -747,12 +751,13 @@ hns3vf_check_tqp_info(struct hns3_hw *hw) + static int + hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t resp_msg; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, +- true, &resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_GET_PORT_BASE_VLAN_STATE); ++ ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg)); + if (ret) { + if (ret == -ETIME) { + /* +@@ -793,10 +798,12 @@ hns3vf_get_queue_info(struct hns3_hw *hw) + { + #define HNS3VF_TQPS_RSS_INFO_LEN 6 + uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, +- resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); + return ret; +@@ -834,10 +841,11 @@ hns3vf_get_basic_info(struct hns3_hw *hw) + { + uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; + struct hns3_basic_info *basic_info; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, +- true, resp_msg, sizeof(resp_msg)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg)); + if (ret) { + hns3_err(hw, "failed to get basic info from PF, ret = %d.", + ret); +@@ -857,10 +865,11 @@ static int + hns3vf_get_host_mac_addr(struct hns3_hw *hw) + { + uint8_t host_mac[RTE_ETHER_ADDR_LEN]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, +- true, host_mac, RTE_ETHER_ADDR_LEN); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN); + if (ret) { + hns3_err(hw, "Failed to get mac addr from PF: %d", ret); + return ret; +@@ -909,6 +918,7 @@ static void + hns3vf_request_link_info(struct hns3_hw *hw) + { + struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); ++ struct hns3_vf_to_pf_msg req; + bool send_req; + int ret; + +@@ -920,8 +930,8 @@ hns3vf_request_link_info(struct hns3_hw *hw) + if (!send_req) + return; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, +- NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) { + hns3_err(hw, "failed to fetch link status, ret = %d", ret); + return; +@@ -965,19 +975,18 @@ hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, + static int + hns3vf_vlan_filter_configure(struct hns3_adapter *hns, 
uint16_t vlan_id, int on) + { +-#define HNS3VF_VLAN_MBX_MSG_LEN 5 ++ struct hns3_mbx_vlan_filter *vlan_filter; ++ struct hns3_vf_to_pf_msg req = {0}; + struct hns3_hw *hw = &hns->hw; +- uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; +- uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); +- uint8_t is_kill = on ? 0 : 1; + +- msg_data[0] = is_kill; +- memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); +- memcpy(&msg_data[3], &proto, sizeof(proto)); ++ req.code = HNS3_MBX_SET_VLAN; ++ req.subcode = HNS3_MBX_VLAN_FILTER; ++ vlan_filter = (struct hns3_mbx_vlan_filter *)req.data; ++ vlan_filter->is_kill = on ? 0 : 1; ++ vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN); ++ vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id); + +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, +- msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, +- 0); ++ return hns3vf_mbx_send(hw, &req, true, NULL, 0); + } + + static int +@@ -1006,6 +1015,7 @@ hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + static int + hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + +@@ -1013,9 +1023,10 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + return 0; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, +- HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_ENABLE_VLAN_FILTER); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "%s vlan filter failed, ret = %d.", + enable ? "enable" : "disable", ret); +@@ -1026,12 +1037,15 @@ hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) + static int + hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + int ret; + + msg_data = enable ? 1 : 0; +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, +- &msg_data, sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, ++ HNS3_MBX_VLAN_RX_OFF_CFG); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "vf %s strip failed, ret = %d.", + enable ? "enable" : "disable", ret); +@@ -1175,11 +1189,13 @@ hns3vf_dev_configure_vlan(struct rte_eth_dev *dev) + static int + hns3vf_set_alive(struct hns3_hw *hw, bool alive) + { ++ struct hns3_vf_to_pf_msg req; + uint8_t msg_data; + + msg_data = alive ? 
1 : 0; +- return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, +- sizeof(msg_data), false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0); ++ memcpy(req.data, &msg_data, sizeof(msg_data)); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static void +@@ -1187,11 +1203,12 @@ hns3vf_keep_alive_handler(void *param) + { + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct hns3_adapter *hns = eth_dev->data->dev_private; ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, +- false, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0); ++ ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); + if (ret) + hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", + ret); +@@ -1330,9 +1347,11 @@ err_init_hardware: + static int + hns3vf_clear_vport_list(struct hns3_hw *hw) + { +- return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, +- HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, +- NULL, 0); ++ struct hns3_vf_to_pf_msg req; ++ ++ hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL, ++ HNS3_MBX_VPORT_LIST_CLEAR); ++ return hns3vf_mbx_send(hw, &req, false, NULL, 0); + } + + static int +@@ -1709,11 +1728,25 @@ is_vf_reset_done(struct hns3_hw *hw) + return true; + } + ++static enum hns3_reset_level ++hns3vf_detect_reset_event(struct hns3_hw *hw) ++{ ++ enum hns3_reset_level reset = HNS3_NONE_RESET; ++ uint32_t cmdq_stat_reg; ++ ++ cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); ++ if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) ++ reset = HNS3_VF_RESET; ++ ++ return reset; ++} ++ + bool + hns3vf_is_reset_pending(struct hns3_adapter *hns) + { ++ enum hns3_reset_level last_req; + struct hns3_hw *hw = &hns->hw; +- enum hns3_reset_level reset; ++ enum hns3_reset_level new_req; + + /* + * According to the protocol of PCIe, FLR to a PF device resets the PF +@@ -1736,13 +1769,18 @@ hns3vf_is_reset_pending(struct hns3_adapter *hns) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return false; + +- hns3vf_check_event_cause(hns, NULL); +- reset = hns3vf_get_reset_level(hw, &hw->reset.pending); +- if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && +- hw->reset.level < reset) { +- hns3_warn(hw, "High level reset %d is pending", reset); ++ new_req = hns3vf_detect_reset_event(hw); ++ if (new_req == HNS3_NONE_RESET) ++ return false; ++ ++ last_req = hns3vf_get_reset_level(hw, &hw->reset.pending); ++ if (last_req == HNS3_NONE_RESET || last_req < new_req) { ++ __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); ++ hns3_schedule_delayed_reset(hns); ++ hns3_warn(hw, "High level reset detected, delay do reset"); + return true; + } ++ + return false; + } + +@@ -1801,12 +1839,13 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns) + static int + hns3vf_prepare_reset(struct hns3_adapter *hns) + { ++ struct hns3_vf_to_pf_msg req; + struct hns3_hw *hw = &hns->hw; + int ret; + + if (hw->reset.level == HNS3_VF_FUNC_RESET) { +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, +- 0, true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + return ret; + } +diff --git a/dpdk/drivers/net/hns3/hns3_fdir.c b/dpdk/drivers/net/hns3/hns3_fdir.c +index d100e58d10..75a200c713 100644 +--- a/dpdk/drivers/net/hns3/hns3_fdir.c ++++ b/dpdk/drivers/net/hns3/hns3_fdir.c +@@ -836,6 +836,7 @@ int hns3_fdir_filter_init(struct hns3_adapter *hns) + .key_len = sizeof(struct hns3_fdir_key_conf), + 
.hash_func = rte_hash_crc, + .hash_func_init_val = 0, ++ .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE, + }; + int ret; + +diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c +index 7fbe65313c..db318854af 100644 +--- a/dpdk/drivers/net/hns3/hns3_flow.c ++++ b/dpdk/drivers/net/hns3/hns3_flow.c +@@ -283,7 +283,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id, + cnt = hns3_counter_lookup(dev, id); + if (cnt) { + if (!cnt->indirect || cnt->indirect != indirect) +- return rte_flow_error_set(error, ENOTSUP, ++ return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + cnt, + "Counter id is used, indirect flag not match"); +@@ -1182,6 +1182,11 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + "Tunnel packets must configure " + "with mask"); + ++ if (rule->key_conf.spec.tunnel_type != 0) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, "Too many tunnel headers!"); ++ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: +diff --git a/dpdk/drivers/net/hns3/hns3_intr.c b/dpdk/drivers/net/hns3/hns3_intr.c +index 916bf30dcb..3f6b9e7fc4 100644 +--- a/dpdk/drivers/net/hns3/hns3_intr.c ++++ b/dpdk/drivers/net/hns3/hns3_intr.c +@@ -1806,7 +1806,7 @@ enable_tm_err_intr(struct hns3_adapter *hns, bool en) + + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) +- hns3_err(hw, "fail to %s TM QCN mem errors, ret = %d\n", ++ hns3_err(hw, "fail to %s TM QCN mem errors, ret = %d", + en ? "enable" : "disable", ret); + + return ret; +@@ -1847,7 +1847,7 @@ enable_common_err_intr(struct hns3_adapter *hns, bool en) + + ret = hns3_cmd_send(hw, &desc[0], RTE_DIM(desc)); + if (ret) +- hns3_err(hw, "fail to %s common err interrupts, ret = %d\n", ++ hns3_err(hw, "fail to %s common err interrupts, ret = %d", + en ? 
"enable" : "disable", ret); + + return ret; +@@ -1984,7 +1984,7 @@ query_num_bds(struct hns3_hw *hw, bool is_ras, uint32_t *mpf_bd_num, + pf_bd_num_val = rte_le_to_cpu_32(desc.data[1]); + if (mpf_bd_num_val < mpf_min_bd_num || pf_bd_num_val < pf_min_bd_num) { + hns3_err(hw, "error bd num: mpf(%u), min_mpf(%u), " +- "pf(%u), min_pf(%u)\n", mpf_bd_num_val, mpf_min_bd_num, ++ "pf(%u), min_pf(%u)", mpf_bd_num_val, mpf_min_bd_num, + pf_bd_num_val, pf_min_bd_num); + return -EINVAL; + } +@@ -2061,7 +2061,7 @@ hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc, + opcode = HNS3_OPC_QUERY_CLEAR_PF_RAS_INT; + break; + default: +- hns3_err(hw, "error hardware err_type = %d\n", err_type); ++ hns3_err(hw, "error hardware err_type = %d", err_type); + return -EINVAL; + } + +@@ -2069,7 +2069,7 @@ hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc, + hns3_cmd_setup_basic_desc(&desc[0], opcode, true); + ret = hns3_cmd_send(hw, &desc[0], num); + if (ret) { +- hns3_err(hw, "query hw err int 0x%x cmd failed, ret = %d\n", ++ hns3_err(hw, "query hw err int 0x%x cmd failed, ret = %d", + opcode, ret); + return ret; + } +@@ -2097,7 +2097,7 @@ hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc, + hns3_cmd_reuse_desc(&desc[0], false); + ret = hns3_cmd_send(hw, &desc[0], num); + if (ret) +- hns3_err(hw, "clear all hw err int cmd failed, ret = %d\n", ++ hns3_err(hw, "clear all hw err int cmd failed, ret = %d", + ret); + + return ret; +@@ -2252,6 +2252,12 @@ hns3_handle_module_error_data(struct hns3_hw *hw, uint32_t *buf, + sum_err_info = (struct hns3_sum_err_info *)&buf[offset++]; + mod_num = sum_err_info->mod_num; + reset_type = sum_err_info->reset_type; ++ ++ if (reset_type >= HNS3_MAX_RESET) { ++ hns3_err(hw, "invalid reset type = %u", reset_type); ++ return; ++ } ++ + if (reset_type && reset_type != HNS3_NONE_RESET) + hns3_atomic_set_bit(reset_type, &hw->reset.request); + +diff --git a/dpdk/drivers/net/hns3/hns3_mbx.c b/dpdk/drivers/net/hns3/hns3_mbx.c +index f1743c195e..9cdbc1668a 100644 +--- a/dpdk/drivers/net/hns3/hns3_mbx.c ++++ b/dpdk/drivers/net/hns3/hns3_mbx.c +@@ -11,8 +11,6 @@ + #include "hns3_intr.h" + #include "hns3_rxtx.h" + +-#define HNS3_CMD_CODE_OFFSET 2 +- + static const struct errno_respcode_map err_code_map[] = { + {0, 0}, + {1, -EPERM}, +@@ -26,6 +24,14 @@ static const struct errno_respcode_map err_code_map[] = { + {95, -EOPNOTSUPP}, + }; + ++void ++hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode) ++{ ++ memset(req, 0, sizeof(struct hns3_vf_to_pf_msg)); ++ req->code = code; ++ req->subcode = subcode; ++} ++ + static int + hns3_resp_to_errno(uint16_t resp_code) + { +@@ -72,7 +78,7 @@ hns3_get_mbx_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + return -EIO; + } + +- hns3_dev_handle_mbx_msg(hw); ++ hns3vf_handle_mbx_msg(hw); + rte_delay_us(HNS3_WAIT_RESP_US); + + if (hw->mbx_resp.received_match_resp) +@@ -120,44 +126,24 @@ hns3_mbx_prepare_resp(struct hns3_hw *hw, uint16_t code, uint16_t subcode) + } + + int +-hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len) ++hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len) + { +- struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_mbx_vf_to_pf_cmd *cmd; + struct hns3_cmd_desc desc; +- bool is_ring_vector_msg; +- int offset; + int ret; + +- req = (struct 
hns3_mbx_vf_to_pf_cmd *)desc.data; +- +- /* first two bytes are reserved for code & subcode */ +- if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { +- hns3_err(hw, +- "VF send mbx msg fail, msg len %u exceeds max payload len %d", +- msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); +- return -EINVAL; +- } +- + hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); +- req->msg[0] = code; +- is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) || +- (code == HNS3_MBX_GET_RING_VECTOR_MAP); +- if (!is_ring_vector_msg) +- req->msg[1] = subcode; +- if (msg_data) { +- offset = is_ring_vector_msg ? 1 : HNS3_CMD_CODE_OFFSET; +- memcpy(&req->msg[offset], msg_data, msg_len); +- } ++ cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; ++ cmd->msg = *req; + + /* synchronous send */ + if (need_resp) { +- req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; ++ cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; + rte_spinlock_lock(&hw->mbx_resp.lock); +- hns3_mbx_prepare_resp(hw, code, subcode); +- req->match_id = hw->mbx_resp.match_id; ++ hns3_mbx_prepare_resp(hw, req->code, req->subcode); ++ cmd->match_id = hw->mbx_resp.match_id; + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) { + rte_spinlock_unlock(&hw->mbx_resp.lock); +@@ -166,7 +152,8 @@ hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, + return ret; + } + +- ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); ++ ret = hns3_get_mbx_resp(hw, req->code, req->subcode, ++ resp_data, resp_len); + rte_spinlock_unlock(&hw->mbx_resp.lock); + } else { + /* asynchronous send */ +@@ -193,17 +180,17 @@ static void + hns3vf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { ++ struct hns3_mbx_link_status *link_info = ++ (struct hns3_mbx_link_status *)req->msg.msg_data; + uint8_t link_status, link_duplex; +- uint16_t *msg_q = req->msg; + uint8_t support_push_lsc; + uint32_t link_speed; + +- memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); +- link_status = rte_le_to_cpu_16(msg_q[1]); +- link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); +- hns3vf_update_link_status(hw, link_status, link_speed, +- link_duplex); +- support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; ++ link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); ++ link_speed = rte_le_to_cpu_32(link_info->speed); ++ link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex); ++ hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); ++ support_push_lsc = (link_info->flag) & 1u; + hns3vf_update_push_lsc_cap(hw, support_push_lsc); + } + +@@ -212,7 +199,6 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { + enum hns3_reset_level reset_level; +- uint16_t *msg_q = req->msg; + + /* + * PF has asserted reset hence VF should go in pending +@@ -220,7 +206,7 @@ hns3_handle_asserting_reset(struct hns3_hw *hw, + * has been completely reset. After this stack should + * eventually be re-initialized. + */ +- reset_level = rte_le_to_cpu_16(msg_q[1]); ++ reset_level = rte_le_to_cpu_16(req->msg.reset_level); + hns3_atomic_set_bit(reset_level, &hw->reset.pending); + + hns3_warn(hw, "PF inform reset level %d", reset_level); +@@ -242,8 +228,9 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * to match the request. 
+ */ + if (req->match_id == resp->match_id) { +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = ++ hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, + HNS3_MBX_MAX_RESP_DATA_SIZE); + rte_io_wmb(); + resp->received_match_resp = true; +@@ -256,7 +243,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + * support copy request's match_id to its response. So VF follows the + * original scheme to process. + */ +- msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; ++ msg_data = (uint32_t)req->msg.vf_mbx_msg_code << ++ HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; + if (resp->req_msg_data != msg_data) { + hns3_warn(hw, + "received response tag (%u) is mismatched with requested tag (%u)", +@@ -264,8 +252,8 @@ hns3_handle_mbx_response(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) + return; + } + +- resp->resp_status = hns3_resp_to_errno(req->msg[3]); +- memcpy(resp->additional_info, &req->msg[4], ++ resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); ++ memcpy(resp->additional_info, &req->msg.resp_data, + HNS3_MBX_MAX_RESP_DATA_SIZE); + rte_io_wmb(); + resp->received_match_resp = true; +@@ -296,11 +284,8 @@ static void + hns3pf_handle_link_change_event(struct hns3_hw *hw, + struct hns3_mbx_vf_to_pf_cmd *req) + { +-#define LINK_STATUS_OFFSET 1 +-#define LINK_FAIL_CODE_OFFSET 2 +- +- if (!req->msg[LINK_STATUS_OFFSET]) +- hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); ++ if (!req->msg.link_status) ++ hns3_link_fail_parse(hw, req->msg.link_fail_code); + + hns3_update_linkstatus_and_event(hw, true); + } +@@ -309,8 +294,7 @@ static void + hns3_update_port_base_vlan_info(struct hns3_hw *hw, + struct hns3_mbx_pf_to_vf_cmd *req) + { +-#define PVID_STATE_OFFSET 1 +- uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? ++ uint16_t new_pvid_state = req->msg.pvid_state ? 
+ HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; + /* + * Currently, hardware doesn't support more than two layers VLAN offload +@@ -359,7 +343,7 @@ hns3_handle_mbx_msg_out_intr(struct hns3_hw *hw) + while (next_to_use != tail) { + desc = &crq->desc[next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); + if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) +@@ -388,9 +372,57 @@ scan_next: + } + + void +-hns3_dev_handle_mbx_msg(struct hns3_hw *hw) ++hns3pf_handle_mbx_msg(struct hns3_hw *hw) ++{ ++ struct hns3_cmq_ring *crq = &hw->cmq.crq; ++ struct hns3_mbx_vf_to_pf_cmd *req; ++ struct hns3_cmd_desc *desc; ++ uint16_t flag; ++ ++ rte_spinlock_lock(&hw->cmq.crq.lock); ++ ++ while (!hns3_cmd_crq_empty(hw)) { ++ if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { ++ rte_spinlock_unlock(&hw->cmq.crq.lock); ++ return; ++ } ++ desc = &crq->desc[crq->next_to_use]; ++ req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data; ++ ++ flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); ++ if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { ++ hns3_warn(hw, ++ "dropped invalid mailbox message, code = %u", ++ req->msg.code); ++ ++ /* dropping/not processing this invalid message */ ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ continue; ++ } ++ ++ switch (req->msg.code) { ++ case HNS3_MBX_PUSH_LINK_STATUS: ++ hns3pf_handle_link_change_event(hw, req); ++ break; ++ default: ++ hns3_err(hw, "received unsupported(%u) mbx msg", ++ req->msg.code); ++ break; ++ } ++ crq->desc[crq->next_to_use].flag = 0; ++ hns3_mbx_ring_ptr_move_crq(crq); ++ } ++ ++ /* Write back CMDQ_RQ header pointer, IMP need this pointer */ ++ hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); ++ ++ rte_spinlock_unlock(&hw->cmq.crq.lock); ++} ++ ++void ++hns3vf_handle_mbx_msg(struct hns3_hw *hw) + { +- struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_cmq_ring *crq = &hw->cmq.crq; + struct hns3_mbx_pf_to_vf_cmd *req; + struct hns3_cmd_desc *desc; +@@ -401,7 +433,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + rte_spinlock_lock(&hw->cmq.crq.lock); + + handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY || +- !rte_thread_is_intr()) && hns->is_vf; ++ !rte_thread_is_intr()); + if (handle_out) { + /* + * Currently, any threads in the primary and secondary processes +@@ -432,7 +464,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + + desc = &crq->desc[crq->next_to_use]; + req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; +- opcode = req->msg[0] & 0xff; ++ opcode = req->msg.code & 0xff; + + flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); + if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { +@@ -446,8 +478,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + continue; + } + +- handle_out = hns->is_vf && desc->opcode == 0; +- if (handle_out) { ++ if (desc->opcode == 0) { + /* Message already processed by other thread */ + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); +@@ -464,16 +495,6 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + case HNS3_MBX_ASSERTING_RESET: + hns3_handle_asserting_reset(hw, req); + break; +- case HNS3_MBX_PUSH_LINK_STATUS: +- /* +- * This message is reported by the firmware and is +- * reported in 'struct hns3_mbx_vf_to_pf_cmd' format. +- * Therefore, we should cast the req variable to +- * 'struct hns3_mbx_vf_to_pf_cmd' and then process it. 
+- */ +- hns3pf_handle_link_change_event(hw, +- (struct hns3_mbx_vf_to_pf_cmd *)req); +- break; + case HNS3_MBX_PUSH_VLAN_INFO: + /* + * When the PVID configuration status of VF device is +@@ -488,7 +509,7 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw) + * hns3 PF kernel driver, VF driver will receive this + * mailbox message from PF driver. + */ +- hns3_handle_promisc_info(hw, req->msg[1]); ++ hns3_handle_promisc_info(hw, req->msg.promisc_en); + break; + default: + hns3_err(hw, "received unsupported(%u) mbx msg", +diff --git a/dpdk/drivers/net/hns3/hns3_mbx.h b/dpdk/drivers/net/hns3/hns3_mbx.h +index 4a328802b9..2b6cb8f513 100644 +--- a/dpdk/drivers/net/hns3/hns3_mbx.h ++++ b/dpdk/drivers/net/hns3/hns3_mbx.h +@@ -89,7 +89,6 @@ enum hns3_mbx_link_fail_subcode { + HNS3_MBX_LF_XSFP_ABSENT, + }; + +-#define HNS3_MBX_MAX_MSG_SIZE 16 + #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 + #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 + +@@ -107,6 +106,69 @@ struct hns3_mbx_resp_status { + uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; + }; + ++struct hns3_ring_chain_param { ++ uint8_t ring_type; ++ uint8_t tqp_index; ++ uint8_t int_gl_index; ++}; ++ ++struct hns3_mbx_vlan_filter { ++ uint8_t is_kill; ++ uint16_t vlan_id; ++ uint16_t proto; ++} __rte_packed; ++ ++struct hns3_mbx_link_status { ++ uint16_t link_status; ++ uint32_t speed; ++ uint16_t duplex; ++ uint8_t flag; ++} __rte_packed; ++ ++#define HNS3_MBX_MSG_MAX_DATA_SIZE 14 ++#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 ++struct hns3_vf_to_pf_msg { ++ uint8_t code; ++ union { ++ struct { ++ uint8_t subcode; ++ uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++ struct { ++ uint8_t en_bc; ++ uint8_t en_uc; ++ uint8_t en_mc; ++ uint8_t en_limit_promisc; ++ }; ++ struct { ++ uint8_t vector_id; ++ uint8_t ring_num; ++ struct hns3_ring_chain_param ++ ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ }; ++ struct { ++ uint8_t link_status; ++ uint8_t link_fail_code; ++ }; ++ }; ++}; ++ ++struct hns3_pf_to_vf_msg { ++ uint16_t code; ++ union { ++ struct { ++ uint16_t vf_mbx_msg_code; ++ uint16_t vf_mbx_msg_subcode; ++ uint16_t resp_status; ++ uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; ++ }; ++ uint16_t promisc_en; ++ uint16_t reset_level; ++ uint16_t pvid_state; ++ uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; ++ }; ++}; ++ + struct errno_respcode_map { + uint16_t resp_code; + int err_no; +@@ -122,7 +184,7 @@ struct hns3_mbx_vf_to_pf_cmd { + uint8_t msg_len; + uint8_t rsv2; + uint16_t match_id; +- uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; ++ struct hns3_vf_to_pf_msg msg; + }; + + struct hns3_mbx_pf_to_vf_cmd { +@@ -131,20 +193,7 @@ struct hns3_mbx_pf_to_vf_cmd { + uint8_t msg_len; + uint8_t rsv1; + uint16_t match_id; +- uint16_t msg[8]; +-}; +- +-struct hns3_ring_chain_param { +- uint8_t ring_type; +- uint8_t tqp_index; +- uint8_t int_gl_index; +-}; +- +-#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +-struct hns3_vf_bind_vector_msg { +- uint8_t vector_id; +- uint8_t ring_num; +- struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; ++ struct hns3_pf_to_vf_msg msg; + }; + + struct hns3_pf_rst_done_cmd { +@@ -158,8 +207,11 @@ struct hns3_pf_rst_done_cmd { + ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) + + struct hns3_hw; +-void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); +-int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, +- const uint8_t *msg_data, uint8_t msg_len, bool need_resp, +- uint8_t *resp_data, uint16_t resp_len); ++void hns3pf_handle_mbx_msg(struct hns3_hw *hw); ++void 
hns3vf_handle_mbx_msg(struct hns3_hw *hw); ++void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, ++ uint8_t code, uint8_t subcode); ++int hns3vf_mbx_send(struct hns3_hw *hw, ++ struct hns3_vf_to_pf_msg *req_msg, bool need_resp, ++ uint8_t *resp_data, uint16_t resp_len); + #endif /* HNS3_MBX_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_ptp.c b/dpdk/drivers/net/hns3/hns3_ptp.c +index 894ac6dd71..c6e77d21cb 100644 +--- a/dpdk/drivers/net/hns3/hns3_ptp.c ++++ b/dpdk/drivers/net/hns3/hns3_ptp.c +@@ -50,7 +50,7 @@ hns3_ptp_int_en(struct hns3_hw *hw, bool en) + ret = hns3_cmd_send(hw, &desc, 1); + if (ret) + hns3_err(hw, +- "failed to %s ptp interrupt, ret = %d\n", ++ "failed to %s ptp interrupt, ret = %d", + en ? "enable" : "disable", ret); + + return ret; +diff --git a/dpdk/drivers/net/hns3/hns3_regs.c b/dpdk/drivers/net/hns3/hns3_regs.c +index be1be6a89c..8c0c0a3027 100644 +--- a/dpdk/drivers/net/hns3/hns3_regs.c ++++ b/dpdk/drivers/net/hns3/hns3_regs.c +@@ -17,13 +17,9 @@ + + static int hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *lines); + +-static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_ADDR_L_REG, +- HNS3_CMDQ_TX_ADDR_H_REG, +- HNS3_CMDQ_TX_DEPTH_REG, ++static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_DEPTH_REG, + HNS3_CMDQ_TX_TAIL_REG, + HNS3_CMDQ_TX_HEAD_REG, +- HNS3_CMDQ_RX_ADDR_L_REG, +- HNS3_CMDQ_RX_ADDR_H_REG, + HNS3_CMDQ_RX_DEPTH_REG, + HNS3_CMDQ_RX_TAIL_REG, + HNS3_CMDQ_RX_HEAD_REG, +@@ -44,9 +40,7 @@ static const uint32_t common_vf_reg_addrs[] = {HNS3_MISC_VECTOR_REG_BASE, + HNS3_FUN_RST_ING, + HNS3_GRO_EN_REG}; + +-static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BASEADDR_L_REG, +- HNS3_RING_RX_BASEADDR_H_REG, +- HNS3_RING_RX_BD_NUM_REG, ++static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BD_NUM_REG, + HNS3_RING_RX_BD_LEN_REG, + HNS3_RING_RX_EN_REG, + HNS3_RING_RX_MERGE_EN_REG, +@@ -57,8 +51,6 @@ static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BASEADDR_L_REG, + HNS3_RING_RX_FBD_OFFSET_REG, + HNS3_RING_RX_STASH_REG, + HNS3_RING_RX_BD_ERR_REG, +- HNS3_RING_TX_BASEADDR_L_REG, +- HNS3_RING_TX_BASEADDR_H_REG, + HNS3_RING_TX_BD_NUM_REG, + HNS3_RING_TX_EN_REG, + HNS3_RING_TX_PRIORITY_REG, +@@ -135,7 +127,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) + tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1; + + len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num + +- tqp_intr_lines * hw->num_msi) * REG_NUM_PER_LINE; ++ tqp_intr_lines * hw->intr_tqps_num) * REG_NUM_PER_LINE; + + if (!hns->is_vf) { + ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); +@@ -355,7 +347,7 @@ hns3_get_dfx_reg_bd_num(struct hns3_hw *hw, uint32_t *bd_num_list, + + ret = hns3_cmd_send(hw, desc, HNS3_GET_DFX_REG_BD_NUM_SIZE); + if (ret) { +- hns3_err(hw, "fail to get dfx bd num, ret = %d.\n", ret); ++ hns3_err(hw, "fail to get dfx bd num, ret = %d.", ret); + return ret; + } + +@@ -387,7 +379,7 @@ hns3_dfx_reg_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, + ret = hns3_cmd_send(hw, desc, bd_num); + if (ret) + hns3_err(hw, "fail to query dfx registers, opcode = 0x%04X, " +- "ret = %d.\n", opcode, ret); ++ "ret = %d.", opcode, ret); + + return ret; + } +diff --git a/dpdk/drivers/net/hns3/hns3_rss.c b/dpdk/drivers/net/hns3/hns3_rss.c +index 15feb26043..3eae4caf52 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.c ++++ b/dpdk/drivers/net/hns3/hns3_rss.c +@@ -153,8 +153,7 @@ static const struct { + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | + 
BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D), + HNS3_RSS_TUPLE_IPV4_SCTP_M }, + + /* IPV6-FRAG */ +@@ -274,8 +273,7 @@ static const struct { + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | +- BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER), ++ BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S), + HNS3_RSS_TUPLE_IPV6_SCTP_M }, + }; + +diff --git a/dpdk/drivers/net/hns3/hns3_rss.h b/dpdk/drivers/net/hns3/hns3_rss.h +index 9d182a8025..0755760b45 100644 +--- a/dpdk/drivers/net/hns3/hns3_rss.h ++++ b/dpdk/drivers/net/hns3/hns3_rss.h +@@ -49,7 +49,6 @@ enum hns3_tuple_field { + HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER, + + /* IPV4 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D = 24, +@@ -74,7 +73,6 @@ enum hns3_tuple_field { + HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, + HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, +- HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, + + /* IPV6 ENABLE FIELD */ + HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D = 56, +@@ -96,12 +94,12 @@ enum hns3_tuple_field { + + #define HNS3_RSS_TUPLE_IPV4_TCP_M GENMASK(3, 0) + #define HNS3_RSS_TUPLE_IPV4_UDP_M GENMASK(11, 8) +-#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(20, 16) ++#define HNS3_RSS_TUPLE_IPV4_SCTP_M GENMASK(19, 16) + #define HNS3_RSS_TUPLE_IPV4_NONF_M GENMASK(25, 24) + #define HNS3_RSS_TUPLE_IPV4_FLAG_M GENMASK(27, 26) + #define HNS3_RSS_TUPLE_IPV6_TCP_M GENMASK(35, 32) + #define HNS3_RSS_TUPLE_IPV6_UDP_M GENMASK(43, 40) +-#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(52, 48) ++#define HNS3_RSS_TUPLE_IPV6_SCTP_M GENMASK(51, 48) + #define HNS3_RSS_TUPLE_IPV6_NONF_M GENMASK(57, 56) + #define HNS3_RSS_TUPLE_IPV6_FLAG_M GENMASK(59, 58) + +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c +index 09b7e90c70..8d0db134d2 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx.c ++++ b/dpdk/drivers/net/hns3/hns3_rxtx.c +@@ -86,9 +86,14 @@ hns3_rx_queue_release(void *queue) + struct hns3_rx_queue *rxq = queue; + if (rxq) { + hns3_rx_queue_release_mbufs(rxq); +- if (rxq->mz) ++ if (rxq->mz) { + rte_memzone_free(rxq->mz); +- rte_free(rxq->sw_ring); ++ rxq->mz = NULL; ++ } ++ if (rxq->sw_ring) { ++ rte_free(rxq->sw_ring); ++ rxq->sw_ring = NULL; ++ } + rte_free(rxq); + } + } +@@ -99,10 +104,18 @@ hns3_tx_queue_release(void *queue) + struct hns3_tx_queue *txq = queue; + if (txq) { + hns3_tx_queue_release_mbufs(txq); +- if (txq->mz) ++ if (txq->mz) { + rte_memzone_free(txq->mz); +- rte_free(txq->sw_ring); +- rte_free(txq->free); ++ txq->mz = NULL; ++ } ++ if (txq->sw_ring) { ++ rte_free(txq->sw_ring); ++ txq->sw_ring = NULL; ++ } ++ if (txq->free) { ++ rte_free(txq->free); ++ txq->free = NULL; ++ } + rte_free(txq); + } + } +@@ -260,12 +273,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev) + hns3_free_tx_queues(dev); + } + ++static int ++hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr) ++{ ++ uint64_t rem; ++ ++ rem = dma_addr & (hw->rx_dma_addr_align - 1); ++ if (rem > 0) { ++ hns3_err(hw, "The IO address of the beginning of the mbuf data " ++ "must be %u-byte aligned", hw->rx_dma_addr_align); ++ return -EINVAL; ++ } ++ return 0; ++} ++ + static int + hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct 
hns3_rx_queue *rxq) + { + struct rte_mbuf *mbuf; + uint64_t dma_addr; + uint16_t i; ++ int ret; + + for (i = 0; i < rxq->nb_rx_desc; i++) { + mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); +@@ -286,6 +314,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq) + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf)); + rxq->rx_ring[i].addr = dma_addr; + rxq->rx_ring[i].rx.bd_base_info = 0; ++ ++ ret = hns3_check_rx_dma_addr(hw, dma_addr); ++ if (ret != 0) { ++ hns3_rx_queue_release_mbufs(rxq); ++ return ret; ++ } + } + + return 0; +@@ -686,13 +720,12 @@ tqp_reset_fail: + static int + hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) + { +- uint8_t msg_data[2]; ++ struct hns3_vf_to_pf_msg req; + int ret; + +- memcpy(msg_data, &queue_id, sizeof(uint16_t)); +- +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, NULL, 0); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ memcpy(req.data, &queue_id, sizeof(uint16_t)); ++ ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); + if (ret) + hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", + queue_id, ret); +@@ -769,15 +802,14 @@ static int + hns3vf_reset_all_tqps(struct hns3_hw *hw) + { + #define HNS3VF_RESET_ALL_TQP_DONE 1U ++ struct hns3_vf_to_pf_msg req; + uint8_t reset_status; +- uint8_t msg_data[2]; + int ret; + uint16_t i; + +- memset(msg_data, 0, sizeof(msg_data)); +- ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, +- sizeof(msg_data), true, &reset_status, +- sizeof(reset_status)); ++ hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); ++ ret = hns3vf_mbx_send(hw, &req, true, ++ &reset_status, sizeof(reset_status)); + if (ret) { + hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret); + return ret; +@@ -2390,8 +2422,7 @@ hns3_rx_ptp_timestamp_handle(struct hns3_rx_queue *rxq, struct rte_mbuf *mbuf, + { + struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(rxq->hns); + +- mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP | +- RTE_MBUF_F_RX_IEEE1588_TMST; ++ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST; + if (hns3_timestamp_rx_dynflag > 0) { + *RTE_MBUF_DYNFIELD(mbuf, hns3_timestamp_dynfield_offset, + rte_mbuf_timestamp_t *) = timestamp; +@@ -2670,6 +2701,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + continue; + } + ++ first_seg->ol_flags = 0; + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) + hns3_rx_ptp_timestamp_handle(rxq, first_seg, timestamp); + +@@ -2699,7 +2731,7 @@ hns3_recv_scattered_pkts(void *rx_queue, + + first_seg->port = rxq->port_id; + first_seg->hash.rss = rte_le_to_cpu_32(rxd.rx.rss_hash); +- first_seg->ol_flags = RTE_MBUF_F_RX_RSS_HASH; ++ first_seg->ol_flags |= RTE_MBUF_F_RX_RSS_HASH; + if (unlikely(bd_base_info & BIT(HNS3_RXD_LUM_B))) { + first_seg->hash.fdir.hi = + rte_le_to_cpu_16(rxd.rx.fd_id); +@@ -3617,58 +3649,6 @@ hns3_pkt_need_linearized(struct rte_mbuf *tx_pkts, uint32_t bd_num, + return false; + } + +-static bool +-hns3_outer_ipv4_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv4_hdr *ipv4_hdr; +- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) +- ipv4_hdr->hdr_checksum = 0; +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* +- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- 
m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv4_hdr->next_proto_id; +- return false; +-} +- +-static bool +-hns3_outer_ipv6_cksum_prepared(struct rte_mbuf *m, uint64_t ol_flags, +- uint32_t *l4_proto) +-{ +- struct rte_ipv6_hdr *ipv6_hdr; +- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, +- m->outer_l2_len); +- if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) { +- struct rte_udp_hdr *udp_hdr; +- /* +- * If OUTER_UDP_CKSUM is support, HW can calculate the pseudo +- * header for TSO packets +- */ +- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) +- return true; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + m->outer_l3_len); +- udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); +- +- return true; +- } +- *l4_proto = ipv6_hdr->proto; +- return false; +-} +- + static void + hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + { +@@ -3676,29 +3656,38 @@ hns3_outer_header_cksum_prepare(struct rte_mbuf *m) + uint32_t paylen, hdr_len, l4_proto; + struct rte_udp_hdr *udp_hdr; + +- if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6))) ++ if (!(ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) && ++ ((ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) || ++ !(ol_flags & RTE_MBUF_F_TX_TCP_SEG))) + return; + + if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) { +- if (hns3_outer_ipv4_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv4_hdr *ipv4_hdr; ++ ++ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv4_hdr->next_proto_id; + } else { +- if (hns3_outer_ipv6_cksum_prepared(m, ol_flags, &l4_proto)) +- return; ++ struct rte_ipv6_hdr *ipv6_hdr; ++ ++ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *, ++ m->outer_l2_len); ++ l4_proto = ipv6_hdr->proto; + } + ++ if (l4_proto != IPPROTO_UDP) ++ return; ++ + /* driver should ensure the outer udp cksum is 0 for TUNNEL TSO */ +- if (l4_proto == IPPROTO_UDP && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) { +- hdr_len = m->l2_len + m->l3_len + m->l4_len; +- hdr_len += m->outer_l2_len + m->outer_l3_len; +- paylen = m->pkt_len - hdr_len; +- if (paylen <= m->tso_segsz) +- return; +- udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, +- m->outer_l2_len + +- m->outer_l3_len); +- udp_hdr->dgram_cksum = 0; +- } ++ hdr_len = m->l2_len + m->l3_len + m->l4_len; ++ hdr_len += m->outer_l2_len + m->outer_l3_len; ++ paylen = m->pkt_len - hdr_len; ++ if (paylen <= m->tso_segsz) ++ return; ++ udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *, ++ m->outer_l2_len + ++ m->outer_l3_len); ++ udp_hdr->dgram_cksum = 0; + } + + static int +diff --git a/dpdk/drivers/net/i40e/base/i40e_adminq.c b/dpdk/drivers/net/i40e/base/i40e_adminq.c +index 27c82d9b44..cd3b0f2e45 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_adminq.c ++++ b/dpdk/drivers/net/i40e/base/i40e_adminq.c +@@ -791,12 +791,26 @@ u16 i40e_clean_asq(struct i40e_hw *hw) + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; ++ u32 head = 0; ++ ++ if (ntc >= (1 << 10)) ++ goto clean_asq_exit; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); +- while (rd32(hw, hw->aq.asq.head) != ntc) { ++ while (true) { ++ head = rd32(hw, hw->aq.asq.head); ++ ++ if (head >= asq->count) { ++ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "Read head value is improper\n"); ++ return 0; ++ } ++ ++ if (head == ntc) ++ break; ++ + 
i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, +- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); ++ "ntc %d head %d.\n", ntc, head); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = +@@ -816,6 +830,7 @@ u16 i40e_clean_asq(struct i40e_hw *hw) + + asq->next_to_clean = ntc; + ++clean_asq_exit: + return I40E_DESC_UNUSED(asq); + } + +diff --git a/dpdk/drivers/net/i40e/base/i40e_common.c b/dpdk/drivers/net/i40e/base/i40e_common.c +index ab655a0a72..547f5e3c2c 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_common.c ++++ b/dpdk/drivers/net/i40e/base/i40e_common.c +@@ -1019,9 +1019,6 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) + else + hw->pf_id = (u8)(func_rid & 0x7); + +- if (hw->mac.type == I40E_MAC_X722) +- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | +- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* NVMUpdate features structure initialization */ + hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; +@@ -1590,7 +1587,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) + **/ + u32 i40e_led_get(struct i40e_hw *hw) + { +- u32 current_mode = 0; + u32 mode = 0; + int i; + +@@ -1603,21 +1599,6 @@ u32 i40e_led_get(struct i40e_hw *hw) + if (!gpio_val) + continue; + +- /* ignore gpio LED src mode entries related to the activity +- * LEDs +- */ +- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) +- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); +- switch (current_mode) { +- case I40E_COMBINED_ACTIVITY: +- case I40E_FILTER_ACTIVITY: +- case I40E_MAC_ACTIVITY: +- case I40E_LINK_ACTIVITY: +- continue; +- default: +- break; +- } +- + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; +@@ -1637,7 +1618,6 @@ u32 i40e_led_get(struct i40e_hw *hw) + **/ + void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) + { +- u32 current_mode = 0; + int i; + + if (mode & ~I40E_LED_MODE_VALID) { +@@ -1654,21 +1634,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) + if (!gpio_val) + continue; + +- /* ignore gpio LED src mode entries related to the activity +- * LEDs +- */ +- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) +- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); +- switch (current_mode) { +- case I40E_COMBINED_ACTIVITY: +- case I40E_FILTER_ACTIVITY: +- case I40E_MAC_ACTIVITY: +- case I40E_LINK_ACTIVITY: +- continue; +- default: +- break; +- } +- + if (I40E_IS_X710TL_DEVICE(hw->device_id)) { + u32 pin_func = 0; + +@@ -4263,8 +4228,8 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ +- i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); +- if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) ++ status = i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); ++ if ((status == I40E_SUCCESS) && (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))) + hw->num_ports++; + } + +@@ -8203,7 +8168,8 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 sec_off; + u32 i; + +- if (track_id == I40E_DDP_TRACKID_INVALID) { ++ if (track_id == I40E_DDP_TRACKID_INVALID || ++ track_id == I40E_DDP_TRACKID_RDONLY) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); + return I40E_NOT_SUPPORTED; + } +diff --git a/dpdk/drivers/net/i40e/base/i40e_devids.h b/dpdk/drivers/net/i40e/base/i40e_devids.h +index ee31e51f57..37d7ee9939 100644 +--- 
a/dpdk/drivers/net/i40e/base/i40e_devids.h ++++ b/dpdk/drivers/net/i40e/base/i40e_devids.h +@@ -42,7 +42,8 @@ + #define I40E_DEV_ID_10G_SFP 0x104E + #define I40E_IS_X710TL_DEVICE(d) \ + (((d) == I40E_DEV_ID_10G_BASE_T_BC) || \ +- ((d) == I40E_DEV_ID_5G_BASE_T_BC)) ++ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \ ++ ((d) == I40E_DEV_ID_1G_BASE_T_BC)) + #define I40E_DEV_ID_KX_X722 0x37CE + #define I40E_DEV_ID_QSFP_X722 0x37CF + #define I40E_DEV_ID_SFP_X722 0x37D0 +diff --git a/dpdk/drivers/net/i40e/base/i40e_diag.c b/dpdk/drivers/net/i40e/base/i40e_diag.c +index b3c4cfd3aa..4ca102cdd5 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_diag.c ++++ b/dpdk/drivers/net/i40e/base/i40e_diag.c +@@ -55,7 +55,7 @@ static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw, + return I40E_SUCCESS; + } + +-static struct i40e_diag_reg_test_info i40e_reg_list[] = { ++static const struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, +@@ -81,28 +81,28 @@ enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw) + { + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg, mask; ++ u32 elements; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 && + ret_code == I40E_SUCCESS; i++) { + ++ elements = i40e_reg_list[i].elements; + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) +- i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; ++ elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) +- i40e_reg_list[i].elements = +- hw->func_caps.num_msix_vectors - 1; ++ elements = hw->func_caps.num_msix_vectors - 1; + + /* test register access */ + mask = i40e_reg_list[i].mask; +- for (j = 0; j < i40e_reg_list[i].elements && +- ret_code == I40E_SUCCESS; j++) { ++ for (j = 0; j < elements && ret_code == I40E_SUCCESS; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); +diff --git a/dpdk/drivers/net/i40e/base/i40e_nvm.c b/dpdk/drivers/net/i40e/base/i40e_nvm.c +index f385042601..05816a4b79 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_nvm.c ++++ b/dpdk/drivers/net/i40e/base/i40e_nvm.c +@@ -223,11 +223,11 @@ read_nvm_exit: + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start +- * @words: number of words to write +- * @data: buffer with words to write to the Shadow RAM ++ * @words: number of words to read ++ * @data: buffer with words to read from the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * +- * Writes a 16 bit words buffer to the Shadow RAM using the admin command. ++ * Reads a 16 bit words buffer to the Shadow RAM using the admin command. 
+ **/ + STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, +@@ -249,18 +249,18 @@ STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write error: offset %d beyond Shadow RAM limit %d\n", ++ "NVM read error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) +- /* We can write only up to 4KB (one sector), in one AQ write */ ++ /* We can read only up to 4KB (one sector), in one AQ read */ + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write fail error: tried to write %d words, limit is %d.\n", ++ "NVM read fail error: tried to read %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) +- /* A single write cannot spread over two sectors */ ++ /* A single read cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", ++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, +diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c +index 3ca226156b..2b043cd693 100644 +--- a/dpdk/drivers/net/i40e/i40e_ethdev.c ++++ b/dpdk/drivers/net/i40e/i40e_ethdev.c +@@ -653,7 +653,7 @@ eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + + if (eth_da.nb_representor_ports > 0 && + eth_da.type != RTE_ETH_REPRESENTOR_VF) { +- PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", ++ PMD_DRV_LOG(ERR, "unsupported representor type: %s", + pci_dev->device.devargs->args); + return -ENOTSUP; + } +@@ -1480,10 +1480,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused) + + val = I40E_READ_REG(hw, I40E_GL_FWSTS); + if (val & I40E_GL_FWSTS_FWS1B_MASK) { +- PMD_INIT_LOG(ERR, "\nERROR: " +- "Firmware recovery mode detected. Limiting functionality.\n" +- "Refer to the Intel(R) Ethernet Adapters and Devices " +- "User Guide for details on firmware recovery mode."); ++ PMD_INIT_LOG(ERR, "ERROR: Firmware recovery mode detected. Limiting functionality."); + return -EIO; + } + +@@ -2222,7 +2219,7 @@ i40e_phy_conf_link(struct i40e_hw *hw, + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + if (status) { +- PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n", ++ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d", + status); + return ret; + } +@@ -2232,7 +2229,7 @@ i40e_phy_conf_link(struct i40e_hw *hw, + status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, + NULL); + if (status) { +- PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n", ++ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d", + status); + return ret; + } +@@ -2257,7 +2254,7 @@ i40e_phy_conf_link(struct i40e_hw *hw, + * Warn users and config the default available speeds. + */ + if (is_up && !(force_speed & avail_speed)) { +- PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n"); ++ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!"); + phy_conf.link_speed = avail_speed; + } else { + phy_conf.link_speed = is_up ? 
force_speed : avail_speed; +@@ -3724,8 +3721,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + dev_info->tx_queue_offload_capa; ++ if (hw->mac.type == I40E_MAC_X722) { ++ dev_info->tx_offload_capa |= ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ } ++ + dev_info->dev_capa = + RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | + RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; +@@ -6810,7 +6811,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) + I40E_GL_MDET_TX_QUEUE_SHIFT) - + hw->func_caps.base_queue; + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on TX " +- "queue %d PF number 0x%02x VF number 0x%02x device %s\n", ++ "queue %d PF number 0x%02x VF number 0x%02x device %s", + event, queue, pf_num, vf_num, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_TX, I40E_MDD_CLEAR32); + mdd_detected = true; +@@ -6826,7 +6827,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) + hw->func_caps.base_queue; + + PMD_DRV_LOG(WARNING, "Malicious Driver Detection event 0x%02x on RX " +- "queue %d of function 0x%02x device %s\n", ++ "queue %d of function 0x%02x device %s", + event, queue, func, dev->data->name); + I40E_WRITE_REG(hw, I40E_GL_MDET_RX, I40E_MDD_CLEAR32); + mdd_detected = true; +@@ -6836,13 +6837,13 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) + reg = I40E_READ_REG(hw, I40E_PF_MDET_TX); + if (reg & I40E_PF_MDET_TX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_TX, I40E_MDD_CLEAR16); +- PMD_DRV_LOG(WARNING, "TX driver issue detected on PF\n"); ++ PMD_DRV_LOG(WARNING, "TX driver issue detected on PF"); + } + reg = I40E_READ_REG(hw, I40E_PF_MDET_RX); + if (reg & I40E_PF_MDET_RX_VALID_MASK) { + I40E_WRITE_REG(hw, I40E_PF_MDET_RX, + I40E_MDD_CLEAR16); +- PMD_DRV_LOG(WARNING, "RX driver issue detected on PF\n"); ++ PMD_DRV_LOG(WARNING, "RX driver issue detected on PF"); + } + } + +@@ -6855,7 +6856,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "TX driver issue detected on VF %d %-" +- PRIu64 "times\n", ++ PRIu64 "times", + i, vf->num_mdd_events); + } + +@@ -6865,7 +6866,7 @@ i40e_handle_mdd_event(struct rte_eth_dev *dev) + I40E_MDD_CLEAR16); + vf->num_mdd_events++; + PMD_DRV_LOG(WARNING, "RX driver issue detected on VF %d %-" +- PRIu64 "times\n", ++ PRIu64 "times", + i, vf->num_mdd_events); + } + } +@@ -11300,7 +11301,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + PMD_DRV_LOG(ERR, + "Module EEPROM memory read not supported. " +- "Please update the NVM image.\n"); ++ "Please update the NVM image."); + return -EINVAL; + } + +@@ -11311,7 +11312,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, + if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { + PMD_DRV_LOG(ERR, + "Cannot read module EEPROM memory. 
" +- "No module connected.\n"); ++ "No module connected."); + return -EINVAL; + } + +@@ -11341,7 +11342,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, + if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { + PMD_DRV_LOG(WARNING, + "Module address swap to access " +- "page 0xA2 is not supported.\n"); ++ "page 0xA2 is not supported."); + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp == 0x00) { +@@ -11377,7 +11378,7 @@ static int i40e_get_module_info(struct rte_eth_dev *dev, + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + break; + default: +- PMD_DRV_LOG(ERR, "Module type unrecognized\n"); ++ PMD_DRV_LOG(ERR, "Module type unrecognized"); + return -EINVAL; + } + return 0; +@@ -11679,7 +11680,7 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg, + } + } + name[strlen(name) - 1] = '\0'; +- PMD_DRV_LOG(INFO, "name = %s\n", name); ++ PMD_DRV_LOG(INFO, "name = %s", name); + if (!strcmp(name, "GTPC")) + new_pctype = + i40e_find_customized_pctype(pf, +@@ -11823,7 +11824,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, + continue; + memset(name, 0, sizeof(name)); + strcpy(name, proto[n].name); +- PMD_DRV_LOG(INFO, "name = %s\n", name); ++ PMD_DRV_LOG(INFO, "name = %s", name); + if (!strncasecmp(name, "PPPOE", 5)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L2_ETHER_PPPOE; +diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c +index 877e49151e..273cb2d80c 100644 +--- a/dpdk/drivers/net/i40e/i40e_flow.c ++++ b/dpdk/drivers/net/i40e/i40e_flow.c +@@ -1263,27 +1263,31 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, + return 0; + } + +-static uint16_t +-i40e_get_outer_vlan(struct rte_eth_dev *dev) ++static int ++i40e_get_outer_vlan(struct rte_eth_dev *dev, uint16_t *tpid) + { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.offloads & + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; + uint64_t reg_r = 0; + uint16_t reg_id; +- uint16_t tpid; ++ int ret; + + if (qinq) + reg_id = 2; + else + reg_id = 3; + +- i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ++ ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ®_r, NULL); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "Failed to read from L2 tag ctrl register [%d]", reg_id); ++ return -EIO; ++ } + +- tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF; ++ *tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF; + +- return tpid; ++ return 0; + } + + /* 1. Last in item should be NULL as range is not supported. 
+@@ -1303,6 +1307,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + enum rte_flow_item_type item_type; ++ int ret; ++ uint16_t tpid; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { +@@ -1361,8 +1367,23 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6 || +- filter->ether_type == RTE_ETHER_TYPE_LLDP || +- filter->ether_type == i40e_get_outer_vlan(dev)) { ++ filter->ether_type == RTE_ETHER_TYPE_LLDP) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type in control packet filter."); ++ return -rte_errno; ++ } ++ ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (filter->ether_type == tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, +@@ -1370,6 +1391,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + " control packet filter."); + return -rte_errno; + } ++ + break; + default: + break; +@@ -1641,6 +1663,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + bool outer_ip = true; + uint8_t field_idx; + int ret; ++ uint16_t tpid; + + memset(off_arr, 0, sizeof(off_arr)); + memset(len_arr, 0, sizeof(len_arr)); +@@ -1708,16 +1731,30 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + + ether_type = rte_be_to_cpu_16(eth_spec->hdr.ether_type); + +- if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || +- ether_type == RTE_ETHER_TYPE_IPV4 || +- ether_type == RTE_ETHER_TYPE_IPV6 || +- ether_type == i40e_get_outer_vlan(dev)) { ++ if (ether_type == RTE_ETHER_TYPE_IPV4 || ++ ether_type == RTE_ETHER_TYPE_IPV6) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type."); ++ return -rte_errno; ++ } ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (ether_type == tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } ++ + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->hdr.ether_type; +@@ -1764,14 +1801,29 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + rte_be_to_cpu_16(vlan_spec->hdr.eth_proto); + + if (ether_type == RTE_ETHER_TYPE_IPV4 || +- ether_type == RTE_ETHER_TYPE_IPV6 || +- ether_type == i40e_get_outer_vlan(dev)) { ++ ether_type == RTE_ETHER_TYPE_IPV6) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (ether_type == tpid) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type."); ++ return -rte_errno; ++ } ++ + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->hdr.eth_proto; +diff --git a/dpdk/drivers/net/i40e/i40e_pf.c b/dpdk/drivers/net/i40e/i40e_pf.c 
+index 15d9ff868f..4a47a8f7ee 100644 +--- a/dpdk/drivers/net/i40e/i40e_pf.c ++++ b/dpdk/drivers/net/i40e/i40e_pf.c +@@ -1280,17 +1280,17 @@ i40e_pf_host_process_cmd_request_queues(struct i40e_pf_vf *vf, uint8_t *msg) + req_pairs = i40e_align_floor(req_pairs) << 1; + + if (req_pairs == 0) { +- PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.\n", ++ PMD_DRV_LOG(ERR, "VF %d tried to request 0 queues. Ignoring.", + vf->vf_idx); + } else if (req_pairs > I40E_MAX_QP_NUM_PER_VF) { + PMD_DRV_LOG(ERR, +- "VF %d tried to request more than %d queues.\n", ++ "VF %d tried to request more than %d queues.", + vf->vf_idx, + I40E_MAX_QP_NUM_PER_VF); + vfres->num_queue_pairs = I40E_MAX_QP_NUM_PER_VF; + } else if (req_pairs > cur_pairs + pf->qp_pool.num_free) { + PMD_DRV_LOG(ERR, "VF %d requested %d queues (rounded to %d) " +- "but only %d available\n", ++ "but only %d available", + vf->vf_idx, + vfres->num_queue_pairs, + req_pairs, +@@ -1550,7 +1550,7 @@ check: + if (first_cycle && cur_cycle < first_cycle + + (uint64_t)pf->vf_msg_cfg.period * rte_get_timer_hz()) { + PMD_DRV_LOG(WARNING, "VF %u too much messages(%u in %u" +- " seconds),\n\tany new message from which" ++ " seconds), any new message from which" + " will be ignored during next %u seconds!", + vf_id, pf->vf_msg_cfg.max_msg, + (uint32_t)((cur_cycle - first_cycle + +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c +index 9aa5facb53..e65e8829d9 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx.c +@@ -295,6 +295,15 @@ i40e_parse_tunneling_params(uint64_t ol_flags, + */ + *cd_tunneling |= (tx_offload.l2_len >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; ++ ++ /** ++ * Calculate the tunneling UDP checksum (only supported with X722). ++ * Shall be set only if L4TUNT = 01b and EIPT is not zero ++ */ ++ if ((*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK) && ++ (*cd_tunneling & I40E_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } + + static inline void +@@ -1220,11 +1229,11 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + ctx_txd->type_cmd_tso_mss = + rte_cpu_to_le_64(cd_type_cmd_tso_mss); + +- PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n" +- "tunneling_params: %#x;\n" +- "l2tag2: %#hx;\n" +- "rsvd: %#hx;\n" +- "type_cmd_tso_mss: %#"PRIx64";\n", ++ PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]: " ++ "tunneling_params: %#x; " ++ "l2tag2: %#hx; " ++ "rsvd: %#hx; " ++ "type_cmd_tso_mss: %#"PRIx64";", + tx_pkt, tx_id, + ctx_txd->tunneling_params, + ctx_txd->l2tag2, +@@ -1267,12 +1276,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + } +- PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n" +- "buf_dma_addr: %#"PRIx64";\n" +- "td_cmd: %#x;\n" +- "td_offset: %#x;\n" +- "td_len: %u;\n" +- "td_tag: %#x;\n", ++ PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]: " ++ "buf_dma_addr: %#"PRIx64"; " ++ "td_cmd: %#x; " ++ "td_offset: %#x; " ++ "td_len: %u; " ++ "td_tag: %#x;", + tx_pkt, tx_id, buf_dma_addr, + td_cmd, td_offset, slen, td_tag); + +@@ -3458,7 +3467,7 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq) + txq->queue_id); + else + PMD_INIT_LOG(DEBUG, +- "Neither simple nor vector Tx enabled on Tx queue %u\n", ++ "Neither simple nor vector Tx enabled on Tx queue %u", + txq->queue_id); + } + +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +index 
f468c1fd90..19cf0ac718 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx2.c +@@ -276,46 +276,30 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- do { +- const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); +- raw_desc4_5 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); +- raw_desc2_3 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); +- raw_desc0_1 = _mm256_inserti128_si256( +- _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); +- } while (0); ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = _mm256_inserti128_si256( ++ _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; +diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +index f3050cd06c..62fce19dc4 100644 +--- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +@@ -799,6 +799,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) 
{ ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_load_si512(&txep[copied]); + const __m512i b = _mm512_load_si512(&txep[copied + 8]); + const __m512i c = _mm512_load_si512(&txep[copied + 16]); +@@ -808,6 +809,12 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_load_si512(&txep[copied]); ++ const __m512i b = _mm512_load_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; +diff --git a/dpdk/drivers/net/iavf/iavf.h b/dpdk/drivers/net/iavf/iavf.h +index 10868f2c30..d273d884f5 100644 +--- a/dpdk/drivers/net/iavf/iavf.h ++++ b/dpdk/drivers/net/iavf/iavf.h +@@ -18,7 +18,8 @@ + + #define IAVF_AQ_LEN 32 + #define IAVF_AQ_BUF_SZ 4096 +-#define IAVF_RESET_WAIT_CNT 500 ++#define IAVF_RESET_WAIT_CNT 2000 ++#define IAVF_RESET_DETECTED_CNT 500 + #define IAVF_BUF_SIZE_MIN 1024 + #define IAVF_FRAME_SIZE_MAX 9728 + #define IAVF_QUEUE_BASE_ADDR_UNIT 128 +@@ -511,5 +512,6 @@ int iavf_flow_sub_check(struct iavf_adapter *adapter, + struct iavf_fsub_conf *filter); + void iavf_dev_watchdog_enable(struct iavf_adapter *adapter); + void iavf_dev_watchdog_disable(struct iavf_adapter *adapter); +-int iavf_handle_hw_reset(struct rte_eth_dev *dev); ++void iavf_handle_hw_reset(struct rte_eth_dev *dev); ++void iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change); + #endif /* _IAVF_ETHDEV_H_ */ +diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c +index d1edb0dd5c..9f3658c48b 100644 +--- a/dpdk/drivers/net/iavf/iavf_ethdev.c ++++ b/dpdk/drivers/net/iavf/iavf_ethdev.c +@@ -296,6 +296,7 @@ iavf_dev_watchdog(void *cb_arg) + PMD_DRV_LOG(INFO, "VF \"%s\" reset has completed", + adapter->vf.eth_dev->data->name); + adapter->vf.vf_reset = false; ++ iavf_set_no_poll(adapter, false); + } + /* If not in reset then poll vfr_inprogress register for VFLR event */ + } else { +@@ -308,6 +309,7 @@ iavf_dev_watchdog(void *cb_arg) + + /* enter reset state with VFLR event */ + adapter->vf.vf_reset = true; ++ iavf_set_no_poll(adapter, false); + adapter->vf.link_up = false; + + iavf_dev_event_post(adapter->vf.eth_dev, RTE_ETH_EVENT_INTR_RESET, +@@ -628,7 +630,8 @@ iavf_dev_init_vlan(struct rte_eth_dev *dev) + RTE_ETH_VLAN_FILTER_MASK | + RTE_ETH_VLAN_EXTEND_MASK); + if (err) { +- PMD_DRV_LOG(ERR, "Failed to update vlan offload"); ++ PMD_DRV_LOG(INFO, ++ "VLAN offloading is not supported, or offloading was refused by the PF"); + return err; + } + +@@ -704,9 +707,7 @@ iavf_dev_configure(struct rte_eth_dev *dev) + vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; + } + +- ret = iavf_dev_init_vlan(dev); +- if (ret) +- PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret); ++ iavf_dev_init_vlan(dev); + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (iavf_init_rss(ad) != 0) { +@@ -1032,7 +1033,7 @@ iavf_dev_start(struct rte_eth_dev *dev) + if (iavf_configure_queues(adapter, + IAVF_CFG_Q_NUM_PER_BUF, index) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); +- goto err_queue; ++ goto error; + } + num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF; + index += IAVF_CFG_Q_NUM_PER_BUF; +@@ -1040,12 +1041,12 @@ iavf_dev_start(struct rte_eth_dev *dev) + + if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); +- goto 
err_queue; ++ goto error; + } + + if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) { + PMD_DRV_LOG(ERR, "configure irq failed"); +- goto err_queue; ++ goto error; + } + /* re-enable intr again, because efd assign may change */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { +@@ -1065,14 +1066,12 @@ iavf_dev_start(struct rte_eth_dev *dev) + + if (iavf_start_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "enable queues failed"); +- goto err_mac; ++ goto error; + } + + return 0; + +-err_mac: +- iavf_add_del_all_mac_addr(adapter, false); +-err_queue: ++error: + return -1; + } + +@@ -1086,9 +1085,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) + + PMD_INIT_FUNC_TRACE(); + +- if (vf->vf_reset) +- return 0; +- + if (adapter->closed) + return -1; + +@@ -1104,16 +1100,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) + /* Rx interrupt vector mapping free */ + rte_intr_vec_list_free(intr_handle); + +- /* adminq will be disabled when vf is resetting. */ +- if (!vf->in_reset_recovery) { +- /* remove all mac addrs */ +- iavf_add_del_all_mac_addr(adapter, false); +- +- /* remove all multicast addresses */ +- iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, +- false); +- } +- + iavf_stop_queues(dev); + + adapter->stopped = 1; +@@ -1165,7 +1151,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_TSO | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | +@@ -1174,6 +1159,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; + ++ /* X710 does not support outer udp checksum */ ++ if (adapter->hw.mac.type != IAVF_MAC_XL710) ++ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC) + dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; + +@@ -2300,7 +2289,7 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev) + + kvlist = rte_kvargs_parse(devargs->args, iavf_valid_args); + if (!kvlist) { +- PMD_INIT_LOG(ERR, "invalid kvargs key\n"); ++ PMD_INIT_LOG(ERR, "invalid kvargs key"); + return -EINVAL; + } + +@@ -2335,7 +2324,7 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev) + if (ad->devargs.quanta_size != 0 && + (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 || + ad->devargs.quanta_size & 0x40)) { +- PMD_INIT_LOG(ERR, "invalid quanta size\n"); ++ PMD_INIT_LOG(ERR, "invalid quanta size"); + ret = -EINVAL; + goto bail; + } +@@ -2874,6 +2863,7 @@ iavf_dev_close(struct rte_eth_dev *dev) + if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) + iavf_config_promisc(adapter, false, false); + ++ iavf_vf_reset(hw); + iavf_shutdown_adminq(hw); + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { + /* disable uio intr before callback unregister */ +@@ -2916,8 +2906,10 @@ iavf_dev_close(struct rte_eth_dev *dev) + * effect. 
+ */ + out: +- if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) ++ if (vf->vf_reset && !rte_pci_set_bus_master(pci_dev, true)) { + vf->vf_reset = false; ++ iavf_set_no_poll(adapter, false); ++ } + + /* disable watchdog */ + iavf_dev_watchdog_disable(adapter); +@@ -2948,9 +2940,9 @@ static int + iavf_dev_reset(struct rte_eth_dev *dev) + { + int ret; ++ struct iavf_adapter *adapter = ++ IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); +- struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); +- + /* + * Check whether the VF reset has been done and inform application, + * to avoid calling the virtual channel command, which may cause +@@ -2958,12 +2950,12 @@ iavf_dev_reset(struct rte_eth_dev *dev) + */ + ret = iavf_check_vf_reset_done(hw); + if (ret) { +- PMD_DRV_LOG(ERR, "Wait too long for reset done!\n"); ++ PMD_DRV_LOG(ERR, "Wait too long for reset done!"); + return ret; + } +- vf->vf_reset = false; ++ iavf_set_no_poll(adapter, false); + +- PMD_DRV_LOG(DEBUG, "Start dev_reset ...\n"); ++ PMD_DRV_LOG(DEBUG, "Start dev_reset ..."); + ret = iavf_dev_uninit(dev); + if (ret) + return ret; +@@ -2971,16 +2963,49 @@ iavf_dev_reset(struct rte_eth_dev *dev) + return iavf_dev_init(dev); + } + ++static inline bool ++iavf_is_reset(struct iavf_hw *hw) ++{ ++ return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) & ++ IAVF_VF_ARQLEN1_ARQENABLE_MASK); ++} ++ ++static bool ++iavf_is_reset_detected(struct iavf_adapter *adapter) ++{ ++ struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); ++ int i; ++ ++ /* poll until we see the reset actually happen */ ++ for (i = 0; i < IAVF_RESET_DETECTED_CNT; i++) { ++ if (iavf_is_reset(hw)) ++ return true; ++ rte_delay_ms(20); ++ } ++ ++ return false; ++} ++ + /* + * Handle hardware reset + */ +-int ++void + iavf_handle_hw_reset(struct rte_eth_dev *dev) + { + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); ++ struct iavf_adapter *adapter = dev->data->dev_private; + int ret; + ++ if (!dev->data->dev_started) ++ return; ++ ++ if (!iavf_is_reset_detected(adapter)) { ++ PMD_DRV_LOG(DEBUG, "reset not start"); ++ return; ++ } ++ + vf->in_reset_recovery = true; ++ iavf_set_no_poll(adapter, false); + + ret = iavf_dev_reset(dev); + if (ret) +@@ -2997,15 +3022,26 @@ iavf_handle_hw_reset(struct rte_eth_dev *dev) + ret = iavf_dev_start(dev); + if (ret) + goto error; +- dev->data->dev_started = 1; + +- vf->in_reset_recovery = false; +- return 0; ++ dev->data->dev_started = 1; ++ goto exit; + + error: +- PMD_DRV_LOG(DEBUG, "RESET recover with error code=%d\n", ret); ++ PMD_DRV_LOG(DEBUG, "RESET recover with error code=%dn", ret); ++exit: + vf->in_reset_recovery = false; +- return ret; ++ iavf_set_no_poll(adapter, false); ++ ++ return; ++} ++ ++void ++iavf_set_no_poll(struct iavf_adapter *adapter, bool link_change) ++{ ++ struct iavf_info *vf = &adapter->vf; ++ ++ adapter->no_poll = (link_change & !vf->link_up) || ++ vf->vf_reset || vf->in_reset_recovery; + } + + static int +diff --git a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +index 07a69db540..d6c0180ffd 100644 +--- a/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c ++++ b/dpdk/drivers/net/iavf/iavf_ipsec_crypto.c +@@ -1518,8 +1518,11 @@ iavf_security_ctx_create(struct iavf_adapter *adapter) + if (adapter->security_ctx == NULL) { + adapter->security_ctx = rte_malloc("iavf_security_ctx", + sizeof(struct iavf_security_ctx), 0); +- if (adapter->security_ctx == NULL) ++ if 
(adapter->security_ctx == NULL) { ++ rte_free(adapter->vf.eth_dev->security_ctx); ++ adapter->vf.eth_dev->security_ctx = NULL; + return -ENOMEM; ++ } + } + + return 0; +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c +index f19aa14646..5fbc581b95 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx.c +@@ -3027,7 +3027,7 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m) + up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET; + + if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) { +- PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n", ++ PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u", + up, txq->queue_id); + return -1; + } else { +@@ -3668,7 +3668,11 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, + return i; + } + +- if (m->pkt_len < IAVF_TX_MIN_PKT_LEN) { ++ /* valid packets are greater than min size, and single-buffer pkts ++ * must have data_len == pkt_len ++ */ ++ if (m->pkt_len < IAVF_TX_MIN_PKT_LEN || ++ (m->nb_segs == 1 && m->data_len != m->pkt_len)) { + rte_errno = EINVAL; + return i; + } +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +index 510b4d8f1c..49d41af953 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx2.c +@@ -193,62 +193,30 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const 
__m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; +diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +index 7a7df6d258..0e94eada4a 100644 +--- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +@@ -1892,6 +1892,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -1901,6 +1902,12 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ const __m512i b = _mm512_loadu_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; +diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c +index 0a3e1d082c..71be87845a 100644 +--- a/dpdk/drivers/net/iavf/iavf_vchnl.c ++++ b/dpdk/drivers/net/iavf/iavf_vchnl.c +@@ -255,8 +255,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, + case VIRTCHNL_EVENT_LINK_CHANGE: + vf->link_up = + vpe->event_data.link_event.link_status; +- if (vf->vf_res->vf_cap_flags & +- VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { ++ if (vf->vf_res != NULL && ++ vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + vf->link_speed = + vpe->event_data.link_event_adv.link_speed; + } else { +@@ -273,20 +273,18 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, + iavf_dev_watchdog_enable(adapter); + } + if (adapter->devargs.no_poll_on_link_down) { +- if (vf->link_up && adapter->no_poll) { +- adapter->no_poll = false; +- PMD_DRV_LOG(DEBUG, "VF no poll turned off"); +- } +- if (!vf->link_up) { +- adapter->no_poll = true; ++ iavf_set_no_poll(adapter, true); ++ if (adapter->no_poll) + PMD_DRV_LOG(DEBUG, "VF no poll turned on"); +- } ++ else ++ PMD_DRV_LOG(DEBUG, "VF no poll turned off"); + } + PMD_DRV_LOG(INFO, "Link status update:%s", + vf->link_up ? 
"up" : "down"); + break; + case VIRTCHNL_EVENT_RESET_IMPENDING: + vf->vf_reset = true; ++ iavf_set_no_poll(adapter, false); + PMD_DRV_LOG(INFO, "VF is resetting"); + break; + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: +@@ -462,6 +460,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, + vf->link_up = false; + if (!vf->vf_reset) { + vf->vf_reset = true; ++ iavf_set_no_poll(adapter, false); + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET, + NULL, 0); + } +@@ -485,14 +484,11 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg, + iavf_dev_watchdog_enable(adapter); + } + if (adapter->devargs.no_poll_on_link_down) { +- if (vf->link_up && adapter->no_poll) { +- adapter->no_poll = false; +- PMD_DRV_LOG(DEBUG, "VF no poll turned off"); +- } +- if (!vf->link_up) { +- adapter->no_poll = true; ++ iavf_set_no_poll(adapter, true); ++ if (adapter->no_poll) + PMD_DRV_LOG(DEBUG, "VF no poll turned on"); +- } ++ else ++ PMD_DRV_LOG(DEBUG, "VF no poll turned off"); + } + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); + break; +@@ -714,6 +710,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | + VIRTCHNL_VF_OFFLOAD_FSUB_PF | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | ++ VIRTCHNL_VF_OFFLOAD_USO | + VIRTCHNL_VF_OFFLOAD_CRC | + VIRTCHNL_VF_OFFLOAD_VLAN_V2 | + VIRTCHNL_VF_LARGE_NUM_QPAIRS | +diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +index 844e90bbce..56ba2041f2 100644 +--- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h ++++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +@@ -1621,7 +1621,7 @@ struct ice_aqc_get_link_status_data { + #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 + #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +-#define ICE_AQ_LINK_SPEED_M 0x7FF ++#define ICE_AQ_LINK_SPEED_M 0xFFF + #define ICE_AQ_LINK_SPEED_10MB BIT(0) + #define ICE_AQ_LINK_SPEED_100MB BIT(1) + #define ICE_AQ_LINK_SPEED_1000MB BIT(2) +@@ -1728,8 +1728,8 @@ struct ice_aqc_link_topo_addr { + #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) + /* Used to decode the handle field */ + #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 ++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM 0 ++#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ BIT(9) + #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 + /* In case of a Mezzanine type */ + #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ +diff --git a/dpdk/drivers/net/ice/base/ice_bitops.h b/dpdk/drivers/net/ice/base/ice_bitops.h +index 3b71c1b7f5..5c17bcb674 100644 +--- a/dpdk/drivers/net/ice/base/ice_bitops.h ++++ b/dpdk/drivers/net/ice/base/ice_bitops.h +@@ -418,10 +418,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits) + * Note that this function assumes it is operating on a bitmap declared using + * ice_declare_bitmap. 
+ */ +-static inline int ++static inline u16 + ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) + { +- int count = 0; ++ u16 count = 0; + u16 bit = 0; + + while (size > (bit = ice_find_next_bit(bm, size, bit))) { +diff --git a/dpdk/drivers/net/ice/base/ice_common.c b/dpdk/drivers/net/ice/base/ice_common.c +index 8867279c28..7a50a0f9f0 100644 +--- a/dpdk/drivers/net/ice/base/ice_common.c ++++ b/dpdk/drivers/net/ice/base/ice_common.c +@@ -3890,8 +3890,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + break; + case ICE_FEC_DIS_AUTO: + /* Set No FEC and auto FEC */ +- if (!ice_fw_supports_fec_dis_auto(hw)) +- return ICE_ERR_NOT_SUPPORTED; ++ if (!ice_fw_supports_fec_dis_auto(hw)) { ++ status = ICE_ERR_NOT_SUPPORTED; ++ goto out; ++ } + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS; + /* fall-through */ + case ICE_FEC_AUTO: +@@ -4904,7 +4906,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) + + ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); + +- dest_byte &= ~(mask); ++ dest_byte &= mask; + + dest_byte >>= shift_width; + +@@ -4944,7 +4946,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_word &= ~(CPU_TO_LE16(mask)); ++ src_word &= CPU_TO_LE16(mask); + + /* get the data back into host order before shifting */ + dest_word = LE16_TO_CPU(src_word); +@@ -4995,7 +4997,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_dword &= ~(CPU_TO_LE32(mask)); ++ src_dword &= CPU_TO_LE32(mask); + + /* get the data back into host order before shifting */ + dest_dword = LE32_TO_CPU(src_dword); +@@ -5046,7 +5048,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) + /* the data in the memory is stored as little endian so mask it + * correctly + */ +- src_qword &= ~(CPU_TO_LE64(mask)); ++ src_qword &= CPU_TO_LE64(mask); + + /* get the data back into host order before shifting */ + dest_qword = LE64_TO_CPU(src_qword); +diff --git a/dpdk/drivers/net/ice/base/ice_controlq.c b/dpdk/drivers/net/ice/base/ice_controlq.c +index c34407b48c..4896fd2731 100644 +--- a/dpdk/drivers/net/ice/base/ice_controlq.c ++++ b/dpdk/drivers/net/ice/base/ice_controlq.c +@@ -846,12 +846,23 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + u16 ntc = sq->next_to_clean; + struct ice_sq_cd *details; + struct ice_aq_desc *desc; ++ u32 head; + + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + +- while (rd32(hw, cq->sq.head) != ntc) { +- ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); ++ head = rd32(hw, sq->head); ++ if (head >= sq->count) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "Read head value (%d) exceeds allowed range.\n", ++ head); ++ return 0; ++ } ++ ++ while (head != ntc) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "ntc %d head %d.\n", ++ ntc, head); + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); + ntc++; +@@ -859,6 +870,14 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + ntc = 0; + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); ++ ++ head = rd32(hw, sq->head); ++ if (head >= sq->count) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "Read head value (%d) exceeds allowed range.\n", ++ head); ++ return 0; ++ } 
+ } + + sq->next_to_clean = ntc; +diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +index f9266447d9..a0e4f5fa27 100644 +--- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c ++++ b/dpdk/drivers/net/ice/base/ice_flex_pipe.c +@@ -1534,16 +1534,14 @@ ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, + switch (blk) { + case ICE_BLK_RSS: + offset = GLQF_HMASK(mask_idx); +- val = (idx << GLQF_HMASK_MSK_INDEX_S) & +- GLQF_HMASK_MSK_INDEX_M; +- val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; ++ val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; ++ val |= ((u32)mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; + break; + case ICE_BLK_FD: + offset = GLQF_FDMASK(mask_idx); + val = (idx << GLQF_FDMASK_MSK_INDEX_S) & + GLQF_FDMASK_MSK_INDEX_M; +- val |= (mask << GLQF_FDMASK_MASK_S) & +- GLQF_FDMASK_MASK_M; ++ val |= ((u32)mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; + break; + default: + ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n", +diff --git a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +index d816df0ff6..39673e36f7 100644 +--- a/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h ++++ b/dpdk/drivers/net/ice/base/ice_lan_tx_rx.h +@@ -1074,10 +1074,9 @@ struct ice_tx_ctx_desc { + __le64 qw1; + }; + +-#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */ +-#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */ +-#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */ +-#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */ ++#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */ ++#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */ ++#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */ + + #define ICE_TXD_CTX_QW1_DTYPE_S 0 + #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) +diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c +index e46aded12a..98c4c943ca 100644 +--- a/dpdk/drivers/net/ice/base/ice_nvm.c ++++ b/dpdk/drivers/net/ice/base/ice_nvm.c +@@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + enum ice_status status; + u32 inlen = *length; + u32 bytes_read = 0; ++ int retry_cnt = 0; + bool last_cmd; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); +@@ -106,11 +107,24 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, + offset, (u16)read_size, + data + bytes_read, last_cmd, + read_shadow_ram, NULL); +- if (status) +- break; +- +- bytes_read += read_size; +- offset += read_size; ++ if (status) { ++ if (hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY || ++ retry_cnt > ICE_SQ_SEND_MAX_EXECUTE) ++ break; ++ ice_debug(hw, ICE_DBG_NVM, ++ "NVM read EBUSY error, retry %d\n", ++ retry_cnt + 1); ++ ice_release_nvm(hw); ++ msleep(ICE_SQ_SEND_DELAY_TIME_MS); ++ status = ice_acquire_nvm(hw, ICE_RES_READ); ++ if (status) ++ break; ++ retry_cnt++; ++ } else { ++ bytes_read += read_size; ++ offset += read_size; ++ retry_cnt = 0; ++ } + } while (!last_cmd); + + *length = bytes_read; +@@ -457,6 +471,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) + return status; + } + ++#define check_add_overflow __builtin_add_overflow ++ + /** + * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA + * @hw: pointer to hardware structure +@@ -473,8 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type) + { + enum ice_status status; +- u16 pfa_len, pfa_ptr; +- u16 next_tlv; ++ u16 pfa_len, pfa_ptr, next_tlv, max_tlv; + + status = 
ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); + if (status != ICE_SUCCESS) { +@@ -486,38 +501,54 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); + return status; + } +- /* Starting with first TLV after PFA length, iterate through the list ++ ++ if (check_add_overflow(pfa_ptr, (u16)(pfa_len - 1), &max_tlv)) { ++ ice_debug(hw, ICE_DBG_INIT, "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", ++ pfa_ptr, pfa_len); ++ return ICE_ERR_INVAL_SIZE; ++ } ++ ++ /* The Preserved Fields Area contains a sequence of TLVs which define ++ * its contents. The PFA length includes all of the TLVs, plus its ++ * initial length word itself, *and* one final word at the end of all ++ * of the TLVs. ++ * ++ * Starting with first TLV after PFA length, iterate through the list + * of TLVs to find the requested one. + */ + next_tlv = pfa_ptr + 1; +- while (next_tlv < pfa_ptr + pfa_len) { ++ while (next_tlv < max_tlv) { + u16 tlv_sub_module_type; + u16 tlv_len; + + /* Read TLV type */ +- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); +- if (status != ICE_SUCCESS) { ++ status = ice_read_sr_word(hw, (u16)next_tlv, ++ &tlv_sub_module_type); ++ if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); + break; + } + /* Read TLV length */ +- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); ++ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len); + if (status != ICE_SUCCESS) { + ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); + break; + } + if (tlv_sub_module_type == module_type) { + if (tlv_len) { +- *module_tlv = next_tlv; ++ *module_tlv = (u16)next_tlv; + *module_tlv_len = tlv_len; + return ICE_SUCCESS; + } + return ICE_ERR_INVAL_SIZE; + } +- /* Check next TLV, i.e. current TLV pointer + length + 2 words +- * (for current TLV's type and length) +- */ +- next_tlv = next_tlv + tlv_len + 2; ++ ++ if (check_add_overflow(next_tlv, (u16)2, &next_tlv) || ++ check_add_overflow(next_tlv, tlv_len, &next_tlv)) { ++ ice_debug(hw, ICE_DBG_INIT, "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. 
The PFA starts at 0x%04x and has length of 0x%04x\n", ++ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); ++ return ICE_ERR_INVAL_SIZE; ++ } + } + /* Module does not exist */ + return ICE_ERR_DOES_NOT_EXIST; +@@ -749,7 +780,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, + orom_data, hw->flash.banks.orom_size); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n"); +- return status; ++ goto exit_error;; + } + + /* Scan the memory buffer to locate the CIVD data section */ +@@ -773,7 +804,8 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, + if (sum) { + ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", + sum); +- goto err_invalid_checksum; ++ status = ICE_ERR_NVM; ++ goto exit_error; + } + + *civd = *tmp; +@@ -781,11 +813,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, + return ICE_SUCCESS; + } + ++ status = ICE_ERR_NVM; + ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n"); + +-err_invalid_checksum: ++exit_error: + ice_free(hw, orom_data); +- return ICE_ERR_NVM; ++ return status; + } + + /** +diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +index 548ef5e820..c507f211df 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c ++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.c +@@ -2817,8 +2817,8 @@ ice_ptp_one_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd, + val &= ~TS_CMD_MASK; + val |= cmd_val; + +- status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, val, +- lock_sbq); ++ status = ice_write_phy_reg_e822_lp(hw, port, P_REG_TX_TMR_CMD, ++ val | TS_CMD_RX_TYPE, lock_sbq); + if (status) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, status %d\n", + status); +diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.h b/dpdk/drivers/net/ice/base/ice_ptp_hw.h +index 3667c9882d..f53b9e3ecc 100644 +--- a/dpdk/drivers/net/ice/base/ice_ptp_hw.h ++++ b/dpdk/drivers/net/ice/base/ice_ptp_hw.h +@@ -295,6 +295,8 @@ enum ice_status ice_ptp_init_phy_cfg(struct ice_hw *hw); + #define TS_CMD_MASK_E810 0xFF + #define TS_CMD_MASK 0xF + #define SYNC_EXEC_CMD 0x3 ++#define TS_CMD_RX_TYPE_S 0x4 ++#define TS_CMD_RX_TYPE MAKEMASK(0x18, TS_CMD_RX_TYPE_S) + + /* Macros to derive port low and high addresses on both quads */ + #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) +diff --git a/dpdk/drivers/net/ice/base/ice_sched.c b/dpdk/drivers/net/ice/base/ice_sched.c +index a4d31647fe..21cfe53a6d 100644 +--- a/dpdk/drivers/net/ice/base/ice_sched.c ++++ b/dpdk/drivers/net/ice/base/ice_sched.c +@@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, + if (!root) + return ICE_ERR_NO_MEMORY; + +- /* coverity[suspicious_sizeof] */ + root->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[0], sizeof(*root)); ++ ice_calloc(hw, hw->max_children[0], sizeof(*root->children)); + if (!root->children) { + ice_free(hw, root); + return ICE_ERR_NO_MEMORY; +@@ -186,9 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, + if (!node) + return ICE_ERR_NO_MEMORY; + if (hw->max_children[layer]) { +- /* coverity[suspicious_sizeof] */ + node->children = (struct ice_sched_node **) +- ice_calloc(hw, hw->max_children[layer], sizeof(*node)); ++ ice_calloc(hw, hw->max_children[layer], ++ sizeof(*node->children)); + if (!node->children) { + ice_free(hw, node); + return ICE_ERR_NO_MEMORY; +@@ -1069,11 +1068,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info 
*pi, + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes = num_nodes; + enum ice_status status = ICE_SUCCESS; ++ u32 temp; + + *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; +- u32 temp; + + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, +diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c +index f7fcc3a8d4..7b103e5e34 100644 +--- a/dpdk/drivers/net/ice/base/ice_switch.c ++++ b/dpdk/drivers/net/ice/base/ice_switch.c +@@ -4603,7 +4603,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + u16 vsi_handle_arr[2]; + + /* A rule already exists with the new VSI being added */ +- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) ++ if (cur_fltr->vsi_handle == new_fltr->vsi_handle) + return ICE_ERR_ALREADY_EXISTS; + + vsi_handle_arr[0] = cur_fltr->vsi_handle; +@@ -4651,7 +4651,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, + + /* A rule already exists with the new VSI being added */ + if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) +- return ICE_SUCCESS; ++ return ICE_ERR_ALREADY_EXISTS; + + /* Update the previously created VSI list set with + * the new VSI ID passed in +@@ -7390,7 +7390,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const ice_bitmap_t *profiles, + ice_xor_bitmap(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS); + + /* return number of free indexes */ +- return (u16)ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); ++ return ice_bitmap_hweight(free_idx, ICE_MAX_FV_WORDS); + } + + static void ice_set_recipe_index(unsigned long idx, u8 *bitmap) +@@ -8101,6 +8101,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + enum ice_status status = ICE_SUCCESS; + struct ice_sw_recipe *rm; + u8 i; ++ u16 cnt; + + if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) + return ICE_ERR_PARAM; +@@ -10022,8 +10023,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi, + if (!itr->vsi_list_info || + !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle)) + continue; +- /* Clearing it so that the logic can add it back */ +- ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + /* update the src in case it is VSI num */ +diff --git a/dpdk/drivers/net/ice/ice_dcf_ethdev.c b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +index 5d845bba31..a025b0ea7f 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_dcf_ethdev.c +@@ -1646,7 +1646,7 @@ ice_dcf_init_repr_info(struct ice_dcf_adapter *dcf_adapter) + dcf_adapter->real_hw.num_vfs, + sizeof(dcf_adapter->repr_infos[0]), 0); + if (!dcf_adapter->repr_infos) { +- PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors\n"); ++ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF representors"); + return -ENOMEM; + } + +@@ -2087,7 +2087,7 @@ eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv, + } + + if (dcf_adapter->real_hw.vf_vsi_map[vf_id] == dcf_vsi_id) { +- PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.\n", vf_id); ++ PMD_DRV_LOG(ERR, "VF ID %u is DCF's ID.", vf_id); + ret = -EINVAL; + break; + } +diff --git a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +index af281f069a..564ff02fd8 100644 +--- a/dpdk/drivers/net/ice/ice_dcf_vf_representor.c ++++ b/dpdk/drivers/net/ice/ice_dcf_vf_representor.c +@@ -133,7 +133,7 @@ ice_dcf_vf_repr_hw(struct ice_dcf_vf_repr *repr) + struct 
ice_dcf_adapter *dcf_adapter; + + if (!repr->dcf_valid) { +- PMD_DRV_LOG(ERR, "DCF for VF representor has been released\n"); ++ PMD_DRV_LOG(ERR, "DCF for VF representor has been released"); + return NULL; + } + +@@ -272,7 +272,7 @@ ice_dcf_vf_repr_vlan_offload_set(struct rte_eth_dev *dev, int mask) + + if (enable && repr->outer_vlan_info.port_vlan_ena) { + PMD_DRV_LOG(ERR, +- "Disable the port VLAN firstly\n"); ++ "Disable the port VLAN firstly"); + return -EINVAL; + } + +@@ -318,7 +318,7 @@ ice_dcf_vf_repr_vlan_pvid_set(struct rte_eth_dev *dev, + + if (repr->outer_vlan_info.stripping_ena) { + PMD_DRV_LOG(ERR, +- "Disable the VLAN stripping firstly\n"); ++ "Disable the VLAN stripping firstly"); + return -EINVAL; + } + +@@ -367,7 +367,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev, + + if (vlan_type != RTE_ETH_VLAN_TYPE_OUTER) { + PMD_DRV_LOG(ERR, +- "Can accelerate only outer VLAN in QinQ\n"); ++ "Can accelerate only outer VLAN in QinQ"); + return -EINVAL; + } + +@@ -375,7 +375,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev, + tpid != RTE_ETHER_TYPE_VLAN && + tpid != RTE_ETHER_TYPE_QINQ1) { + PMD_DRV_LOG(ERR, +- "Invalid TPID: 0x%04x\n", tpid); ++ "Invalid TPID: 0x%04x", tpid); + return -EINVAL; + } + +@@ -387,7 +387,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev, + true); + if (err) { + PMD_DRV_LOG(ERR, +- "Failed to reset port VLAN : %d\n", ++ "Failed to reset port VLAN : %d", + err); + return err; + } +@@ -398,7 +398,7 @@ ice_dcf_vf_repr_vlan_tpid_set(struct rte_eth_dev *dev, + RTE_ETH_VLAN_STRIP_MASK); + if (err) { + PMD_DRV_LOG(ERR, +- "Failed to reset VLAN stripping : %d\n", ++ "Failed to reset VLAN stripping : %d", + err); + return err; + } +diff --git a/dpdk/drivers/net/ice/ice_ethdev.c b/dpdk/drivers/net/ice/ice_ethdev.c +index 3ccba4db80..86f43050a5 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.c ++++ b/dpdk/drivers/net/ice/ice_ethdev.c +@@ -1804,6 +1804,7 @@ ice_pf_setup(struct ice_pf *pf) + } + + pf->main_vsi = vsi; ++ rte_spinlock_init(&pf->link_lock); + + return 0; + } +@@ -1866,7 +1867,7 @@ no_dsn: + + strncpy(pkg_file, ICE_PKG_FILE_DEFAULT, ICE_MAX_PKG_FILENAME_SIZE); + if (rte_firmware_read(pkg_file, &buf, &bufsz) < 0) { +- PMD_INIT_LOG(ERR, "failed to search file path\n"); ++ PMD_INIT_LOG(ERR, "failed to search file path"); + return -1; + } + +@@ -1875,7 +1876,7 @@ load_fw: + + err = ice_copy_and_init_pkg(hw, buf, bufsz); + if (!ice_is_init_pkg_successful(err)) { +- PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d\n", err); ++ PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d", err); + free(buf); + return -1; + } +@@ -2073,7 +2074,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) + + kvlist = rte_kvargs_parse(devargs->args, ice_valid_args); + if (kvlist == NULL) { +- PMD_INIT_LOG(ERR, "Invalid kvargs key\n"); ++ PMD_INIT_LOG(ERR, "Invalid kvargs key"); + return -EINVAL; + } + +@@ -2339,20 +2340,20 @@ ice_dev_init(struct rte_eth_dev *dev) + if (pos) { + if (rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4) < 0 || + rte_pci_read_config(pci_dev, &dsn_high, 4, pos + 8) < 0) { +- PMD_INIT_LOG(ERR, "Failed to read pci config space\n"); ++ PMD_INIT_LOG(ERR, "Failed to read pci config space"); + } else { + use_dsn = true; + dsn = (uint64_t)dsn_high << 32 | dsn_low; + } + } else { +- PMD_INIT_LOG(ERR, "Failed to read device serial number\n"); ++ PMD_INIT_LOG(ERR, "Failed to read device serial number"); + } + + ret = ice_load_pkg(pf->adapter, use_dsn, dsn); + if (ret == 0) { + ret = ice_init_hw_tbls(hw); + if (ret) { +- 
PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d\n", ret); ++ PMD_INIT_LOG(ERR, "ice_init_hw_tbls failed: %d", ret); + rte_free(hw->pkg_copy); + } + } +@@ -2404,14 +2405,14 @@ ice_dev_init(struct rte_eth_dev *dev) + + ret = ice_aq_stop_lldp(hw, true, false, NULL); + if (ret != ICE_SUCCESS) +- PMD_INIT_LOG(DEBUG, "lldp has already stopped\n"); ++ PMD_INIT_LOG(DEBUG, "lldp has already stopped"); + ret = ice_init_dcb(hw, true); + if (ret != ICE_SUCCESS) +- PMD_INIT_LOG(DEBUG, "Failed to init DCB\n"); ++ PMD_INIT_LOG(DEBUG, "Failed to init DCB"); + /* Forward LLDP packets to default VSI */ + ret = ice_vsi_config_sw_lldp(vsi, true); + if (ret != ICE_SUCCESS) +- PMD_INIT_LOG(DEBUG, "Failed to cfg lldp\n"); ++ PMD_INIT_LOG(DEBUG, "Failed to cfg lldp"); + /* register callback func to eal lib */ + rte_intr_callback_register(intr_handle, + ice_interrupt_handler, dev); +@@ -2438,7 +2439,7 @@ ice_dev_init(struct rte_eth_dev *dev) + if (hw->phy_cfg == ICE_PHY_E822) { + ret = ice_start_phy_timer_e822(hw, hw->pf_id, true); + if (ret) +- PMD_INIT_LOG(ERR, "Failed to start phy timer\n"); ++ PMD_INIT_LOG(ERR, "Failed to start phy timer"); + } + + if (!ad->is_safe_mode) { +@@ -2685,7 +2686,7 @@ ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) + status = ice_rem_rss_cfg(hw, vsi->idx, cfg); + if (status && status != ICE_ERR_DOES_NOT_EXIST) { + PMD_DRV_LOG(ERR, +- "ice_rem_rss_cfg failed for VSI:%d, error:%d\n", ++ "ice_rem_rss_cfg failed for VSI:%d, error:%d", + vsi->idx, status); + return -EBUSY; + } +@@ -2706,7 +2707,7 @@ ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg) + status = ice_add_rss_cfg(hw, vsi->idx, cfg); + if (status) { + PMD_DRV_LOG(ERR, +- "ice_add_rss_cfg failed for VSI:%d, error:%d\n", ++ "ice_add_rss_cfg failed for VSI:%d, error:%d", + vsi->idx, status); + return -EBUSY; + } +@@ -3101,7 +3102,7 @@ ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + + ret = ice_rem_rss_cfg(hw, vsi_id, cfg); + if (ret && ret != ICE_ERR_DOES_NOT_EXIST) +- PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); ++ PMD_DRV_LOG(ERR, "remove rss cfg failed"); + + ice_rem_rss_cfg_post(pf, cfg->addl_hdrs); + +@@ -3117,15 +3118,15 @@ ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + + ret = ice_add_rss_cfg_pre(pf, cfg->addl_hdrs); + if (ret) +- PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); ++ PMD_DRV_LOG(ERR, "add rss cfg pre failed"); + + ret = ice_add_rss_cfg(hw, vsi_id, cfg); + if (ret) +- PMD_DRV_LOG(ERR, "add rss cfg failed\n"); ++ PMD_DRV_LOG(ERR, "add rss cfg failed"); + + ret = ice_add_rss_cfg_post(pf, cfg); + if (ret) +- PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); ++ PMD_DRV_LOG(ERR, "add rss cfg post failed"); + + return 0; + } +@@ -3315,7 +3316,7 @@ ice_get_default_rss_key(uint8_t *rss_key, uint32_t rss_key_size) + if (rss_key_size > sizeof(default_key)) { + PMD_DRV_LOG(WARNING, + "requested size %u is larger than default %zu, " +- "only %zu bytes are gotten for key\n", ++ "only %zu bytes are gotten for key", + rss_key_size, sizeof(default_key), + sizeof(default_key)); + } +@@ -3350,12 +3351,12 @@ static int ice_init_rss(struct ice_pf *pf) + + if (nb_q == 0) { + PMD_DRV_LOG(WARNING, +- "RSS is not supported as rx queues number is zero\n"); ++ "RSS is not supported as rx queues number is zero"); + return 0; + } + + if (is_safe_mode) { +- PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode\n"); ++ PMD_DRV_LOG(WARNING, "RSS is not supported in safe mode"); + return 0; + } + +@@ -3621,17 +3622,31 @@ ice_rxq_intr_setup(struct rte_eth_dev *dev) + return 0; + } + 
++static enum ice_status ++ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse, ++ struct ice_link_status *link) ++{ ++ struct ice_hw *hw = ICE_PF_TO_HW(pf); ++ int ret; ++ ++ rte_spinlock_lock(&pf->link_lock); ++ ++ ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL); ++ ++ rte_spinlock_unlock(&pf->link_lock); ++ ++ return ret; ++} ++ + static void + ice_get_init_link_status(struct rte_eth_dev *dev) + { +- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; + struct ice_link_status link_status; + int ret; + +- ret = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ ret = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (ret != ICE_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get link info"); + pf->init_link_up = false; +@@ -3735,7 +3750,10 @@ ice_dev_start(struct rte_eth_dev *dev) + ice_set_tx_function(dev); + + mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK | +- RTE_ETH_VLAN_EXTEND_MASK | RTE_ETH_QINQ_STRIP_MASK; ++ RTE_ETH_VLAN_EXTEND_MASK; ++ if (ice_is_dvm_ena(hw)) ++ mask |= RTE_ETH_QINQ_STRIP_MASK; ++ + ret = ice_vlan_offload_set(dev, mask); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); +@@ -3876,7 +3894,11 @@ ice_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; + dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; + } + +@@ -3996,7 +4018,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + { + #define CHECK_INTERVAL 50 /* 50ms */ + #define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ +- struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); ++ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_link_status link_status; + struct rte_eth_link link, old; + int status; +@@ -4010,8 +4032,7 @@ ice_link_update(struct rte_eth_dev *dev, int wait_to_complete) + + do { + /* Get link status information from hardware */ +- status = ice_aq_get_link_info(hw->port_info, enable_lse, +- &link_status, NULL); ++ status = ice_get_link_info_safe(pf, enable_lse, &link_status); + if (status != ICE_SUCCESS) { + link.link_speed = RTE_ETH_SPEED_NUM_100M; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; +@@ -4181,7 +4202,7 @@ ice_phy_conf_link(struct ice_hw *hw, + cfg.phy_type_low = phy_type_low & phy_caps->phy_type_low; + cfg.phy_type_high = phy_type_high & phy_caps->phy_type_high; + } else { +- PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n"); ++ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!"); + cfg.phy_type_low = phy_caps->phy_type_low; + cfg.phy_type_high = phy_caps->phy_type_high; + } +@@ -4802,19 +4823,35 @@ ice_vlan_offload_set(struct rte_eth_dev *dev, int mask) + ice_vsi_config_vlan_filter(vsi, false); + } + +- if (mask & RTE_ETH_VLAN_STRIP_MASK) { +- if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- ice_vsi_config_vlan_stripping(vsi, true); +- else +- ice_vsi_config_vlan_stripping(vsi, false); +- } ++ struct ice_hw *hw = ICE_VSI_TO_HW(vsi); ++ if (!ice_is_dvm_ena(hw)) { ++ if (mask & RTE_ETH_VLAN_STRIP_MASK) { ++ if 
(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) ++ ice_vsi_config_vlan_stripping(vsi, true); ++ else ++ ice_vsi_config_vlan_stripping(vsi, false); ++ } + +- if (mask & RTE_ETH_QINQ_STRIP_MASK) { +- /* Enable or disable outer VLAN stripping */ +- if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) +- ice_vsi_config_outer_vlan_stripping(vsi, true); +- else +- ice_vsi_config_outer_vlan_stripping(vsi, false); ++ if (mask & RTE_ETH_QINQ_STRIP_MASK) { ++ PMD_DRV_LOG(ERR, "Single VLAN mode (SVM) does not support qinq"); ++ return -ENOTSUP; ++ } ++ } else { ++ if ((mask & RTE_ETH_VLAN_STRIP_MASK) | ++ (mask & RTE_ETH_QINQ_STRIP_MASK)) { ++ if (rxmode->offloads & (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | ++ RTE_ETH_RX_OFFLOAD_QINQ_STRIP)) ++ ice_vsi_config_outer_vlan_stripping(vsi, true); ++ else ++ ice_vsi_config_outer_vlan_stripping(vsi, false); ++ } ++ ++ if (mask & RTE_ETH_QINQ_STRIP_MASK) { ++ if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP) ++ ice_vsi_config_vlan_stripping(vsi, true); ++ else ++ ice_vsi_config_vlan_stripping(vsi, false); ++ } + } + + return 0; +@@ -5620,7 +5657,7 @@ ice_get_module_info(struct rte_eth_dev *dev, + } + break; + default: +- PMD_DRV_LOG(WARNING, "SFF Module Type not recognized.\n"); ++ PMD_DRV_LOG(WARNING, "SFF Module Type not recognized."); + return -EINVAL; + } + return 0; +@@ -5691,7 +5728,7 @@ ice_get_module_eeprom(struct rte_eth_dev *dev, + 0, NULL); + PMD_DRV_LOG(DEBUG, "SFF %02X %02X %02X %X = " + "%02X%02X%02X%02X." +- "%02X%02X%02X%02X (%X)\n", ++ "%02X%02X%02X%02X (%X)", + addr, offset, page, is_sfp, + value[0], value[1], + value[2], value[3], +diff --git a/dpdk/drivers/net/ice/ice_ethdev.h b/dpdk/drivers/net/ice/ice_ethdev.h +index abe6dcdc23..d607f028e0 100644 +--- a/dpdk/drivers/net/ice/ice_ethdev.h ++++ b/dpdk/drivers/net/ice/ice_ethdev.h +@@ -548,6 +548,10 @@ struct ice_pf { + uint64_t rss_hf; + struct ice_tm_conf tm_conf; + uint16_t outer_ethertype; ++ /* lock prevent race condition between lsc interrupt handler ++ * and link status update during dev_start. 
++ */ ++ rte_spinlock_t link_lock; + }; + + #define ICE_MAX_QUEUE_NUM 2048 +diff --git a/dpdk/drivers/net/ice/ice_fdir_filter.c b/dpdk/drivers/net/ice/ice_fdir_filter.c +index 0b7920ad44..dd9130ace3 100644 +--- a/dpdk/drivers/net/ice/ice_fdir_filter.c ++++ b/dpdk/drivers/net/ice/ice_fdir_filter.c +@@ -334,7 +334,7 @@ ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id) + } + + if (!counter_free) { +- PMD_DRV_LOG(ERR, "No free counter found\n"); ++ PMD_DRV_LOG(ERR, "No free counter found"); + return NULL; + } + +diff --git a/dpdk/drivers/net/ice/ice_hash.c b/dpdk/drivers/net/ice/ice_hash.c +index f923641533..dad117679d 100644 +--- a/dpdk/drivers/net/ice/ice_hash.c ++++ b/dpdk/drivers/net/ice/ice_hash.c +@@ -650,10 +650,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + uint8_t *pkt_buf, *msk_buf; + uint8_t tmp_val = 0; + uint8_t tmp_c = 0; +- int i, j; ++ int i, j, ret = 0; + + if (ad->psr == NULL) +- return -rte_errno; ++ return -ENOTSUP; + + raw_spec = item->spec; + raw_mask = item->mask; +@@ -670,8 +670,10 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + return -ENOMEM; + + msk_buf = rte_zmalloc(NULL, pkt_len, 0); +- if (!msk_buf) ++ if (!msk_buf) { ++ rte_free(pkt_buf); + return -ENOMEM; ++ } + + /* convert string to int array */ + for (i = 0, j = 0; i < spec_len; i += 2, j++) { +@@ -708,18 +710,22 @@ ice_hash_parse_raw_pattern(struct ice_adapter *ad, + msk_buf[j] = tmp_val * 16 + tmp_c - '0'; + } + +- if (ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt)) +- return -rte_errno; ++ ret = ice_parser_run(ad->psr, pkt_buf, pkt_len, &rslt); ++ if (ret) ++ goto free_mem; + +- if (ice_parser_profile_init(&rslt, pkt_buf, msk_buf, +- pkt_len, ICE_BLK_RSS, true, &prof)) +- return -rte_errno; ++ ret = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, ++ pkt_len, ICE_BLK_RSS, true, &prof); ++ if (ret) ++ goto free_mem; + + rte_memcpy(&meta->raw.prof, &prof, sizeof(prof)); + ++free_mem: + rte_free(pkt_buf); + rte_free(msk_buf); +- return 0; ++ ++ return ret; + } + + static void +@@ -1236,13 +1242,13 @@ ice_hash_add_raw_cfg(struct ice_adapter *ad, + ice_get_hw_vsi_num(hw, vsi_handle), + id); + if (ret) { +- PMD_DRV_LOG(ERR, "remove RSS flow failed\n"); ++ PMD_DRV_LOG(ERR, "remove RSS flow failed"); + return ret; + } + + ret = ice_rem_prof(hw, ICE_BLK_RSS, id); + if (ret) { +- PMD_DRV_LOG(ERR, "remove RSS profile failed\n"); ++ PMD_DRV_LOG(ERR, "remove RSS profile failed"); + return ret; + } + } +@@ -1250,7 +1256,7 @@ ice_hash_add_raw_cfg(struct ice_adapter *ad, + /* add new profile */ + ret = ice_flow_set_hw_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS); + if (ret) { +- PMD_DRV_LOG(ERR, "HW profile add failed\n"); ++ PMD_DRV_LOG(ERR, "HW profile add failed"); + return ret; + } + +@@ -1372,7 +1378,7 @@ ice_hash_rem_raw_cfg(struct ice_adapter *ad, + return 0; + + err: +- PMD_DRV_LOG(ERR, "HW profile remove failed\n"); ++ PMD_DRV_LOG(ERR, "HW profile remove failed"); + return ret; + } + +diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c +index 73e47ae92d..644d106814 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx.c ++++ b/dpdk/drivers/net/ice/ice_rxtx.c +@@ -1123,6 +1123,10 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + tx_queue_id); + return -EINVAL; + } ++ if (txq->qtx_tail == NULL) { ++ PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); ++ return 0; ++ } + vsi = txq->vsi; + + q_ids[0] = txq->reg_idx; +@@ -1137,6 +1141,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + } + + 
txq->tx_rel_mbufs(txq); ++ txq->qtx_tail = NULL; + + return 0; + } +@@ -2734,9 +2739,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, + * Calculate the tunneling UDP checksum. + * Shall be set only if L4TUNT = 01b and EIPT is not zero + */ +- if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) && +- (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && +- (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) ++ if ((*cd_tunneling & ICE_TXD_CTX_QW0_EIPT_M) && ++ (*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING) && ++ (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) + *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; + } + +@@ -2822,7 +2827,7 @@ ice_xmit_cleanup(struct ice_tx_queue *txq) + if (!(txd[desc_to_clean_to].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))) { + PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " +- "(port=%d queue=%d) value=0x%"PRIx64"\n", ++ "(port=%d queue=%d) value=0x%"PRIx64, + desc_to_clean_to, + txq->port_id, txq->queue_id, + txd[desc_to_clean_to].cmd_type_offset_bsz); +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +index 6f6d790967..d6e88dbb29 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx2.c +@@ -254,62 +254,30 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, + _mm256_loadu_si256((void *)&sw_ring[i + 4])); + #endif + +- __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +-#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC +- /* for AVX we need alignment otherwise loads are not atomic */ +- if (avx_aligned) { +- /* load in descriptors, 2 at a time, in reverse order */ +- raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); +- } else +-#endif +- { +- const __m128i raw_desc7 = +- _mm_load_si128((void *)(rxdp + 7)); +- rte_compiler_barrier(); +- const __m128i raw_desc6 = +- _mm_load_si128((void *)(rxdp + 6)); +- rte_compiler_barrier(); +- const __m128i raw_desc5 = +- _mm_load_si128((void *)(rxdp + 5)); +- rte_compiler_barrier(); +- const __m128i raw_desc4 = +- _mm_load_si128((void *)(rxdp + 4)); +- rte_compiler_barrier(); +- const __m128i raw_desc3 = +- _mm_load_si128((void *)(rxdp + 3)); +- rte_compiler_barrier(); +- const __m128i raw_desc2 = +- _mm_load_si128((void *)(rxdp + 2)); +- rte_compiler_barrier(); +- const __m128i raw_desc1 = +- _mm_load_si128((void *)(rxdp + 1)); +- rte_compiler_barrier(); +- const __m128i raw_desc0 = +- _mm_load_si128((void *)(rxdp + 0)); +- +- raw_desc6_7 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc6), +- raw_desc7, 1); +- raw_desc4_5 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc4), +- raw_desc5, 1); +- raw_desc2_3 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc2), +- raw_desc3, 1); +- raw_desc0_1 = +- _mm256_inserti128_si256 +- (_mm256_castsi128_si256(raw_desc0), +- raw_desc1, 1); +- } ++ const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); ++ 
rte_compiler_barrier(); ++ const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); ++ rte_compiler_barrier(); ++ const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); ++ ++ const __m256i raw_desc6_7 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); ++ const __m256i raw_desc4_5 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); ++ const __m256i raw_desc2_3 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); ++ const __m256i raw_desc0_1 = ++ _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + + if (split_packet) { + int j; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +index 04148e8ea2..add095ef06 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +@@ -907,6 +907,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -916,6 +917,12 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ const __m512i b = _mm512_loadu_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +index 55840cf170..4b73465af5 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h +@@ -251,6 +251,10 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq) + RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ + RTE_ETH_TX_OFFLOAD_TCP_TSO | \ ++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ ++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) + + #define ICE_TX_VECTOR_OFFLOAD ( \ +diff --git a/dpdk/drivers/net/ice/ice_tm.c b/dpdk/drivers/net/ice/ice_tm.c +index f5ea47ae83..65b9fdf320 100644 +--- a/dpdk/drivers/net/ice/ice_tm.c ++++ b/dpdk/drivers/net/ice/ice_tm.c +@@ -58,8 +58,15 @@ void + ice_tm_conf_uninit(struct rte_eth_dev *dev) + { + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); ++ struct ice_tm_shaper_profile *shaper_profile; + struct ice_tm_node *tm_node; + ++ /* clear profile */ ++ while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { ++ TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); ++ rte_free(shaper_profile); ++ } ++ + /* clear node configuration */ + while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { + TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); +@@ -648,6 +655,8 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + uint16_t buf_size = ice_struct_size(buf, txqs, 1); + + buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf)); ++ if (buf == NULL) ++ return -ENOMEM; + + queue_parent_node = queue_sched_node->parent; + 
buf->src_teid = queue_parent_node->info.node_teid; +@@ -659,6 +668,7 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + NULL, buf, buf_size, &txqs_moved, NULL); + if (ret || txqs_moved == 0) { + PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id); ++ rte_free(buf); + return ICE_ERR_PARAM; + } + +@@ -668,12 +678,14 @@ static int ice_move_recfg_lan_txq(struct rte_eth_dev *dev, + } else { + PMD_DRV_LOG(ERR, "invalid children number %d for queue %u", + queue_parent_node->num_children, queue_id); ++ rte_free(buf); + return ICE_ERR_PARAM; + } + dst_node->children[dst_node->num_children++] = queue_sched_node; + queue_sched_node->parent = dst_node; + ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info); + ++ rte_free(buf); + return ret; + } + +diff --git a/dpdk/drivers/net/igc/igc_ethdev.c b/dpdk/drivers/net/igc/igc_ethdev.c +index 58c4f80927..690736b6d1 100644 +--- a/dpdk/drivers/net/igc/igc_ethdev.c ++++ b/dpdk/drivers/net/igc/igc_ethdev.c +@@ -2853,7 +2853,7 @@ eth_igc_timesync_disable(struct rte_eth_dev *dev) + IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, 0); + + val = IGC_READ_REG(hw, IGC_RXPBS); +- val &= IGC_RXPBS_CFG_TS_EN; ++ val &= ~IGC_RXPBS_CFG_TS_EN; + IGC_WRITE_REG(hw, IGC_RXPBS, val); + + val = IGC_READ_REG(hw, IGC_SRRCTL(0)); +diff --git a/dpdk/drivers/net/igc/igc_txrx.c b/dpdk/drivers/net/igc/igc_txrx.c +index 5c60e3e997..a54c4681f7 100644 +--- a/dpdk/drivers/net/igc/igc_txrx.c ++++ b/dpdk/drivers/net/igc/igc_txrx.c +@@ -347,6 +347,13 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + data_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len; ++ /* ++ * When the RTE_ETH_RX_OFFLOAD_TIMESTAMP offload is enabled the ++ * length in the descriptor still accounts for the timestamp so ++ * it must be subtracted. ++ */ ++ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ++ data_len -= IGC_TS_HDR_LEN; + rxm->data_len = data_len; + rxm->pkt_len = data_len; + rxm->nb_segs = 1; +@@ -509,6 +516,24 @@ next_desc: + */ + rxm->data_off = RTE_PKTMBUF_HEADROOM; + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); ++ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ /* ++ * When the RTE_ETH_RX_OFFLOAD_TIMESTAMP offload is enabled ++ * the pkt_addr of all software ring entries is moved forward ++ * by IGC_TS_HDR_LEN (see igc_alloc_rx_queue_mbufs()) so that ++ * when the hardware writes the packet with a prepended ++ * timestamp the actual packet data still starts at the ++ * normal data offset. The length in the descriptor still ++ * accounts for the timestamp so it needs to be subtracted. ++ * Follow-up mbufs do not have the timestamp so the data ++ * offset must be adjusted to point to the start of the packet ++ * data. 
++ */ ++ if (first_seg == NULL) ++ data_len -= IGC_TS_HDR_LEN; ++ else ++ rxm->data_off -= IGC_TS_HDR_LEN; ++ } + rxm->data_len = data_len; + + /* +@@ -557,6 +582,7 @@ next_desc: + last_seg->data_len = last_seg->data_len - + (RTE_ETHER_CRC_LEN - data_len); + last_seg->next = NULL; ++ rxm = last_seg; + } else { + rxm->data_len = (uint16_t) + (data_len - RTE_ETHER_CRC_LEN); +diff --git a/dpdk/drivers/net/ionic/ionic_ethdev.c b/dpdk/drivers/net/ionic/ionic_ethdev.c +index 340fd0cd59..4ec9598b8e 100644 +--- a/dpdk/drivers/net/ionic/ionic_ethdev.c ++++ b/dpdk/drivers/net/ionic/ionic_ethdev.c +@@ -561,7 +561,7 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); + struct ionic_adapter *adapter = lif->adapter; + struct ionic_identity *ident = &adapter->ident; +- int i, num; ++ int i, j, num; + uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz); + + IONIC_PRINT_CALL(); +@@ -582,9 +582,10 @@ ionic_dev_rss_reta_query(struct rte_eth_dev *eth_dev, + num = reta_size / RTE_ETH_RETA_GROUP_SIZE; + + for (i = 0; i < num; i++) { +- memcpy(reta_conf->reta, +- &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE], +- RTE_ETH_RETA_GROUP_SIZE); ++ for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { ++ reta_conf->reta[j] = ++ lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j]; ++ } + reta_conf++; + } + +@@ -969,19 +970,21 @@ ionic_dev_close(struct rte_eth_dev *eth_dev) + + ionic_lif_stop(lif); + +- ionic_lif_free_queues(lif); +- + IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name); + if (adapter->intf->unconfigure_intr) + (*adapter->intf->unconfigure_intr)(adapter); + +- rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); +- + ionic_port_reset(adapter); + ionic_reset(adapter); ++ ++ ionic_lif_free_queues(lif); ++ ionic_lif_deinit(lif); ++ ionic_lif_free(lif); /* Does not free LIF object */ ++ + if (adapter->intf->unmap_bars) + (*adapter->intf->unmap_bars)(adapter); + ++ lif->adapter = NULL; + rte_free(adapter); + + return 0; +@@ -1058,21 +1061,18 @@ err: + static int + eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev) + { +- struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); +- struct ionic_adapter *adapter = lif->adapter; +- + IONIC_PRINT_CALL(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + +- adapter->lif = NULL; +- +- ionic_lif_deinit(lif); +- ionic_lif_free(lif); ++ if (eth_dev->state != RTE_ETH_DEV_UNUSED) ++ ionic_dev_close(eth_dev); + +- if (!(lif->state & IONIC_LIF_F_FW_RESET)) +- ionic_lif_reset(lif); ++ eth_dev->dev_ops = NULL; ++ eth_dev->rx_pkt_burst = NULL; ++ eth_dev->tx_pkt_burst = NULL; ++ eth_dev->tx_pkt_prepare = NULL; + + return 0; + } +@@ -1227,17 +1227,18 @@ eth_ionic_dev_remove(struct rte_device *rte_dev) + { + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *eth_dev; ++ int ret = 0; + + /* Adapter lookup is using the eth_dev name */ + snprintf(name, sizeof(name), "%s_lif", rte_dev->name); + + eth_dev = rte_eth_dev_allocated(name); + if (eth_dev) +- ionic_dev_close(eth_dev); ++ ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); + else + IONIC_PRINT(DEBUG, "Cannot find device %s", rte_dev->name); + +- return 0; ++ return ret; + } + + RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE); +diff --git a/dpdk/drivers/net/ionic/ionic_osdep.h b/dpdk/drivers/net/ionic/ionic_osdep.h +index 68f767b920..97188dfd59 100644 +--- a/dpdk/drivers/net/ionic/ionic_osdep.h ++++ b/dpdk/drivers/net/ionic/ionic_osdep.h +@@ -30,14 +30,28 @@ + + #define __iomem + +-typedef uint8_t u8; +-typedef uint16_t 
u16; +-typedef uint32_t u32; +-typedef uint64_t u64; +- +-typedef uint16_t __le16; +-typedef uint32_t __le32; +-typedef uint64_t __le64; ++#ifndef u8 ++#define u8 uint8_t ++#endif ++#ifndef u16 ++#define u16 uint16_t ++#endif ++#ifndef u32 ++#define u32 uint32_t ++#endif ++#ifndef u64 ++#define u64 uint64_t ++#endif ++ ++#ifndef __le16 ++#define __le16 rte_le16_t ++#endif ++#ifndef __le32 ++#define __le32 rte_le32_t ++#endif ++#ifndef __le64 ++#define __le64 rte_le64_t ++#endif + + #define ioread8(reg) rte_read8(reg) + #define ioread32(reg) rte_read32(rte_le_to_cpu_32(reg)) +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c +index b9e73b4871..170d3b0802 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx.c +@@ -26,38 +26,40 @@ + #include "ionic_logs.h" + + static void +-ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) ++ionic_empty_array(void **array, uint32_t free_idx, uint32_t zero_idx) + { + uint32_t i; + +- for (i = idx; i < cnt; i++) ++ for (i = 0; i < free_idx; i++) + if (array[i]) + rte_pktmbuf_free_seg(array[i]); + +- memset(array, 0, sizeof(void *) * cnt); ++ memset(array, 0, sizeof(void *) * zero_idx); + } + + static void __rte_cold + ionic_tx_empty(struct ionic_tx_qcq *txq) + { + struct ionic_queue *q = &txq->qcq.q; ++ uint32_t info_len = q->num_descs * q->num_segs; + +- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); ++ ionic_empty_array(q->info, info_len, info_len); + } + + static void __rte_cold + ionic_rx_empty(struct ionic_rx_qcq *rxq) + { + struct ionic_queue *q = &rxq->qcq.q; ++ uint32_t info_len = q->num_descs * q->num_segs; + + /* + * Walk the full info array so that the clean up includes any + * fragments that were left dangling for later reuse + */ +- ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); ++ ionic_empty_array(q->info, info_len, info_len); + +- ionic_empty_array((void **)rxq->mbs, +- IONIC_MBUF_BULK_ALLOC, rxq->mb_idx); ++ ionic_empty_array((void **)rxq->mbs, rxq->mb_idx, ++ IONIC_MBUF_BULK_ALLOC); + rxq->mb_idx = 0; + } + +@@ -752,7 +754,7 @@ ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) + { + struct ionic_rx_qcq *rxq = rx_queue; + struct ionic_qcq *qcq = &rxq->qcq; +- struct ionic_rxq_comp *cq_desc; ++ volatile struct ionic_rxq_comp *cq_desc; + uint16_t mask, head, tail, pos; + bool done_color; + +@@ -791,7 +793,7 @@ ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) + { + struct ionic_tx_qcq *txq = tx_queue; + struct ionic_qcq *qcq = &txq->qcq; +- struct ionic_txq_comp *cq_desc; ++ volatile struct ionic_txq_comp *cq_desc; + uint16_t mask, head, tail, pos, cq_pos; + bool done_color; + +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c +index ab8e56e91c..241b6f8587 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx_sg.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx_sg.c +@@ -27,7 +27,8 @@ ionic_tx_flush_sg(struct ionic_tx_qcq *txq) + struct ionic_cq *cq = &txq->qcq.cq; + struct ionic_queue *q = &txq->qcq.q; + struct rte_mbuf *txm; +- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_txq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_txq_comp *cq_desc; + void **info; + uint32_t i; + +@@ -252,7 +253,7 @@ ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + static __rte_always_inline void + ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq, +- struct ionic_rxq_comp *cq_desc, ++ volatile struct ionic_rxq_comp *cq_desc, + struct ionic_rx_service 
*rx_svc) + { + struct ionic_queue *q = &rxq->qcq.q; +@@ -438,7 +439,8 @@ ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do, + struct ionic_cq *cq = &rxq->qcq.cq; + struct ionic_queue *q = &rxq->qcq.q; + struct ionic_rxq_desc *q_desc_base = q->base; +- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_rxq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_rxq_comp *cq_desc; + uint32_t work_done = 0; + uint64_t then, now, hz, delta; + +diff --git a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c +index 5f81856256..0992177afc 100644 +--- a/dpdk/drivers/net/ionic/ionic_rxtx_simple.c ++++ b/dpdk/drivers/net/ionic/ionic_rxtx_simple.c +@@ -27,7 +27,8 @@ ionic_tx_flush(struct ionic_tx_qcq *txq) + struct ionic_cq *cq = &txq->qcq.cq; + struct ionic_queue *q = &txq->qcq.q; + struct rte_mbuf *txm; +- struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_txq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_txq_comp *cq_desc; + void **info; + + cq_desc = &cq_desc_base[cq->tail_idx]; +@@ -225,7 +226,7 @@ ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + */ + static __rte_always_inline void + ionic_rx_clean_one(struct ionic_rx_qcq *rxq, +- struct ionic_rxq_comp *cq_desc, ++ volatile struct ionic_rxq_comp *cq_desc, + struct ionic_rx_service *rx_svc) + { + struct ionic_queue *q = &rxq->qcq.q; +@@ -359,7 +360,8 @@ ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, + struct ionic_cq *cq = &rxq->qcq.cq; + struct ionic_queue *q = &rxq->qcq.q; + struct ionic_rxq_desc *q_desc_base = q->base; +- struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; ++ struct ionic_rxq_comp *cq_desc_base = cq->base; ++ volatile struct ionic_rxq_comp *cq_desc; + uint32_t work_done = 0; + uint64_t then, now, hz, delta; + +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c +index 2c15611a23..baae80d661 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_ethdev.c +@@ -203,7 +203,7 @@ ipn3ke_vbng_init_done(struct ipn3ke_hw *hw) + } + + if (!timeout) { +- IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout.\n"); ++ IPN3KE_AFU_PMD_ERR("IPN3KE vBNG INIT timeout."); + return -1; + } + +@@ -348,7 +348,7 @@ ipn3ke_hw_init(struct rte_afu_device *afu_dev, + hw->acc_tm = 1; + hw->acc_flow = 1; + +- IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x\n", ++ IPN3KE_AFU_PMD_DEBUG("UPL_version is 0x%x", + IPN3KE_READ_REG(hw, 0)); + } + +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c b/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c +index d20a29b9a2..a2f76268b5 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c +@@ -993,7 +993,7 @@ ipn3ke_flow_hw_update(struct ipn3ke_hw *hw, + uint32_t time_out = MHL_COMMAND_TIME_COUNT; + uint32_t i; + +- IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n"); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start"); + + pdata = (uint32_t *)flow->rule.key; + IPN3KE_AFU_PMD_DEBUG(" - key :"); +@@ -1003,7 +1003,6 @@ ipn3ke_flow_hw_update(struct ipn3ke_hw *hw, + + for (i = 0; i < 4; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i])); +- IPN3KE_AFU_PMD_DEBUG("\n"); + + pdata = (uint32_t *)flow->rule.result; + IPN3KE_AFU_PMD_DEBUG(" - result:"); +@@ -1013,7 +1012,7 @@ ipn3ke_flow_hw_update(struct ipn3ke_hw *hw, + + for (i = 0; i < 1; i++) + IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n"); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump 
end"); + + pdata = (uint32_t *)flow->rule.key; + +@@ -1254,7 +1253,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_RX_TEST, + 0, + 0x1); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x", data); + + /* configure base mac address */ + IPN3KE_MASK_WRITE_REG(hw, +@@ -1268,7 +1267,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, + 0, + 0xFFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x", data); + + IPN3KE_MASK_WRITE_REG(hw, + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, +@@ -1281,7 +1280,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, + 0, + 0xFFFFFFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x", data); + + + /* configure hash lookup rules enable */ +@@ -1296,7 +1295,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_LKUP_ENABLE, + 0, + 0xFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x", data); + + + /* configure rx parse config, settings associated with VxLAN */ +@@ -1311,7 +1310,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_RX_PARSE_CFG, + 0, + 0x3FFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x", data); + + + /* configure QinQ S-Tag */ +@@ -1326,7 +1325,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_QINQ_STAG, + 0, + 0xFFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x", data); + + + /* configure gen ctrl */ +@@ -1341,7 +1340,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_MHL_GEN_CTRL, + 0, + 0x1F); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x", data); + + + /* clear monitoring register */ +@@ -1356,7 +1355,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_MHL_MON_0, + 0, + 0xFFFFFFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x", data); + + + ipn3ke_flow_hw_flush(hw); +@@ -1366,7 +1365,7 @@ int ipn3ke_flow_init(void *dev) + IPN3KE_CLF_EM_NUM, + 0, + 0xFFFFFFFF); +- IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EN_NUM: %x\n", hw->flow_max_entries); ++ IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EN_NUM: %x", hw->flow_max_entries); + hw->flow_num_entries = 0; + + return 0; +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +index 8145f1bb2a..feb57420c3 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_representor.c +@@ -2401,8 +2401,8 @@ ipn3ke_update_link(struct rte_rawdev *rawdev, + else + link->link_status = 0; + +- IPN3KE_AFU_PMD_DEBUG("port is %d\n", port); +- IPN3KE_AFU_PMD_DEBUG("link->link_status is %d\n", link->link_status); ++ IPN3KE_AFU_PMD_DEBUG("port is %d", port); ++ IPN3KE_AFU_PMD_DEBUG("link->link_status is %d", link->link_status); + + rawdev->dev_ops->attr_get(rawdev, + "LineSideLinkSpeed", +@@ -2479,14 +2479,14 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev, + + if (!rpst->ori_linfo.link_status && + link.link_status) { +- IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up\n", rpst->port_id); ++ IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Up", rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + 
rte_eth_linkstatus_set(ethdev, &link); + + if (rpst->i40e_pf_eth) { +- IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up\n", ++ IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Up", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; +@@ -2494,7 +2494,7 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev, + } + } else if (rpst->ori_linfo.link_status && + !link.link_status) { +- IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down\n", ++ IPN3KE_AFU_PMD_DEBUG("Update Rpst %d Down", + rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; +@@ -2502,7 +2502,7 @@ ipn3ke_rpst_link_update(struct rte_eth_dev *ethdev, + rte_eth_linkstatus_set(ethdev, &link); + + if (rpst->i40e_pf_eth) { +- IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down\n", ++ IPN3KE_AFU_PMD_DEBUG("Update FVL PF %d Down", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; +@@ -2537,14 +2537,14 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst) + + if (!rpst->ori_linfo.link_status && + link.link_status) { +- IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up\n", rpst->port_id); ++ IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Up", rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(rpst->ethdev, &link); + + if (rpst->i40e_pf_eth) { +- IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up\n", ++ IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Up", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_up(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; +@@ -2552,14 +2552,14 @@ ipn3ke_rpst_link_check(struct ipn3ke_rpst *rpst) + } + } else if (rpst->ori_linfo.link_status && + !link.link_status) { +- IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down\n", rpst->port_id); ++ IPN3KE_AFU_PMD_DEBUG("Check Rpst %d Down", rpst->port_id); + rpst->ori_linfo.link_status = link.link_status; + rpst->ori_linfo.link_speed = link.link_speed; + + rte_eth_linkstatus_set(rpst->ethdev, &link); + + if (rpst->i40e_pf_eth) { +- IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down\n", ++ IPN3KE_AFU_PMD_DEBUG("Check FVL PF %d Down", + rpst->i40e_pf_eth_port_id); + rte_eth_dev_set_link_down(rpst->i40e_pf_eth_port_id); + pf = rpst->i40e_pf_eth; +diff --git a/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c b/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c +index 0260227900..44a8b88699 100644 +--- a/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c ++++ b/dpdk/drivers/net/ipn3ke/ipn3ke_tm.c +@@ -1934,10 +1934,10 @@ ipn3ke_tm_show(struct rte_eth_dev *dev) + + tm_id = tm->tm_id; + +- IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***\n", tm_id); ++ IPN3KE_AFU_PMD_DEBUG("***HQoS Tree(%d)***", tm_id); + + port_n = tm->h.port_node; +- IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)\n", port_n->node_index, ++ IPN3KE_AFU_PMD_DEBUG("Port: (%d|%s)", port_n->node_index, + str_state[port_n->node_state]); + + vt_nl = &tm->h.port_node->children_node_list; +@@ -1951,7 +1951,6 @@ ipn3ke_tm_show(struct rte_eth_dev *dev) + cos_n->node_index, + str_state[cos_n->node_state]); + } +- IPN3KE_AFU_PMD_DEBUG("\n"); + } + } + +@@ -1969,14 +1968,13 @@ ipn3ke_tm_show_commmit(struct rte_eth_dev *dev) + + tm_id = tm->tm_id; + +- IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***\n", tm_id); ++ IPN3KE_AFU_PMD_DEBUG("***Commit Tree(%d)***", tm_id); + n = tm->h.port_commit_node; + IPN3KE_AFU_PMD_DEBUG("Port: "); + if (n) + IPN3KE_AFU_PMD_DEBUG("(%d|%s)", + n->node_index, + str_state[n->node_state]); +- IPN3KE_AFU_PMD_DEBUG("\n"); + + nl = &tm->h.vt_commit_node_list; + 
IPN3KE_AFU_PMD_DEBUG("VT : "); +@@ -1985,7 +1983,6 @@ ipn3ke_tm_show_commmit(struct rte_eth_dev *dev) + n->node_index, + str_state[n->node_state]); + } +- IPN3KE_AFU_PMD_DEBUG("\n"); + + nl = &tm->h.cos_commit_node_list; + IPN3KE_AFU_PMD_DEBUG("COS : "); +@@ -1994,7 +1991,6 @@ ipn3ke_tm_show_commmit(struct rte_eth_dev *dev) + n->node_index, + str_state[n->node_state]); + } +- IPN3KE_AFU_PMD_DEBUG("\n"); + } + + /* Traffic manager hierarchy commit */ +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c b/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c +index c6e8b7e976..f37d83a0ab 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c +@@ -554,13 +554,15 @@ out: + **/ + void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) + { +- u32 autoc2_reg; + u16 ee_ctrl_2 = 0; ++ u32 autoc2_reg; ++ u32 status; + + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); +- ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); ++ status = ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + +- if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ++ if (status == IXGBE_SUCCESS && ++ !ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +index 74c5db16fa..56267bb00d 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c +@@ -432,8 +432,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; +- case X550_PHY_ID2: +- case X550_PHY_ID3: ++ case X550_PHY_ID: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -915,6 +914,10 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; + + switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; ++ break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +index 1094df5891..f709681df2 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_type.h ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_type.h +@@ -1664,6 +1664,7 @@ struct ixgbe_dmac_config { + #define TN1010_PHY_ID 0x00A19410 + #define TNX_FW_REV 0xB + #define X540_PHY_ID 0x01540200 ++#define X550_PHY_ID 0x01540220 + #define X550_PHY_ID2 0x01540223 + #define X550_PHY_ID3 0x01540221 + #define X557_PHY_ID 0x01540240 +@@ -1800,7 +1801,7 @@ enum { + /* VFRE bitmask */ + #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */ + + /* RDHMPN and TDHMPN bitmasks */ + #define IXGBE_RDHMPN_RDICADDR 0x007FF800 +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +index 5e3ae1b519..11dbbe2a86 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_vf.c +@@ -585,7 +585,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; +- if (hw->mac.type >= ixgbe_mac_X550) 
{ ++ if (hw->mac.type >= ixgbe_mac_X550_vf) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } +@@ -595,7 +595,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; +- if (hw->mac.type == ixgbe_mac_X550) { ++ if (hw->mac.type == ixgbe_mac_X550_vf) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } +@@ -603,7 +603,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ +- if (hw->mac.type >= ixgbe_mac_X550) ++ if (hw->mac.type >= ixgbe_mac_X550_vf) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +index d6cf00317e..f4ec485d69 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +@@ -1154,10 +1154,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + } + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { +- PMD_INIT_LOG(ERR, "\nERROR: " +- "Firmware recovery mode detected. Limiting functionality.\n" +- "Refer to the Intel(R) Ethernet Adapters and Devices " +- "User Guide for details on firmware recovery mode."); ++ PMD_INIT_LOG(ERR, "ERROR: Firmware recovery mode detected. Limiting functionality."); + return -EIO; + } + +@@ -1190,7 +1187,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + #ifdef RTE_LIBRTE_IXGBE_BYPASS +@@ -1228,7 +1226,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); +- return -EIO; ++ ret = -EIO; ++ goto err_exit; + } + + /* Reset the hw statistics */ +@@ -1248,7 +1247,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + /* Copy the permanent MAC address */ + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, +@@ -1263,7 +1263,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; +- return -ENOMEM; ++ ret = -ENOMEM; ++ goto err_exit; + } + + /* initialize the vfta */ +@@ -1347,6 +1348,11 @@ err_pf_host_init: + eth_dev->data->mac_addrs = NULL; + rte_free(eth_dev->data->hash_mac_addrs); + eth_dev->data->hash_mac_addrs = NULL; ++err_exit: ++#ifdef RTE_LIB_SECURITY ++ rte_free(eth_dev->security_ctx); ++ eth_dev->security_ctx = NULL; ++#endif + return ret; + } + +@@ -1773,7 +1779,7 @@ eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + + if (eth_da.nb_representor_ports > 0 && + eth_da.type != RTE_ETH_REPRESENTOR_VF) { +- PMD_DRV_LOG(ERR, "unsupported representor type: %s\n", ++ PMD_DRV_LOG(ERR, "unsupported representor type: %s", + pci_dev->device.devargs->args); + return -ENOTSUP; + } 
+@@ -4280,6 +4286,9 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait = 1; + u32 esdp_reg; + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return -1; ++ + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; + link.link_speed = RTE_ETH_SPEED_NUM_NONE; +@@ -4296,11 +4305,6 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + wait = 0; + +-/* BSD has no interrupt mechanism, so force NIC status synchronization. */ +-#ifdef RTE_EXEC_ENV_FREEBSD +- wait = 1; +-#endif +- + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); + else +@@ -4654,14 +4658,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); +- if (rte_eal_alarm_set(timeout * 1000, +- ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) +- PMD_DRV_LOG(ERR, "Error setting alarm"); +- else { +- /* remember original mask */ +- intr->mask_original = intr->mask; +- /* only disable lsc interrupt */ +- intr->mask &= ~IXGBE_EIMS_LSC; ++ ++ /* Don't program delayed handler if LSC interrupt is disabled. ++ * It means one is already programmed. ++ */ ++ if (intr->mask & IXGBE_EIMS_LSC) { ++ if (rte_eal_alarm_set(timeout * 1000, ++ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) ++ PMD_DRV_LOG(ERR, "Error setting alarm"); ++ else { ++ /* remember original mask */ ++ intr->mask_original = intr->mask; ++ /* only disable lsc interrupt */ ++ intr->mask &= ~IXGBE_EIMS_LSC; ++ } + } + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c +index d331308556..3a666ba15f 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_ipsec.c +@@ -120,7 +120,7 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session) + /* Fail if no match and no free entries*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Rx IP table\n"); ++ "No free entry left in the Rx IP table"); + return -1; + } + +@@ -134,7 +134,7 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session) + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Rx SA table\n"); ++ "No free entry left in the Rx SA table"); + return -1; + } + +@@ -232,7 +232,7 @@ ixgbe_crypto_add_sa(struct ixgbe_crypto_session *ic_session) + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Tx SA table\n"); ++ "No free entry left in the Tx SA table"); + return -1; + } + +@@ -291,7 +291,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Rx IP table\n"); ++ "Entry not found in the Rx IP table"); + return -1; + } + +@@ -306,7 +306,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Rx SA table\n"); ++ "Entry not found in the Rx SA table"); + return -1; + } + +@@ -349,7 +349,7 @@ ixgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Tx SA table\n"); ++ "Entry not found in the Tx SA table"); + return -1; + } + reg_val = IPSRXIDX_WRITE | (sa_index << 3); +@@ -379,7 +379,7 @@ ixgbe_crypto_create_session(void *device, + if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD || + conf->crypto_xform->aead.algo != + 
RTE_CRYPTO_AEAD_AES_GCM) { +- PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n"); ++ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode"); + return -ENOTSUP; + } + aead_xform = &conf->crypto_xform->aead; +@@ -388,14 +388,14 @@ ixgbe_crypto_create_session(void *device, + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { + ic_session->op = IXGBE_OP_AUTHENTICATED_DECRYPTION; + } else { +- PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n"); ++ PMD_DRV_LOG(ERR, "IPsec decryption not enabled"); + return -ENOTSUP; + } + } else { + if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) { + ic_session->op = IXGBE_OP_AUTHENTICATED_ENCRYPTION; + } else { +- PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n"); ++ PMD_DRV_LOG(ERR, "IPsec encryption not enabled"); + return -ENOTSUP; + } + } +@@ -409,7 +409,7 @@ ixgbe_crypto_create_session(void *device, + + if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) { + if (ixgbe_crypto_add_sa(ic_session)) { +- PMD_DRV_LOG(ERR, "Failed to add SA\n"); ++ PMD_DRV_LOG(ERR, "Failed to add SA"); + return -EPERM; + } + } +@@ -431,12 +431,12 @@ ixgbe_crypto_remove_session(void *device, + struct ixgbe_crypto_session *ic_session = SECURITY_GET_SESS_PRIV(session); + + if (eth_dev != ic_session->dev) { +- PMD_DRV_LOG(ERR, "Session not bound to this device\n"); ++ PMD_DRV_LOG(ERR, "Session not bound to this device"); + return -ENODEV; + } + + if (ixgbe_crypto_remove_sa(eth_dev, ic_session)) { +- PMD_DRV_LOG(ERR, "Failed to remove session\n"); ++ PMD_DRV_LOG(ERR, "Failed to remove session"); + return -EFAULT; + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_pf.c b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +index 0a0f639e39..002bc71c2a 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_pf.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_pf.c +@@ -171,14 +171,14 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev) + struct ixgbe_ethertype_filter ethertype_filter; + + if (!hw->mac.ops.set_ethertype_anti_spoofing) { +- PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n"); ++ PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported."); + return; + } + + i = ixgbe_ethertype_filter_lookup(filter_info, + IXGBE_ETHERTYPE_FLOW_CTRL); + if (i >= 0) { +- PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n"); ++ PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!"); + return; + } + +@@ -191,7 +191,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev) + i = ixgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (i < 0) { +- PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n"); ++ PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control."); + return; + } + +@@ -422,7 +422,7 @@ ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf) + + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + +- PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf); ++ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous", vf); + + vmolr &= ~IXGBE_VMOLR_MPE; + +@@ -628,7 +628,7 @@ ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + break; + } + +- PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n", ++ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d", + api_version, vf); + + return -1; +@@ -677,7 +677,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + case RTE_ETH_MQ_TX_NONE: + case RTE_ETH_MQ_TX_DCB: + 
PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u" +- ", but its tx mode = %d\n", vf, ++ ", but its tx mode = %d", vf, + eth_conf->txmode.mq_mode); + return -1; + +@@ -711,7 +711,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + break; + + default: +- PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n", ++ PMD_DRV_LOG(ERR, "PF work with invalid mode = %d", + eth_conf->txmode.mq_mode); + return -1; + } +@@ -767,7 +767,7 @@ ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + if (!(fctrl & IXGBE_FCTRL_UPE)) { + /* VF promisc requires PF in promisc */ + PMD_DRV_LOG(ERR, +- "Enabling VF promisc requires PF in promisc\n"); ++ "Enabling VF promisc requires PF in promisc"); + return -1; + } + +@@ -804,7 +804,7 @@ ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + if (index) { + if (!rte_is_valid_assigned_ether_addr( + (struct rte_ether_addr *)new_mac)) { +- PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf); ++ PMD_DRV_LOG(ERR, "set invalid mac vf:%d", vf); + return -1; + } + +diff --git a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +index 90b0a7004f..f6c17d4efb 100644 +--- a/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c ++++ b/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +@@ -5844,6 +5844,25 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev) + IXGBE_PSRTYPE_RQPL_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + ++ /* Initialize the rss for x550_vf cards if enabled */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550_vf: ++ case ixgbe_mac_X550EM_x_vf: ++ case ixgbe_mac_X550EM_a_vf: ++ switch (dev->data->dev_conf.rxmode.mq_mode) { ++ case RTE_ETH_MQ_RX_RSS: ++ case RTE_ETH_MQ_RX_DCB_RSS: ++ case RTE_ETH_MQ_RX_VMDQ_RSS: ++ ixgbe_rss_configure(dev); ++ break; ++ default: ++ break; ++ } ++ break; ++ default: ++ break; ++ } ++ + ixgbe_set_rx_function(dev); + + return 0; +diff --git a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c b/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c +index f76ef63921..15c28e7a3f 100644 +--- a/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c ++++ b/dpdk/drivers/net/ixgbe/rte_pmd_ixgbe.c +@@ -955,7 +955,7 @@ STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask) + while (--retries) { + status = ixgbe_acquire_swfw_semaphore(hw, mask); + if (status) { +- PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n", ++ PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d", + status); + return status; + } +@@ -964,18 +964,18 @@ STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask) + return IXGBE_SUCCESS; + + if (status == IXGBE_ERR_TOKEN_RETRY) +- PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n", ++ PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d", + status); + + ixgbe_release_swfw_semaphore(hw, mask); + if (status != IXGBE_ERR_TOKEN_RETRY) { + PMD_DRV_LOG(ERR, +- "Retry get PHY token failed, Status=%d\n", ++ "Retry get PHY token failed, Status=%d", + status); + return status; + } + } +- PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n", ++ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X", + hw->phy.id); + return status; + } +diff --git a/dpdk/drivers/net/mana/mana.c b/dpdk/drivers/net/mana/mana.c +index 781ed76139..65ca139be5 100644 +--- a/dpdk/drivers/net/mana/mana.c ++++ b/dpdk/drivers/net/mana/mana.c +@@ -296,8 +296,8 @@ mana_dev_info_get(struct rte_eth_dev *dev, + dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE; + dev_info->max_rx_pktlen = MANA_MAX_MTU + RTE_ETHER_HDR_LEN; + +- dev_info->max_rx_queues = 
priv->max_rx_queues; +- dev_info->max_tx_queues = priv->max_tx_queues; ++ dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX); ++ dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX); + + dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR; + dev_info->max_hash_mac_addrs = 0; +@@ -338,16 +338,20 @@ mana_dev_info_get(struct rte_eth_dev *dev, + + /* Buffer limits */ + dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; +- dev_info->rx_desc_lim.nb_max = priv->max_rx_desc; ++ dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX); + dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; +- dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge; +- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; ++ dev_info->rx_desc_lim.nb_seg_max = ++ RTE_MIN(priv->max_recv_sge, UINT16_MAX); ++ dev_info->rx_desc_lim.nb_mtu_seg_max = ++ RTE_MIN(priv->max_recv_sge, UINT16_MAX); + + dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; +- dev_info->tx_desc_lim.nb_max = priv->max_tx_desc; ++ dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX); + dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; +- dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge; +- dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; ++ dev_info->tx_desc_lim.nb_seg_max = ++ RTE_MIN(priv->max_send_sge, UINT16_MAX); ++ dev_info->tx_desc_lim.nb_mtu_seg_max = ++ RTE_MIN(priv->max_send_sge, UINT16_MAX); + + /* Speed */ + dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G; +@@ -707,7 +711,7 @@ mana_dev_stats_reset(struct rte_eth_dev *dev __rte_unused) + static int + mana_get_ifname(const struct mana_priv *priv, char (*ifname)[IF_NAMESIZE]) + { +- int ret; ++ int ret = -ENODEV; + DIR *dir; + struct dirent *dent; + +@@ -1385,9 +1389,9 @@ mana_probe_port(struct ibv_device *ibdev, struct ibv_device_attr_ex *dev_attr, + priv->max_mr = dev_attr->orig_attr.max_mr; + priv->max_mr_size = dev_attr->orig_attr.max_mr_size; + +- DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d", ++ DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %" PRIu64, + name, priv->max_rx_queues, priv->max_rx_desc, +- priv->max_send_sge); ++ priv->max_send_sge, priv->max_mr_size); + + rte_eth_copy_pci_info(eth_dev, pci_dev); + +diff --git a/dpdk/drivers/net/mana/mana.h b/dpdk/drivers/net/mana/mana.h +index 6836872dc2..822b8a1f15 100644 +--- a/dpdk/drivers/net/mana/mana.h ++++ b/dpdk/drivers/net/mana/mana.h +@@ -522,9 +522,9 @@ void mana_del_pmd_mr(struct mana_mr_cache *mr); + void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque, + struct rte_mempool_memhdr *memhdr, unsigned int idx); + +-struct mana_mr_cache *mana_mr_btree_lookup(struct mana_mr_btree *bt, +- uint16_t *idx, +- uintptr_t addr, size_t len); ++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, ++ uintptr_t addr, size_t len, ++ struct mana_mr_cache **cache); + int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry); + int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket); + void mana_mr_btree_free(struct mana_mr_btree *bt); +diff --git a/dpdk/drivers/net/mana/meson.build b/dpdk/drivers/net/mana/meson.build +index 2d72eca5a8..3ddc230ab4 100644 +--- a/dpdk/drivers/net/mana/meson.build ++++ b/dpdk/drivers/net/mana/meson.build +@@ -19,12 +19,14 @@ sources += files( + ) + + libnames = ['ibverbs', 'mana'] ++libs = [] + foreach libname:libnames + lib = dependency('lib' + libname, required:false) + if not lib.found() + lib = cc.find_library(libname, required:false) + endif + if lib.found() 
++ libs += lib + ext_deps += lib + else + build = false +@@ -43,7 +45,7 @@ required_symbols = [ + ] + + foreach arg:required_symbols +- if not cc.has_header_symbol(arg[0], arg[1]) ++ if not cc.has_header_symbol(arg[0], arg[1], dependencies: libs, args: cflags) + build = false + reason = 'missing symbol "' + arg[1] + '" in "' + arg[0] + '"' + subdir_done() +diff --git a/dpdk/drivers/net/mana/mr.c b/dpdk/drivers/net/mana/mr.c +index b8e6ea0bbf..eb6d073a95 100644 +--- a/dpdk/drivers/net/mana/mr.c ++++ b/dpdk/drivers/net/mana/mr.c +@@ -40,7 +40,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + struct ibv_mr *ibv_mr; + struct mana_range ranges[pool->nb_mem_chunks]; + uint32_t i; +- struct mana_mr_cache *mr; ++ struct mana_mr_cache mr; + int ret; + + rte_mempool_mem_iter(pool, mana_mempool_chunk_cb, ranges); +@@ -75,14 +75,13 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", + ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); + +- mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); +- mr->lkey = ibv_mr->lkey; +- mr->addr = (uintptr_t)ibv_mr->addr; +- mr->len = ibv_mr->length; +- mr->verb_obj = ibv_mr; ++ mr.lkey = ibv_mr->lkey; ++ mr.addr = (uintptr_t)ibv_mr->addr; ++ mr.len = ibv_mr->length; ++ mr.verb_obj = ibv_mr; + + rte_spinlock_lock(&priv->mr_btree_lock); +- ret = mana_mr_btree_insert(&priv->mr_btree, mr); ++ ret = mana_mr_btree_insert(&priv->mr_btree, &mr); + rte_spinlock_unlock(&priv->mr_btree_lock); + if (ret) { + ibv_dereg_mr(ibv_mr); +@@ -90,7 +89,7 @@ mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv, + return ret; + } + +- ret = mana_mr_btree_insert(local_tree, mr); ++ ret = mana_mr_btree_insert(local_tree, &mr); + if (ret) { + /* Don't need to clean up MR as it's already + * in the global tree +@@ -138,8 +137,12 @@ mana_find_pmd_mr(struct mana_mr_btree *local_mr_btree, struct mana_priv *priv, + + try_again: + /* First try to find the MR in local queue tree */ +- mr = mana_mr_btree_lookup(local_mr_btree, &idx, +- (uintptr_t)mbuf->buf_addr, mbuf->buf_len); ++ ret = mana_mr_btree_lookup(local_mr_btree, &idx, ++ (uintptr_t)mbuf->buf_addr, mbuf->buf_len, ++ &mr); ++ if (ret) ++ return NULL; ++ + if (mr) { + DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu", + mr->lkey, mr->addr, mr->len); +@@ -148,11 +151,14 @@ try_again: + + /* If not found, try to find the MR in global tree */ + rte_spinlock_lock(&priv->mr_btree_lock); +- mr = mana_mr_btree_lookup(&priv->mr_btree, &idx, +- (uintptr_t)mbuf->buf_addr, +- mbuf->buf_len); ++ ret = mana_mr_btree_lookup(&priv->mr_btree, &idx, ++ (uintptr_t)mbuf->buf_addr, ++ mbuf->buf_len, &mr); + rte_spinlock_unlock(&priv->mr_btree_lock); + ++ if (ret) ++ return NULL; ++ + /* If found in the global tree, add it to the local tree */ + if (mr) { + ret = mana_mr_btree_insert(local_mr_btree, mr); +@@ -228,22 +234,23 @@ mana_mr_btree_expand(struct mana_mr_btree *bt, int n) + /* + * Look for a region of memory in MR cache. 
+ */ +-struct mana_mr_cache * +-mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, +- uintptr_t addr, size_t len) ++int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, ++ uintptr_t addr, size_t len, ++ struct mana_mr_cache **cache) + { + struct mana_mr_cache *table; + uint16_t n; + uint16_t base = 0; + int ret; + +- n = bt->len; ++ *cache = NULL; + ++ n = bt->len; + /* Try to double the cache if it's full */ + if (n == bt->size) { + ret = mana_mr_btree_expand(bt, bt->size << 1); + if (ret) +- return NULL; ++ return ret; + } + + table = bt->table; +@@ -262,14 +269,16 @@ mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, + + *idx = base; + +- if (addr + len <= table[base].addr + table[base].len) +- return &table[base]; ++ if (addr + len <= table[base].addr + table[base].len) { ++ *cache = &table[base]; ++ return 0; ++ } + + DP_LOG(DEBUG, + "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", + addr, len, *idx, addr + len); + +- return NULL; ++ return 0; + } + + int +@@ -314,14 +323,21 @@ mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry) + struct mana_mr_cache *table; + uint16_t idx = 0; + uint16_t shift; ++ int ret; ++ ++ ret = mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len, &table); ++ if (ret) ++ return ret; + +- if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { ++ if (table) { + DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", + entry->addr, entry->len); + return 0; + } + + if (bt->len >= bt->size) { ++ DP_LOG(ERR, "Btree overflow detected len %u size %u", ++ bt->len, bt->size); + bt->overflow = 1; + return -1; + } +diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c +index 7cc8c0da91..1eb41bb471 100644 +--- a/dpdk/drivers/net/memif/rte_eth_memif.c ++++ b/dpdk/drivers/net/memif/rte_eth_memif.c +@@ -265,8 +265,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q + cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); + while (mq->last_tail != cur_tail) { + RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); +- /* Decrement refcnt and free mbuf. (current segment) */ +- rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); + rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); + mq->last_tail++; + } +@@ -602,6 +600,10 @@ refill: + ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots); + if (unlikely(ret < 0)) + goto no_free_mbufs; ++ if (unlikely(n_slots > ring_size - (head & mask))) { ++ rte_memcpy(mq->buffers, &mq->buffers[ring_size], ++ (n_slots + (head & mask) - ring_size) * sizeof(struct rte_mbuf *)); ++ } + + while (n_slots--) { + s0 = head++ & mask; +@@ -684,7 +686,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + n_free = __atomic_load_n(&ring->head, __ATOMIC_ACQUIRE) - slot; + } + +- uint8_t i; ++ uint16_t i; + struct rte_mbuf **buf_tmp = bufs; + mbuf_head = *buf_tmp++; + struct rte_mempool *mp = mbuf_head->pool; +@@ -825,10 +827,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq + next_in_chain: + /* store pointer to mbuf to free it later */ + mq->buffers[slot & mask] = mbuf; +- /* Increment refcnt to make sure the buffer is not freed before server +- * receives it. 
(current segment) +- */ +- rte_mbuf_refcnt_update(mbuf, 1); + /* populate descriptor */ + d0 = &ring->desc[slot & mask]; + d0->length = rte_pktmbuf_data_len(mbuf); +@@ -1251,8 +1249,12 @@ memif_init_queues(struct rte_eth_dev *dev) + } + mq->buffers = NULL; + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { ++ /* ++ * Allocate 2x ring_size to reserve a contiguous array for ++ * rte_pktmbuf_alloc_bulk (to store allocated mbufs). ++ */ + mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) * +- (1 << mq->log2_ring_size), 0); ++ (1 << (mq->log2_ring_size + 1)), 0); + if (mq->buffers == NULL) + return -ENOMEM; + } +@@ -1298,7 +1300,7 @@ memif_connect(struct rte_eth_dev *dev) + PROT_READ | PROT_WRITE, + MAP_SHARED, mr->fd, 0); + if (mr->addr == MAP_FAILED) { +- MIF_LOG(ERR, "mmap failed: %s\n", ++ MIF_LOG(ERR, "mmap failed: %s", + strerror(errno)); + return -1; + } +diff --git a/dpdk/drivers/net/mlx4/mlx4.c b/dpdk/drivers/net/mlx4/mlx4.c +index a1a7e93288..7c0ac6888b 100644 +--- a/dpdk/drivers/net/mlx4/mlx4.c ++++ b/dpdk/drivers/net/mlx4/mlx4.c +@@ -106,7 +106,7 @@ mlx4_init_shared_data(void) + sizeof(*mlx4_shared_data), + SOCKET_ID_ANY, 0); + if (mz == NULL) { +- ERROR("Cannot allocate mlx4 shared data\n"); ++ ERROR("Cannot allocate mlx4 shared data"); + ret = -rte_errno; + goto error; + } +@@ -117,7 +117,7 @@ mlx4_init_shared_data(void) + /* Lookup allocated shared memory. */ + mz = rte_memzone_lookup(MZ_MLX4_PMD_SHARED_DATA); + if (mz == NULL) { +- ERROR("Cannot attach mlx4 shared data\n"); ++ ERROR("Cannot attach mlx4 shared data"); + ret = -rte_errno; + goto error; + } +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr.h b/dpdk/drivers/net/mlx5/hws/mlx5dr.h +index d88f73ab57..cbb79b8ba1 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr.h +@@ -80,6 +80,7 @@ enum mlx5dr_action_aso_ct_flags { + }; + + enum mlx5dr_match_template_flags { ++ MLX5DR_MATCH_TEMPLATE_FLAG_NONE = 0, + /* Allow relaxed matching by skipping derived dependent match fields. 
*/ + MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH = 1, + }; +@@ -95,8 +96,10 @@ struct mlx5dr_context_attr { + uint16_t queues; + uint16_t queue_size; + size_t initial_log_ste_memory; /* Currently not in use */ +- /* Optional PD used for allocating res ources */ ++ /* Optional PD used for allocating resources */ + struct ibv_pd *pd; ++ /* Optional the STC array size for that context */ ++ size_t initial_log_stc_memory; + /* Optional other ctx for resources allocation, all objects will be created on it */ + struct ibv_context *shared_ibv_ctx; + }; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c +index 862ee3e332..a068f100c5 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c +@@ -1465,7 +1465,9 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, + + /* Create a full modify header action list in case shared */ + mlx5dr_action_prepare_decap_l3_actions(hdrs->sz, mh_data, &num_of_actions); +- mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); ++ ++ if (action->flags & MLX5DR_ACTION_FLAG_SHARED) ++ mlx5dr_action_prepare_decap_l3_data(hdrs->data, mh_data, num_of_actions); + + /* All DecapL3 cases require the same max arg size */ + arg_obj = mlx5dr_arg_create_modify_header_arg(ctx, +@@ -1489,6 +1491,7 @@ mlx5dr_action_handle_tunnel_l3_to_l2(struct mlx5dr_action *action, + + action[i].modify_header.max_num_of_actions = num_of_actions; + action[i].modify_header.num_of_actions = num_of_actions; ++ action[i].modify_header.num_of_patterns = num_of_hdrs; + action[i].modify_header.arg_obj = arg_obj; + action[i].modify_header.pat_obj = pat_obj; + action[i].modify_header.require_reparse = +@@ -2547,6 +2550,7 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) + case MLX5DR_ACTION_TYP_ASO_CT: + case MLX5DR_ACTION_TYP_PUSH_VLAN: + case MLX5DR_ACTION_TYP_REMOVE_HEADER: ++ case MLX5DR_ACTION_TYP_VPORT: + mlx5dr_action_destroy_stcs(action); + break; + case MLX5DR_ACTION_TYP_DEST_ROOT: +@@ -2600,6 +2604,9 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action) + if (action->ipv6_route_ext.action[i]) + mlx5dr_action_destroy(action->ipv6_route_ext.action[i]); + break; ++ default: ++ DR_LOG(ERR, "Not supported action type: %d", action->type); ++ assert(false); + } + } + +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c +index 876a47147d..0fb764df32 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_cmd.c +@@ -1027,7 +1027,8 @@ int mlx5dr_cmd_generate_wqe(struct ibv_context *ctx, + + ret = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out)); + if (ret) { +- DR_LOG(ERR, "Failed to write GTA WQE using FW"); ++ DR_LOG(ERR, "Failed to write GTA WQE using FW (syndrome: %#x)", ++ mlx5dr_cmd_get_syndrome(out)); + rte_errno = errno; + return rte_errno; + } +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c +index 15d53c578a..6c4c18b041 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c +@@ -19,7 +19,8 @@ uint8_t mlx5dr_context_get_reparse_mode(struct mlx5dr_context *ctx) + return MLX5_IFC_RTC_REPARSE_ALWAYS; + } + +-static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx) ++static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx, ++ struct mlx5dr_context_attr *attr) + { + struct mlx5dr_pool_attr pool_attr = {0}; + uint8_t 
max_log_sz; +@@ -34,7 +35,9 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx) + /* Create an STC pool per FT type */ + pool_attr.pool_type = MLX5DR_POOL_TYPE_STC; + pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL; +- max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); ++ if (!attr->initial_log_stc_memory) ++ attr->initial_log_stc_memory = MLX5DR_POOL_STC_LOG_SZ; ++ max_log_sz = RTE_MIN(attr->initial_log_stc_memory, ctx->caps->stc_alloc_log_max); + pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran); + + for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) { +@@ -172,7 +175,7 @@ static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx, + if (ret) + return ret; + +- ret = mlx5dr_context_pools_init(ctx); ++ ret = mlx5dr_context_pools_init(ctx, attr); + if (ret) + goto uninit_pd; + +@@ -263,6 +266,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx, + free_caps: + simple_free(ctx->caps); + free_ctx: ++ pthread_spin_destroy(&ctx->ctrl_lock); + simple_free(ctx); + return NULL; + } +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c +index 11557bcab8..f11c81ffee 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_debug.c +@@ -150,7 +150,7 @@ mlx5dr_debug_dump_matcher_action_template(FILE *f, struct mlx5dr_matcher *matche + MLX5DR_DEBUG_RES_TYPE_MATCHER_ACTION_TEMPLATE, + (uint64_t)(uintptr_t)at, + (uint64_t)(uintptr_t)matcher, +- at->only_term ? 0 : 1, ++ at->only_term, + is_root ? 0 : at->num_of_action_stes, + at->num_actions); + if (ret < 0) { +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c +index 0b60479406..ef437a6dbd 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c +@@ -8,8 +8,9 @@ + #define BAD_PORT 0xBAD + #define ETH_TYPE_IPV4_VXLAN 0x0800 + #define ETH_TYPE_IPV6_VXLAN 0x86DD +-#define ETH_VXLAN_DEFAULT_PORT 4789 +-#define IP_UDP_PORT_MPLS 6635 ++#define UDP_GTPU_PORT 2152 ++#define UDP_VXLAN_PORT 4789 ++#define UDP_PORT_MPLS 6635 + #define UDP_ROCEV2_PORT 4791 + #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS) + +@@ -41,6 +42,10 @@ + (bit_off))); \ + } while (0) + ++/* Getter function based on bit offset and mask, for 32bit DW*/ ++#define DR_GET_32(p, byte_off, bit_off, mask) \ ++ ((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) ++ + /* Setter function based on bit offset and mask */ + #define DR_SET(p, v, byte_off, bit_off, mask) \ + do { \ +@@ -158,7 +163,7 @@ struct mlx5dr_definer_conv_data { + X(SET, tcp_protocol, STE_TCP, rte_flow_item_tcp) \ + X(SET_BE16, tcp_src_port, v->hdr.src_port, rte_flow_item_tcp) \ + X(SET_BE16, tcp_dst_port, v->hdr.dst_port, rte_flow_item_tcp) \ +- X(SET, gtp_udp_port, RTE_GTPU_UDP_PORT, rte_flow_item_gtp) \ ++ X(SET, gtp_udp_port, UDP_GTPU_PORT, rte_flow_item_gtp) \ + X(SET_BE32, gtp_teid, v->hdr.teid, rte_flow_item_gtp) \ + X(SET, gtp_msg_type, v->hdr.msg_type, rte_flow_item_gtp) \ + X(SET, gtp_ext_flag, !!v->hdr.gtp_hdr_info, rte_flow_item_gtp) \ +@@ -166,8 +171,8 @@ struct mlx5dr_definer_conv_data { + X(SET, gtp_ext_hdr_pdu, v->hdr.type, rte_flow_item_gtp_psc) \ + X(SET, gtp_ext_hdr_qfi, v->hdr.qfi, rte_flow_item_gtp_psc) \ + X(SET, vxlan_flags, v->flags, rte_flow_item_vxlan) \ +- X(SET, vxlan_udp_port, ETH_VXLAN_DEFAULT_PORT, rte_flow_item_vxlan) \ +- X(SET, mpls_udp_port, IP_UDP_PORT_MPLS, 
rte_flow_item_mpls) \ ++ X(SET, vxlan_udp_port, UDP_VXLAN_PORT, rte_flow_item_vxlan) \ ++ X(SET, mpls_udp_port, UDP_PORT_MPLS, rte_flow_item_mpls) \ + X(SET, source_qp, v->queue, mlx5_rte_flow_item_sq) \ + X(SET, tag, v->data, rte_flow_item_tag) \ + X(SET, metadata, v->data, rte_flow_item_meta) \ +@@ -183,6 +188,8 @@ struct mlx5dr_definer_conv_data { + X(SET, ib_l4_udp_port, UDP_ROCEV2_PORT, rte_flow_item_ib_bth) \ + X(SET, ib_l4_opcode, v->hdr.opcode, rte_flow_item_ib_bth) \ + X(SET, ib_l4_bth_a, v->hdr.a, rte_flow_item_ib_bth) \ ++ X(SET, cvlan, STE_CVLAN, rte_flow_item_vlan) \ ++ X(SET_BE16, inner_type, v->inner_type, rte_flow_item_vlan) \ + + /* Item set function format */ + #define X(set_type, func_name, value, item_type) \ +@@ -377,7 +384,7 @@ mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc, + { + bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I); + const struct rte_flow_item_integrity *v = item_spec; +- uint32_t ok1_bits = 0; ++ uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask); + + if (v->l3_ok) + ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) : +@@ -429,7 +436,7 @@ mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc, + idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0; + byte_off -= idx * sizeof(uint32_t); + ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off, +- false, is_inner, &val); ++ is_inner, &val); + if (ret == -1 || !val) + return; + +@@ -769,6 +776,15 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + struct mlx5dr_definer_fc *fc; + bool inner = cd->tunnel; + ++ if (!cd->relaxed) { ++ /* Mark packet as tagged (CVLAN) */ ++ fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; ++ fc->item_idx = item_idx; ++ fc->tag_mask_set = &mlx5dr_definer_ones_set; ++ fc->tag_set = &mlx5dr_definer_cvlan_set; ++ DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner); ++ } ++ + if (!m) + return 0; + +@@ -777,8 +793,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + return rte_errno; + } + +- if (!cd->relaxed || m->has_more_vlan) { +- /* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/ ++ if (m->has_more_vlan) { + fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; + fc->item_idx = item_idx; + fc->tag_mask_set = &mlx5dr_definer_ones_set; +@@ -796,7 +811,7 @@ mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd, + if (m->hdr.eth_proto) { + fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)]; + fc->item_idx = item_idx; +- fc->tag_set = &mlx5dr_definer_eth_type_set; ++ fc->tag_set = &mlx5dr_definer_inner_type_set; + DR_CALC_SET(fc, eth_l2, l3_ethertype, inner); + } + +@@ -1170,6 +1185,12 @@ mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd, + const struct rte_flow_item_gtp *m = item->mask; + struct mlx5dr_definer_fc *fc; + ++ if (cd->tunnel) { ++ DR_LOG(ERR, "Inner GTPU item not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ + /* Overwrite GTPU dest port if not present */ + fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; + if (!fc->tag_set && !cd->relaxed) { +@@ -1344,9 +1365,20 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + struct mlx5dr_definer_fc *fc; + bool inner = cd->tunnel; + +- /* In order to match on VXLAN we must match on ether_type, ip_protocol +- * and l4_dport. 
+- */ ++ if (inner) { ++ DR_LOG(ERR, "Inner VXLAN item not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ ++ /* In order to match on VXLAN we must match on ip_protocol and l4_dport */ ++ if (m && (m->rsvd0[0] != 0 || m->rsvd0[1] != 0 || m->rsvd0[2] != 0 || ++ m->rsvd1 != 0)) { ++ DR_LOG(ERR, "reserved fields are not supported"); ++ rte_errno = ENOTSUP; ++ return rte_errno; ++ } ++ + if (!cd->relaxed) { + fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)]; + if (!fc->tag_set) { +@@ -1369,12 +1401,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + return 0; + + if (m->flags) { +- if (inner) { +- DR_LOG(ERR, "Inner VXLAN flags item not supported"); +- rte_errno = ENOTSUP; +- return rte_errno; +- } +- + fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_vxlan_flags_set; +@@ -1384,12 +1410,6 @@ mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd, + } + + if (!is_mem_zero(m->vni, 3)) { +- if (inner) { +- DR_LOG(ERR, "Inner VXLAN vni item not supported"); +- rte_errno = ENOTSUP; +- return rte_errno; +- } +- + fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI]; + fc->item_idx = item_idx; + fc->tag_set = &mlx5dr_definer_vxlan_vni_set; +@@ -2240,11 +2260,6 @@ mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd, + const struct rte_flow_item_esp *m = item->mask; + struct mlx5dr_definer_fc *fc; + +- if (!cd->ctx->caps->ipsec_offload) { +- rte_errno = ENOTSUP; +- return rte_errno; +- } +- + if (!m) + return 0; + if (m->hdr.spi) { +@@ -2299,7 +2314,7 @@ mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd, + for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) { + byte_off = base_off - i * sizeof(uint32_t); + ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off, +- true, is_inner, &mask); ++ is_inner, &mask); + if (ret == -1) { + rte_errno = EINVAL; + return rte_errno; +@@ -2521,8 +2536,17 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx, + break; + case RTE_FLOW_ITEM_TYPE_FLEX: + ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i); +- item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX : +- MLX5_FLOW_ITEM_OUTER_FLEX; ++ if (ret == 0) { ++ enum rte_flow_item_flex_tunnel_mode tunnel_mode = ++ FLEX_TUNNEL_MODE_SINGLE; ++ ++ ret = mlx5_flex_get_tunnel_mode(items, &tunnel_mode); ++ if (tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL) ++ item_flags |= MLX5_FLOW_ITEM_FLEX_TUNNEL; ++ else ++ item_flags |= cd.tunnel ? 
MLX5_FLOW_ITEM_INNER_FLEX : ++ MLX5_FLOW_ITEM_OUTER_FLEX; ++ } + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + ret = mlx5dr_definer_conv_item_mpls(&cd, items, i); +@@ -2842,7 +2866,7 @@ mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx, + return 0; + } + +- DR_LOG(ERR, "Unable to find supporting match/jumbo definer combination"); ++ DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination"); + rte_errno = ENOTSUP; + return rte_errno; + } +@@ -2975,7 +2999,7 @@ mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher, + /* Find the match definer layout for header layout match union */ + ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl); + if (ret) { +- DR_LOG(ERR, "Failed to create match definer from header layout"); ++ DR_LOG(DEBUG, "Failed to create match definer from header layout"); + goto free_fc; + } + +@@ -3191,15 +3215,19 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx, + + /* Create optional range definers */ + for (i = 0; i < matcher->num_of_mt; i++) { +- if (!mt[i].fcr_sz) +- continue; +- + /* All must use range if requested */ +- if (i && !mt[i - 1].range_definer) { ++ bool is_range = !!mt[i].fcr_sz; ++ bool has_range = matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER; ++ ++ if (i && ((is_range && !has_range) || (!is_range && has_range))) { + DR_LOG(ERR, "Using range and non range templates is not allowed"); ++ rte_errno = EINVAL; + goto free_definers; + } + ++ if (!mt[i].fcr_sz) ++ continue; ++ + matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER; + /* Create definer without fcr binding, already binded */ + mt[i].range_definer = mlx5dr_definer_alloc(ctx, +@@ -3320,7 +3348,7 @@ int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx, + + ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout); + if (ret) { +- DR_LOG(ERR, "Failed to calculate matcher definer layout"); ++ DR_LOG(DEBUG, "Failed to calculate matcher definer layout"); + return ret; + } + +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c +index 4ea161eae6..36be96c668 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_matcher.c +@@ -340,7 +340,7 @@ static int mlx5dr_matcher_disconnect(struct mlx5dr_matcher *matcher) + return 0; + + matcher_reconnect: +- if (LIST_EMPTY(&tbl->head)) ++ if (LIST_EMPTY(&tbl->head) || prev_matcher == matcher) + LIST_INSERT_HEAD(&matcher->tbl->head, matcher, next); + else + LIST_INSERT_AFTER(prev_matcher, matcher, next); +@@ -807,7 +807,7 @@ static int mlx5dr_matcher_bind_mt(struct mlx5dr_matcher *matcher) + /* Calculate match, range and hash definers */ + ret = mlx5dr_definer_matcher_init(ctx, matcher); + if (ret) { +- DR_LOG(ERR, "Failed to set matcher templates with match definers"); ++ DR_LOG(DEBUG, "Failed to set matcher templates with match definers"); + return ret; + } + +@@ -1171,6 +1171,13 @@ static int mlx5dr_matcher_init_root(struct mlx5dr_matcher *matcher) + return rte_errno; + } + ++ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); ++ if (ret) { ++ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); ++ rte_errno = EINVAL; ++ return rte_errno; ++ } ++ + mask = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) + + offsetof(struct mlx5dv_flow_match_parameters, match_buf)); + if (!mask) { +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h +index bbe313102f..c4e0cbc843 100644 +--- 
a/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_pat_arg.h +@@ -30,7 +30,6 @@ struct mlx5dr_pattern_cache { + struct mlx5dr_pattern_cache_item { + struct { + struct mlx5dr_devx_obj *pattern_obj; +- struct dr_icm_chunk *chunk; + uint8_t *data; + uint16_t num_of_actions; + } mh_data; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c +index fa19303b91..cc7a30d6d0 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_rule.c +@@ -23,6 +23,9 @@ static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher, + *skip_rx = false; + *skip_tx = false; + ++ if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher))) ++ return; ++ + if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) { + v = items[mt->vport_item_id].spec; + vport = flow_hw_conv_port_id(v->port_id); +@@ -55,14 +58,16 @@ static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe, + struct mlx5dr_rule *rule, + const struct rte_flow_item *items, + struct mlx5dr_match_template *mt, +- void *user_data) ++ struct mlx5dr_rule_attr *attr) + { + struct mlx5dr_matcher *matcher = rule->matcher; + struct mlx5dr_table *tbl = matcher->tbl; + bool skip_rx, skip_tx; + + dep_wqe->rule = rule; +- dep_wqe->user_data = user_data; ++ dep_wqe->user_data = attr->user_data; ++ dep_wqe->direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ? ++ attr->rule_idx : 0; + + if (!items) { /* rule update */ + dep_wqe->rtc_0 = rule->rtc_0; +@@ -145,8 +150,13 @@ mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule, + rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr)); + assert(rule->tag_ptr); + +- src_tag = (uint8_t *)ste_attr->wqe_data->tag; +- memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ); ++ if (is_jumbo) ++ memcpy(rule->tag_ptr[0].jumbo, ste_attr->wqe_data->action, ++ MLX5DR_JUMBO_TAG_SZ); ++ else ++ memcpy(rule->tag_ptr[0].match, ste_attr->wqe_data->tag, ++ MLX5DR_MATCH_TAG_SZ); ++ + rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id; + + /* Save range definer id and tag for delete */ +@@ -289,8 +299,8 @@ static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule, + } + + mlx5dr_rule_create_init(rule, &ste_attr, &apply, false); +- mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data); +- mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data); ++ mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr); ++ mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr); + + ste_attr.direct_index = 0; + ste_attr.rtc_0 = match_wqe.rtc_0; +@@ -395,7 +405,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule, + * dep_wqe buffers (ctrl, data) are also reused for all STE writes. + */ + dep_wqe = mlx5dr_send_add_new_dep_wqe(queue); +- mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr->user_data); ++ mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, mt, attr); + + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; +@@ -457,8 +467,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule, + ste_attr.used_id_rtc_1 = &rule->rtc_1; + ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0; + ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1; +- ste_attr.direct_index = mlx5dr_matcher_is_insert_by_idx(matcher) ? 
+- attr->rule_idx : 0; ++ ste_attr.direct_index = dep_wqe->direct_index; + } else { + apply.next_direct_idx = --ste_attr.direct_index; + } +@@ -594,6 +603,13 @@ static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule, + uint8_t match_criteria; + int ret; + ++ ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); ++ if (ret) { ++ DR_LOG(ERR, "Failed to get port id for dev %s", ctx->ibv_ctx->device->name); ++ rte_errno = EINVAL; ++ return rte_errno; ++ } ++ + attr = simple_calloc(num_actions, sizeof(*attr)); + if (!attr) { + rte_errno = ENOMEM; +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c +index 622d574bfa..4c279ba42a 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.c +@@ -50,6 +50,7 @@ void mlx5dr_send_all_dep_wqe(struct mlx5dr_send_engine *queue) + ste_attr.used_id_rtc_1 = &dep_wqe->rule->rtc_1; + ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl; + ste_attr.wqe_data = &dep_wqe->wqe_data; ++ ste_attr.direct_index = dep_wqe->direct_index; + + mlx5dr_send_ste(queue, &ste_attr); + +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h +index c1e8616f7e..0c89faa8a7 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_send.h +@@ -106,6 +106,7 @@ struct mlx5dr_send_ring_dep_wqe { + uint32_t rtc_1; + uint32_t retry_rtc_0; + uint32_t retry_rtc_1; ++ uint32_t direct_index; + void *user_data; + }; + +@@ -202,8 +203,6 @@ struct mlx5dr_send_ste_attr { + * value to write in CPU endian format. + * @param addr + * Address to write to. +- * @param lock +- * Address of the lock to use for that UAR access. + */ + static __rte_always_inline void + mlx5dr_uar_write64_relaxed(uint64_t val, void *addr) +diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c +index 55b9b20150..ab73017ade 100644 +--- a/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c ++++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_table.c +@@ -611,8 +611,7 @@ static int mlx5dr_table_set_default_miss_not_valid(struct mlx5dr_table *tbl, + + if (mlx5dr_table_is_root(tbl) || + (miss_tbl && mlx5dr_table_is_root(miss_tbl)) || +- (miss_tbl && miss_tbl->type != tbl->type) || +- (miss_tbl && tbl->default_miss.miss_tbl)) { ++ (miss_tbl && miss_tbl->type != tbl->type)) { + DR_LOG(ERR, "Invalid arguments"); + rte_errno = EINVAL; + return -rte_errno; +@@ -625,6 +624,7 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, + struct mlx5dr_table *miss_tbl) + { + struct mlx5dr_context *ctx = tbl->ctx; ++ struct mlx5dr_table *old_miss_tbl; + int ret; + + ret = mlx5dr_table_set_default_miss_not_valid(tbl, miss_tbl); +@@ -632,15 +632,16 @@ int mlx5dr_table_set_default_miss(struct mlx5dr_table *tbl, + return ret; + + pthread_spin_lock(&ctx->ctrl_lock); +- ++ old_miss_tbl = tbl->default_miss.miss_tbl; + ret = mlx5dr_table_connect_to_miss_table(tbl, miss_tbl); + if (ret) + goto out; + ++ if (old_miss_tbl) ++ LIST_REMOVE(tbl, default_miss.next); ++ + if (miss_tbl) + LIST_INSERT_HEAD(&miss_tbl->default_miss.head, tbl, default_miss.next); +- else +- LIST_REMOVE(tbl, default_miss.next); + + pthread_spin_unlock(&ctx->ctrl_lock); + return 0; +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +index dd5a0c546d..1d999ef66b 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_ethdev_os.c +@@ -671,7 +671,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct 
rte_eth_fc_conf *fc_conf) + ifr.ifr_data = (void *)ðpause; + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { +- DRV_LOG(WARNING, ++ DRV_LOG(DEBUG, + "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" + " %s", + dev->data->port_id, strerror(rte_errno)); +@@ -1286,13 +1286,17 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + struct ifreq ifr; +- unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); ++ unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); ++ unsigned int stats_sz = max_stats_n * sizeof(uint64_t); + unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; + struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; + int ret; ++ uint16_t i_idx, o_idx; ++ uint32_t total_stats = xstats_n; + + et_stats->cmd = ETHTOOL_GSTATS; +- et_stats->n_stats = xstats_ctrl->stats_n; ++ /* Pass the maximum value, the driver may ignore this. */ ++ et_stats->n_stats = max_stats_n; + ifr.ifr_data = (caddr_t)et_stats; + if (pf >= 0) + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, +@@ -1305,21 +1309,34 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + dev->data->port_id); + return ret; + } +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { +- if (xstats_ctrl->info[i].dev) +- continue; +- stats[i] += (uint64_t) +- et_stats->data[xstats_ctrl->dev_table_idx[i]]; ++ if (pf <= 0) { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx[i]; ++ o_idx = xstats_ctrl->xstats_o_idx[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } ++ } else { ++ for (i = 0; i != total_stats; i++) { ++ i_idx = xstats_ctrl->dev_table_idx_2nd[i]; ++ o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; ++ if (i_idx == UINT16_MAX || xstats_ctrl->info[o_idx].dev) ++ continue; ++ stats[o_idx] += (uint64_t)et_stats->data[i_idx]; ++ } + } + return 0; + } + +-/** ++/* + * Read device counters. + * + * @param dev + * Pointer to Ethernet device. +- * @param[out] stats ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param stats + * Counters table output buffer. + * + * @return +@@ -1327,7 +1344,7 @@ _mlx5_os_read_dev_counters(struct rte_eth_dev *dev, int pf, uint64_t *stats) + * rte_errno is set. + */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +@@ -1335,7 +1352,7 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + + memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); + /* Read ifreq counters. */ +- if (priv->master && priv->pf_bond >= 0) { ++ if (bond_master) { + /* Sum xstats from bonding device member ports. */ + for (i = 0; i < priv->sh->bond.n_port; i++) { + ret = _mlx5_os_read_dev_counters(dev, i, stats); +@@ -1347,13 +1364,17 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + if (ret) + return ret; + } +- /* Read IB counters. */ +- for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { ++ /* ++ * Read IB dev counters. ++ * The counters are unique per IB device but not per netdev IF. ++ * In bonding mode, getting the stats name only from 1 port is enough. 
++ */ ++ for (i = xstats_ctrl->dev_cnt_start; i < xstats_ctrl->mlx5_stats_n; i++) { + if (!xstats_ctrl->info[i].dev) + continue; + /* return last xstats counter if fail to read. */ + if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, +- &stats[i]) == 0) ++ &stats[i]) == 0) + xstats_ctrl->xstats[i] = stats[i]; + else + stats[i] = xstats_ctrl->xstats[i]; +@@ -1361,18 +1382,24 @@ mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) + return ret; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct ethtool_drvinfo drvinfo; +@@ -1381,18 +1408,34 @@ mlx5_os_get_stats_n(struct rte_eth_dev *dev) + + drvinfo.cmd = ETHTOOL_GDRVINFO; + ifr.ifr_data = (caddr_t)&drvinfo; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding PF. */ ++ /* Bonding PFs. */ ++ if (bond_master) { + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, + SIOCETHTOOL, &ifr); +- else ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 1st slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "bonding port %u unable to query number of" ++ " statistics for the 2nd slave, %d", PORT_ID(priv), ret); ++ return ret; ++ } ++ *n_stats_sec = drvinfo.n_stats; ++ } else { + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to query number of statistics", +- dev->data->port_id); +- return ret; ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to query number of statistics", ++ PORT_ID(priv)); ++ return ret; ++ } ++ *n_stats = drvinfo.n_stats; + } +- return drvinfo.n_stats; ++ return 0; + } + + static const struct mlx5_counter_ctrl mlx5_counters_init[] = { +@@ -1576,7 +1619,104 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = { + }, + }; + +-static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); ++ ++static int ++mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, ++ struct ethtool_gstrings *strings, ++ uint32_t stats_n, uint32_t stats_n_2nd) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ struct ifreq ifr; ++ int ret; ++ uint32_t i, j, idx; ++ ++ /* Ensure no out of bounds access before. 
*/ ++ MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); ++ strings->cmd = ETHTOOL_GSTRINGS; ++ strings->string_set = ETH_SS_STATS; ++ strings->len = stats_n; ++ ifr.ifr_data = (caddr_t)strings; ++ if (bond_master) ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, ++ SIOCETHTOOL, &ifr); ++ else ++ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* Reorganize the orders to reduce the iterations. */ ++ for (j = 0; j < xstats_n; j++) { ++ xstats_ctrl->dev_table_idx[j] = UINT16_MAX; ++ for (i = 0; i < stats_n; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->dev_table_idx[j] = i; ++ xstats_ctrl->xstats_o_idx[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ if (!bond_master) { ++ /* Add dev counters, unique per IB device. */ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++ } ++ ++ strings->len = stats_n_2nd; ++ ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, ++ SIOCETHTOOL, &ifr); ++ if (ret) { ++ DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", ++ PORT_ID(priv), ret); ++ return ret; ++ } ++ /* The 2nd slave port may have a different strings set, based on the configuration. */ ++ for (j = 0; j != xstats_n; j++) { ++ xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; ++ for (i = 0; i != stats_n_2nd; i++) { ++ const char *curr_string = ++ (const char *)&strings->data[i * ETH_GSTRING_LEN]; ++ ++ if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { ++ xstats_ctrl->dev_table_idx_2nd[j] = i; ++ if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { ++ /* Already mapped in the 1st slave port. */ ++ idx = xstats_ctrl->xstats_o_idx[j]; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ } else { ++ /* Append the new items to the end of the map. */ ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->xstats_o_idx_2nd[j] = idx; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ } ++ } ++ } ++ } ++ /* Dev counters are always at the last now. */ ++ xstats_ctrl->dev_cnt_start = xstats_ctrl->mlx5_stats_n; ++ for (j = 0; j != xstats_n; j++) { ++ if (mlx5_counters_init[j].dev) { ++ idx = xstats_ctrl->mlx5_stats_n++; ++ xstats_ctrl->info[idx] = mlx5_counters_init[j]; ++ xstats_ctrl->hw_stats[idx] = 0; ++ } ++ } ++ return 0; ++} + + /** + * Init the structures to read device counters. +@@ -1590,76 +1730,44 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; +- unsigned int i; +- unsigned int j; +- struct ifreq ifr; + struct ethtool_gstrings *strings = NULL; +- unsigned int dev_stats_n; ++ uint16_t dev_stats_n = 0; ++ uint16_t dev_stats_n_2nd = 0; ++ unsigned int max_stats_n; + unsigned int str_sz; + int ret; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + + /* So that it won't aggregate for each init. 
*/ + xstats_ctrl->mlx5_stats_n = 0; +- ret = mlx5_os_get_stats_n(dev); ++ ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); + if (ret < 0) { + DRV_LOG(WARNING, "port %u no extended statistics available", + dev->data->port_id); + return; + } +- dev_stats_n = ret; ++ max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); + /* Allocate memory to grab stat names and values. */ +- str_sz = dev_stats_n * ETH_GSTRING_LEN; ++ str_sz = max_stats_n * ETH_GSTRING_LEN; + strings = (struct ethtool_gstrings *) + mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, + SOCKET_ID_ANY); + if (!strings) { + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", +- dev->data->port_id); ++ dev->data->port_id); + return; + } +- strings->cmd = ETHTOOL_GSTRINGS; +- strings->string_set = ETH_SS_STATS; +- strings->len = dev_stats_n; +- ifr.ifr_data = (caddr_t)strings; +- if (priv->master && priv->pf_bond >= 0) +- /* Bonding master. */ +- ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, +- SIOCETHTOOL, &ifr); +- else +- ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); +- if (ret) { +- DRV_LOG(WARNING, "port %u unable to get statistic names", ++ ret = mlx5_os_get_stats_strings(dev, bond_master, strings, ++ dev_stats_n, dev_stats_n_2nd); ++ if (ret < 0) { ++ DRV_LOG(WARNING, "port %u failed to get the stats strings", + dev->data->port_id); + goto free; + } +- for (i = 0; i != dev_stats_n; ++i) { +- const char *curr_string = (const char *) +- &strings->data[i * ETH_GSTRING_LEN]; +- +- for (j = 0; j != xstats_n; ++j) { +- if (!strcmp(mlx5_counters_init[j].ctr_name, +- curr_string)) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->dev_table_idx[idx] = i; +- xstats_ctrl->info[idx] = mlx5_counters_init[j]; +- break; +- } +- } +- } +- /* Add dev counters. */ +- MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); +- for (i = 0; i != xstats_n; ++i) { +- if (mlx5_counters_init[i].dev) { +- unsigned int idx = xstats_ctrl->mlx5_stats_n++; +- +- xstats_ctrl->info[idx] = mlx5_counters_init[i]; +- xstats_ctrl->hw_stats[idx] = 0; +- } +- } + xstats_ctrl->stats_n = dev_stats_n; ++ xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; + /* Copy to base at first time. */ +- ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); + if (ret) + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +index ae82e1e5d8..9dcdc8581a 100644 +--- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c ++++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c +@@ -455,15 +455,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv) + * Routine checks the reference counter and does actual + * resources creation/initialization only if counter is zero. + * +- * @param[in] priv +- * Pointer to the private device data structure. ++ * @param[in] eth_dev ++ * Pointer to the device. + * + * @return + * Zero on success, positive error code otherwise. + */ + static int +-mlx5_alloc_shared_dr(struct mlx5_priv *priv) ++mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev) + { ++ struct mlx5_priv *priv = eth_dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + char s[MLX5_NAME_SIZE] __rte_unused; + int err; +@@ -578,6 +579,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv) + err = errno; + goto error; + } ++ ++ if (sh->config.dv_flow_en == 1) { ++ /* Query availability of metadata reg_c's. 
*/ ++ if (!priv->sh->metadata_regc_check_flag) { ++ err = mlx5_flow_discover_mreg_c(eth_dev); ++ if (err < 0) { ++ err = -err; ++ goto error; ++ } ++ } ++ if (!mlx5_flow_ext_mreg_supported(eth_dev)) { ++ DRV_LOG(DEBUG, ++ "port %u extensive metadata register is not supported", ++ eth_dev->data->port_id); ++ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { ++ DRV_LOG(ERR, "metadata mode %u is not supported " ++ "(no metadata registers available)", ++ sh->config.dv_xmeta_en); ++ err = ENOTSUP; ++ goto error; ++ } ++ } ++ if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && ++ mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) { ++ sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, ++ MLX5_FLOW_MREG_HTABLE_SZ, ++ false, true, eth_dev, ++ flow_dv_mreg_create_cb, ++ flow_dv_mreg_match_cb, ++ flow_dv_mreg_remove_cb, ++ flow_dv_mreg_clone_cb, ++ flow_dv_mreg_clone_free_cb); ++ if (!sh->mreg_cp_tbl) { ++ err = ENOMEM; ++ goto error; ++ } ++ } ++ } + #endif + if (!sh->tunnel_hub && sh->config.dv_miss_info) + err = mlx5_alloc_tunnel_hub(sh); +@@ -662,6 +701,10 @@ error: + mlx5_list_destroy(sh->dest_array_list); + sh->dest_array_list = NULL; + } ++ if (sh->mreg_cp_tbl) { ++ mlx5_hlist_destroy(sh->mreg_cp_tbl); ++ sh->mreg_cp_tbl = NULL; ++ } + return err; + } + +@@ -759,6 +802,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv) + mlx5_list_destroy(sh->dest_array_list); + sh->dest_array_list = NULL; + } ++ if (sh->mreg_cp_tbl) { ++ mlx5_hlist_destroy(sh->mreg_cp_tbl); ++ sh->mreg_cp_tbl = NULL; ++ } + } + + /** +@@ -1523,9 +1570,11 @@ err_secondary: + priv->ctrl_flows = 0; + rte_spinlock_init(&priv->flow_list_lock); + TAILQ_INIT(&priv->flow_meters); +- priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); +- if (!priv->mtr_profile_tbl) +- goto error; ++ if (priv->mtr_en) { ++ priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); ++ if (!priv->mtr_profile_tbl) ++ goto error; ++ } + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); +@@ -1545,13 +1594,6 @@ err_secondary: + } + /* Create context for virtual machine VLAN workaround. */ + priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); +- if (sh->config.dv_flow_en) { +- err = mlx5_alloc_shared_dr(priv); +- if (err) +- goto error; +- if (mlx5_flex_item_port_init(eth_dev) < 0) +- goto error; +- } + if (mlx5_devx_obj_ops_en(sh)) { + priv->obj_ops = devx_obj_ops; + mlx5_queue_counter_id_prepare(eth_dev); +@@ -1602,6 +1644,13 @@ err_secondary: + goto error; + } + rte_rwlock_init(&priv->ind_tbls_lock); ++ if (sh->config.dv_flow_en) { ++ err = mlx5_alloc_shared_dr(eth_dev); ++ if (err) ++ goto error; ++ if (mlx5_flex_item_port_init(eth_dev) < 0) ++ goto error; ++ } + if (priv->sh->config.dv_flow_en == 2) { + #ifdef HAVE_MLX5_HWS_SUPPORT + if (priv->sh->config.dv_esw_en) { +@@ -1682,43 +1731,6 @@ err_secondary: + err = -err; + goto error; + } +- /* Query availability of metadata reg_c's. 
*/ +- if (!priv->sh->metadata_regc_check_flag) { +- err = mlx5_flow_discover_mreg_c(eth_dev); +- if (err < 0) { +- err = -err; +- goto error; +- } +- } +- if (!mlx5_flow_ext_mreg_supported(eth_dev)) { +- DRV_LOG(DEBUG, +- "port %u extensive metadata register is not supported", +- eth_dev->data->port_id); +- if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { +- DRV_LOG(ERR, "metadata mode %u is not supported " +- "(no metadata registers available)", +- sh->config.dv_xmeta_en); +- err = ENOTSUP; +- goto error; +- } +- } +- if (sh->config.dv_flow_en && +- sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && +- mlx5_flow_ext_mreg_supported(eth_dev) && +- priv->sh->dv_regc0_mask) { +- priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME, +- MLX5_FLOW_MREG_HTABLE_SZ, +- false, true, eth_dev, +- flow_dv_mreg_create_cb, +- flow_dv_mreg_match_cb, +- flow_dv_mreg_remove_cb, +- flow_dv_mreg_clone_cb, +- flow_dv_mreg_clone_free_cb); +- if (!priv->mreg_cp_tbl) { +- err = ENOMEM; +- goto error; +- } +- } + rte_spinlock_init(&priv->shared_act_sl); + mlx5_flow_counter_mode_config(eth_dev); + mlx5_flow_drop_action_config(eth_dev); +@@ -1737,8 +1749,6 @@ error: + priv->sh->config.dv_esw_en) + flow_hw_destroy_vport_action(eth_dev); + #endif +- if (priv->mreg_cp_tbl) +- mlx5_hlist_destroy(priv->mreg_cp_tbl); + if (priv->sh) + mlx5_os_free_shared_dr(priv); + if (priv->nl_socket_route >= 0) +@@ -2429,8 +2439,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, + list[ns].info.master = 0; + list[ns].info.representor = 0; + } +- if (list[ns].info.port_name == bd) +- ns++; ++ ns++; + break; + case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: + /* Fallthrough */ +@@ -2993,9 +3002,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, + + if (priv->sh) { + if (priv->q_counters != NULL && +- strcmp(ctr_name, "out_of_buffer") == 0) ++ strcmp(ctr_name, "out_of_buffer") == 0) { ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process"); ++ rte_errno = ENOTSUP; ++ return 1; ++ } + return mlx5_devx_cmd_queue_counter_query + (priv->q_counters, 0, (uint32_t *)stat); ++ } + MKSTR(path, "%s/ports/%d/hw_counters/%s", + priv->sh->ibdev_path, + priv->dev_port, +diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c +index 3a182de248..584a51b393 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.c ++++ b/dpdk/drivers/net/mlx5/mlx5.c +@@ -906,7 +906,7 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh) + */ + case MLX5_IPOOL_MLX5_FLOW: + cfg.size = sh->config.dv_flow_en ? 
+- sizeof(struct mlx5_flow_handle) : ++ RTE_ALIGN_MUL_CEIL(sizeof(struct mlx5_flow_handle), 8) : + MLX5_FLOW_HANDLE_VERBS_SIZE; + break; + #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) +@@ -1689,7 +1689,8 @@ mlx5_init_shared_dev_registers(struct mlx5_dev_ctx_shared *sh) + } else { + DRV_LOG(DEBUG, "ASO register: NONE"); + } +- mlx5_init_hws_flow_tags_registers(sh); ++ if (sh->config.dv_flow_en == 2) ++ mlx5_init_hws_flow_tags_registers(sh); + } + + /** +@@ -2163,6 +2164,7 @@ int + mlx5_proc_priv_init(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_proc_priv *ppriv; + size_t ppriv_size; + +@@ -2183,6 +2185,9 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + dev->process_private = ppriv; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + priv->sh->pppriv = ppriv; ++ /* Check and try to map HCA PCI BAR to allow reading real time. */ ++ if (sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) ++ mlx5_txpp_map_hca_bar(dev); + return 0; + } + +@@ -2267,6 +2272,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) + mlx5_indirect_list_handles_release(dev); + #ifdef HAVE_MLX5_HWS_SUPPORT + flow_hw_destroy_vport_action(dev); ++ /* dr context will be closed after mlx5_os_free_shared_dr. */ + flow_hw_resource_release(dev); + flow_hw_clear_port_info(dev); + #endif +@@ -2279,7 +2285,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) + mlx5_free(priv->rxq_privs); + priv->rxq_privs = NULL; + } +- if (priv->txqs != NULL) { ++ if (priv->txqs != NULL && dev->data->tx_queues != NULL) { + /* XXX race condition if mlx5_tx_burst() is still running. */ + rte_delay_us_sleep(1000); + for (i = 0; (i != priv->txqs_n); ++i) +@@ -2288,16 +2294,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) + priv->txqs = NULL; + } + mlx5_proc_priv_uninit(dev); ++ if (priv->drop_queue.hrxq) ++ mlx5_drop_action_destroy(dev); + if (priv->q_counters) { + mlx5_devx_cmd_destroy(priv->q_counters); + priv->q_counters = NULL; + } +- if (priv->drop_queue.hrxq) +- mlx5_drop_action_destroy(dev); +- if (priv->mreg_cp_tbl) +- mlx5_hlist_destroy(priv->mreg_cp_tbl); + mlx5_mprq_free_mp(dev); + mlx5_os_free_shared_dr(priv); ++#ifdef HAVE_MLX5_HWS_SUPPORT ++ if (priv->dr_ctx) { ++ claim_zero(mlx5dr_context_close(priv->dr_ctx)); ++ priv->dr_ctx = NULL; ++ } ++#endif + if (priv->rss_conf.rss_key != NULL) + mlx5_free(priv->rss_conf.rss_key); + if (priv->reta_idx != NULL) +diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h +index 263ebead7f..55c29e31a2 100644 +--- a/dpdk/drivers/net/mlx5/mlx5.h ++++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -69,7 +69,7 @@ + #define MLX5_ROOT_TBL_MODIFY_NUM 16 + + /* Maximal number of flex items created on the port.*/ +-#define MLX5_PORT_FLEX_ITEM_NUM 4 ++#define MLX5_PORT_FLEX_ITEM_NUM 8 + + /* Maximal number of field/field parts to map into sample registers .*/ + #define MLX5_FLEX_ITEM_MAPPING_NUM 32 +@@ -263,16 +263,29 @@ struct mlx5_counter_ctrl { + struct mlx5_xstats_ctrl { + /* Number of device stats. */ + uint16_t stats_n; ++ /* Number of device stats, for the 2nd port in bond. */ ++ uint16_t stats_n_2nd; + /* Number of device stats identified by PMD. */ +- uint16_t mlx5_stats_n; ++ uint16_t mlx5_stats_n; ++ /* First device counters index. */ ++ uint16_t dev_cnt_start; + /* Index in the device counters table. */ + uint16_t dev_table_idx[MLX5_MAX_XSTATS]; ++ /* Index in the output table. 
*/ ++ uint16_t xstats_o_idx[MLX5_MAX_XSTATS]; + uint64_t base[MLX5_MAX_XSTATS]; + uint64_t xstats[MLX5_MAX_XSTATS]; + uint64_t hw_stats[MLX5_MAX_XSTATS]; + struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; ++ /* Index in the device counters table, for the 2nd port in bond. */ ++ uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS]; ++ /* Index in the output table, for the 2nd port in bond. */ ++ uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS]; + }; + ++/* xstats array size. */ ++extern const unsigned int xstats_n; ++ + struct mlx5_stats_ctrl { + /* Base for imissed counter. */ + uint64_t imissed_base; +@@ -1473,6 +1486,8 @@ struct mlx5_dev_ctx_shared { + struct mlx5_hlist *flow_tbls; /* SWS flow table. */ + struct mlx5_hlist *groups; /* HWS flow group. */ + }; ++ struct mlx5_hlist *mreg_cp_tbl; ++ /* Hash table of Rx metadata register copy table. */ + struct mlx5_flow_tunnel_hub *tunnel_hub; + /* Direct Rules tables for FDB, NIC TX+RX */ + void *dr_drop_action; /* Pointer to DR drop action, any domain. */ +@@ -1862,11 +1877,7 @@ struct mlx5_priv { + rte_spinlock_t hw_ctrl_lock; + LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; + LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; +- struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; +- struct rte_flow_template_table *hw_esw_sq_miss_tbl; +- struct rte_flow_template_table *hw_esw_zero_tbl; +- struct rte_flow_template_table *hw_tx_meta_cpy_tbl; +- struct rte_flow_template_table *hw_lacp_rx_tbl; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; + struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; + struct rte_flow_actions_template *hw_tx_repr_tagging_at; + struct rte_flow_template_table *hw_tx_repr_tagging_tbl; +@@ -1875,7 +1886,6 @@ struct mlx5_priv { + uint32_t ctrl_flows; /* Control flow rules. */ + rte_spinlock_t flow_list_lock; + struct mlx5_obj_ops obj_ops; /* HW objects operations. */ +- LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */ + LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */ + struct mlx5_list *hrxqs; /* Hash Rx queues. */ + LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */ +@@ -1900,8 +1910,6 @@ struct mlx5_priv { + int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */ + int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */ + struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */ +- struct mlx5_hlist *mreg_cp_tbl; +- /* Hash table of Rx metadata register copy table. */ + struct mlx5_mtr_config mtr_config; /* Meter configuration */ + uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */ + struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */ +@@ -1930,6 +1938,7 @@ struct mlx5_priv { + uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */ + struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */ + struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */ ++ bool hws_rule_flushing; /**< Whether this port is in rules flushing stage. */ + #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) + /* Item template list. */ + LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt; +@@ -1989,6 +1998,30 @@ enum dr_dump_rec_type { + DR_DUMP_REC_TYPE_PMD_COUNTER = 4430, + }; + ++#if defined(HAVE_MLX5_HWS_SUPPORT) ++static __rte_always_inline struct mlx5_hw_q_job * ++flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue) ++{ ++ MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size); ++ return priv->hw_q[queue].job_idx ? 
++ priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL; ++} ++ ++static __rte_always_inline void ++flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue) ++{ ++ MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size); ++ priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job; ++} ++ ++struct mlx5_hw_q_job * ++mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue, ++ const struct rte_flow_action_handle *handle, ++ void *user_data, void *query_data, ++ enum mlx5_hw_job_type type, ++ struct rte_flow_error *error); ++#endif ++ + /** + * Indicates whether HW objects operations can be created by DevX. + * +@@ -2131,8 +2164,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + int mlx5_os_read_dev_stat(struct mlx5_priv *priv, + const char *ctr_name, uint64_t *stat); +-int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats); +-int mlx5_os_get_stats_n(struct rte_eth_dev *dev); ++int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats); ++int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec); + void mlx5_os_stats_init(struct rte_eth_dev *dev); + int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); + +@@ -2394,11 +2428,12 @@ int mlx5_aso_flow_hit_queue_poll_start(struct mlx5_dev_ctx_shared *sh); + int mlx5_aso_flow_hit_queue_poll_stop(struct mlx5_dev_ctx_shared *sh); + void mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh, + enum mlx5_access_aso_opc_mod aso_opc_mod); +-int mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, +- struct mlx5_aso_mtr *mtr, struct mlx5_mtr_bulk *bulk, +- void *user_data, bool push); +-int mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue, +- struct mlx5_aso_mtr *mtr); ++int mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue, ++ struct mlx5_aso_mtr *mtr, ++ struct mlx5_mtr_bulk *bulk, ++ struct mlx5_hw_q_job *job, bool push); ++int mlx5_aso_mtr_wait(struct mlx5_priv *priv, ++ struct mlx5_aso_mtr *mtr, bool is_tmpl_api); + int mlx5_aso_ct_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, + struct mlx5_aso_ct_action *ct, + const struct rte_flow_action_conntrack *profile, +@@ -2458,11 +2493,12 @@ void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher, + void *key, const struct rte_flow_item *item, + bool is_inner); + int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp, +- uint32_t idx, uint32_t *pos, +- bool is_inner, uint32_t *def); ++ uint32_t idx, uint32_t *pos, bool is_inner); + int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item, + void *flex, uint32_t byte_off, +- bool is_mask, bool tunnel, uint32_t *value); ++ bool tunnel, uint32_t *value); ++int mlx5_flex_get_tunnel_mode(const struct rte_flow_item *item, ++ enum rte_flow_item_flex_tunnel_mode *tunnel_mode); + int mlx5_flex_acquire_index(struct rte_eth_dev *dev, + struct rte_flow_item_flex_handle *handle, + bool acquire); +diff --git a/dpdk/drivers/net/mlx5/mlx5_devx.c b/dpdk/drivers/net/mlx5/mlx5_devx.c +index 9fa400fc48..4f08ddf899 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_devx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_devx.c +@@ -592,7 +592,8 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq) + DRV_LOG(ERR, "Failed to create CQ."); + goto error; + } +- rxq_data->delay_drop = priv->config.std_delay_drop; ++ if (!rxq_data->shared || !rxq_ctrl->started) ++ rxq_data->delay_drop = 
priv->config.std_delay_drop; + /* Create RQ using DevX API. */ + ret = mlx5_rxq_create_devx_rq_resources(rxq); + if (ret) { +diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +index ab30e2c215..8f29e58cda 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c ++++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c +@@ -146,6 +146,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev) + ret = mlx5_proc_priv_init(dev); + if (ret) + return ret; ++ ret = mlx5_dev_set_mtu(dev, dev->data->mtu); ++ if (ret) { ++ DRV_LOG(ERR, "port %u failed to set MTU to %u", dev->data->port_id, ++ dev->data->mtu); ++ return ret; ++ } + return 0; + } + +@@ -345,6 +351,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; + mlx5_set_default_params(dev, info); + mlx5_set_txlimit_params(dev, info); ++ info->rx_desc_lim.nb_max = ++ 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; ++ info->tx_desc_lim.nb_max = ++ 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; + if (priv->sh->cdev->config.hca_attr.mem_rq_rmp && + priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new) + info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE; +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c +index 85e8c77c81..1e9484f372 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.c +@@ -1748,13 +1748,13 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev) + opriv->domain_id != priv->domain_id || + opriv->mark_enabled) + continue; +- LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) { ++ LIST_FOREACH(rxq_ctrl, &opriv->sh->shared_rxqs, share_entry) { + rxq_ctrl->rxq.mark = 1; + } + opriv->mark_enabled = 1; + } + } else { +- LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { ++ LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) { + rxq_ctrl->rxq.mark = 1; + } + priv->mark_enabled = 1; +@@ -1953,18 +1953,20 @@ mlx5_flow_rxq_dynf_set(struct rte_eth_dev *dev) + if (rxq == NULL || rxq->ctrl == NULL) + continue; + data = &rxq->ctrl->rxq; +- if (!rte_flow_dynf_metadata_avail()) { +- data->dynf_meta = 0; +- data->flow_meta_mask = 0; +- data->flow_meta_offset = -1; +- data->flow_meta_port_mask = 0; +- } else { +- data->dynf_meta = 1; +- data->flow_meta_mask = rte_flow_dynf_metadata_mask; +- data->flow_meta_offset = rte_flow_dynf_metadata_offs; +- data->flow_meta_port_mask = priv->sh->dv_meta_mask; ++ if (!data->shared || !rxq->ctrl->started) { ++ if (!rte_flow_dynf_metadata_avail()) { ++ data->dynf_meta = 0; ++ data->flow_meta_mask = 0; ++ data->flow_meta_offset = -1; ++ data->flow_meta_port_mask = 0; ++ } else { ++ data->dynf_meta = 1; ++ data->flow_meta_mask = rte_flow_dynf_metadata_mask; ++ data->flow_meta_offset = rte_flow_dynf_metadata_offs; ++ data->flow_meta_port_mask = priv->sh->dv_meta_mask; ++ } ++ data->mark_flag = mark_flag; + } +- data->mark_flag = mark_flag; + } + } + +@@ -2504,7 +2506,7 @@ int + flow_validate_modify_field_level(const struct rte_flow_action_modify_data *data, + struct rte_flow_error *error) + { +- if (data->level == 0) ++ if (data->level == 0 || data->field == RTE_FLOW_FIELD_FLEX_ITEM) + return 0; + if (data->field != RTE_FLOW_FIELD_TAG && + data->field != (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) +@@ -5228,8 +5230,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id, + }; + + /* Check if already registered. 
*/ +- MLX5_ASSERT(priv->mreg_cp_tbl); +- entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx); ++ MLX5_ASSERT(priv->sh->mreg_cp_tbl); ++ entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx); + if (!entry) + return NULL; + return container_of(entry, struct mlx5_flow_mreg_copy_resource, +@@ -5268,10 +5270,10 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev, + return; + mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP], + flow->rix_mreg_copy); +- if (!mcp_res || !priv->mreg_cp_tbl) ++ if (!mcp_res || !priv->sh->mreg_cp_tbl) + return; + MLX5_ASSERT(mcp_res->rix_flow); +- mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent); ++ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent); + flow->rix_mreg_copy = 0; + } + +@@ -5293,14 +5295,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev) + uint32_t mark_id; + + /* Check if default flow is registered. */ +- if (!priv->mreg_cp_tbl) ++ if (!priv->sh->mreg_cp_tbl) + return; + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; +- entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx); ++ entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx); + if (!entry) + return; +- mlx5_hlist_unregister(priv->mreg_cp_tbl, entry); ++ mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry); + } + + /** +@@ -5338,7 +5340,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev, + */ + mark_id = MLX5_DEFAULT_COPY_ID; + ctx.data = &mark_id; +- if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx)) ++ if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx)) + return 0; + mcp_res = flow_mreg_add_copy_action(dev, mark_id, error); + if (!mcp_res) +@@ -5492,6 +5494,7 @@ flow_hairpin_split(struct rte_eth_dev *dev, + } + break; + case RTE_FLOW_ACTION_TYPE_COUNT: ++ case RTE_FLOW_ACTION_TYPE_AGE: + if (encap) { + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); +@@ -5817,8 +5820,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + struct mlx5_rte_flow_item_tag *tag_item_spec; + struct mlx5_rte_flow_item_tag *tag_item_mask; + uint32_t tag_id = 0; +- struct rte_flow_item *vlan_item_dst = NULL; +- const struct rte_flow_item *vlan_item_src = NULL; ++ bool vlan_actions; ++ struct rte_flow_item *orig_sfx_items = sfx_items; + const struct rte_flow_item *orig_items = items; + struct rte_flow_action *hw_mtr_action; + struct rte_flow_action *action_pre_head = NULL; +@@ -5835,6 +5838,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + + /* Prepare the suffix subflow items. */ + tag_item = sfx_items++; ++ tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + int item_type = items->type; + +@@ -5857,10 +5861,13 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + sfx_items++; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: +- /* Determine if copy vlan item below. */ +- vlan_item_src = items; +- vlan_item_dst = sfx_items++; +- vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; ++ /* ++ * Copy VLAN items in case VLAN actions are performed. ++ * If there are no VLAN actions, these items will be VOID. ++ */ ++ memcpy(sfx_items, items, sizeof(*sfx_items)); ++ sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; ++ sfx_items++; + break; + default: + break; +@@ -5877,6 +5884,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action = actions_pre++; + } + /* Prepare the actions for prefix and suffix flow. 
*/ ++ vlan_actions = false; + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + struct rte_flow_action *action_cur = NULL; + +@@ -5907,16 +5915,7 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + break; + case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: +- if (vlan_item_dst && vlan_item_src) { +- memcpy(vlan_item_dst, vlan_item_src, +- sizeof(*vlan_item_dst)); +- /* +- * Convert to internal match item, it is used +- * for vlan push and set vid. +- */ +- vlan_item_dst->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_VLAN; +- } ++ vlan_actions = true; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + if (fm->def_policy) +@@ -5931,6 +5930,14 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + actions_sfx++ : actions_pre++; + memcpy(action_cur, actions, sizeof(struct rte_flow_action)); + } ++ /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ ++ if (!vlan_actions) { ++ struct rte_flow_item *it = orig_sfx_items; ++ ++ for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) ++ if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ it->type = RTE_FLOW_ITEM_TYPE_VOID; ++ } + /* Add end action to the actions. */ + actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; + if (priv->sh->meter_aso_en) { +@@ -6020,8 +6027,6 @@ flow_meter_split_prep(struct rte_eth_dev *dev, + tag_action->type = (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_TAG; + tag_action->conf = set_tag; +- tag_item->type = (enum rte_flow_item_type) +- MLX5_RTE_FLOW_ITEM_TYPE_TAG; + tag_item->spec = tag_item_spec; + tag_item->last = NULL; + tag_item->mask = tag_item_mask; +@@ -6849,6 +6854,19 @@ flow_meter_create_drop_flow_with_org_pattern(struct rte_eth_dev *dev, + &drop_split_info, error); + } + ++static int ++flow_count_vlan_items(const struct rte_flow_item items[]) ++{ ++ int items_n = 0; ++ ++ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ if (items->type == RTE_FLOW_ITEM_TYPE_VLAN || ++ items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) ++ items_n++; ++ } ++ return items_n; ++} ++ + /** + * The splitting for meter feature. + * +@@ -6904,6 +6922,7 @@ flow_create_split_meter(struct rte_eth_dev *dev, + size_t act_size; + size_t item_size; + int actions_n = 0; ++ int vlan_items_n = 0; + int ret = 0; + + if (priv->mtr_en) +@@ -6963,9 +6982,11 @@ flow_create_split_meter(struct rte_eth_dev *dev, + act_size = (sizeof(struct rte_flow_action) * + (actions_n + METER_PREFIX_ACTION)) + + sizeof(struct mlx5_rte_flow_action_set_tag); +- /* Suffix items: tag, vlan, port id, end. */ +-#define METER_SUFFIX_ITEM 4 +- item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + ++ /* Flow can have multiple VLAN items. Account for them in suffix items. */ ++ vlan_items_n = flow_count_vlan_items(items); ++ /* Suffix items: tag, [vlans], port id, end. 
*/ ++#define METER_SUFFIX_ITEM 3 ++ item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) + + sizeof(struct mlx5_rte_flow_item_tag) * 2; + sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), + 0, SOCKET_ID_ANY); +@@ -8080,7 +8101,9 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, + #ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (priv->sh->config.dv_flow_en == 2 && + type == MLX5_FLOW_TYPE_GEN) { ++ priv->hws_rule_flushing = true; + flow_hw_q_flow_flush(dev, NULL); ++ priv->hws_rule_flushing = false; + return; + } + #endif +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h +index 6dde9de688..01f0eab1fa 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow.h ++++ b/dpdk/drivers/net/mlx5/mlx5_flow.h +@@ -77,7 +77,7 @@ enum mlx5_indirect_type { + /* Now, the maximal ports will be supported is 16, action number is 32M. */ + #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 + +-#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22 ++#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25 + #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) + + /* 29-31: type, 25-28: owner port, 0-24: index */ +@@ -157,6 +157,9 @@ struct mlx5_flow_action_copy_mreg { + /* Matches on source queue. */ + struct mlx5_rte_flow_item_sq { + uint32_t queue; /* DevX SQ number */ ++#ifdef RTE_ARCH_64 ++ uint32_t reserved; ++#endif + }; + + /* Feature name to allocate metadata register. */ +@@ -1759,6 +1762,28 @@ flow_hw_get_reg_id_from_ctx(void *dr_ctx, + return REG_NON; + } + ++static __rte_always_inline int ++flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val) ++{ ++#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) ++ uint32_t port; ++ ++ MLX5_ETH_FOREACH_DEV(port, NULL) { ++ struct mlx5_priv *priv; ++ priv = rte_eth_devices[port].data->dev_private; ++ ++ if (priv->dr_ctx == dr_ctx) { ++ *port_val = port; ++ return 0; ++ } ++ } ++#else ++ RTE_SET_USED(dr_ctx); ++ RTE_SET_USED(port_val); ++#endif ++ return -EINVAL; ++} ++ + void flow_hw_set_port_info(struct rte_eth_dev *dev); + void flow_hw_clear_port_info(struct rte_eth_dev *dev); + int flow_hw_create_vport_action(struct rte_eth_dev *dev); +@@ -2418,13 +2443,13 @@ enum mlx5_flow_ctrl_rx_eth_pattern_type { + + /* All types of RSS actions used in control flow rules. */ + enum mlx5_flow_ctrl_rx_expanded_rss_type { +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP = 0, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX, + }; + +@@ -2446,6 +2471,25 @@ struct mlx5_flow_hw_ctrl_rx { + [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; + }; + ++/* Contains all templates required for control flow rules in FDB with HWS. 
*/ ++struct mlx5_flow_hw_ctrl_fdb { ++ struct rte_flow_pattern_template *esw_mgr_items_tmpl; ++ struct rte_flow_actions_template *regc_jump_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; ++ struct rte_flow_pattern_template *regc_sq_items_tmpl; ++ struct rte_flow_actions_template *port_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_sq_miss_tbl; ++ struct rte_flow_pattern_template *port_items_tmpl; ++ struct rte_flow_actions_template *jump_one_actions_tmpl; ++ struct rte_flow_template_table *hw_esw_zero_tbl; ++ struct rte_flow_pattern_template *tx_meta_items_tmpl; ++ struct rte_flow_actions_template *tx_meta_actions_tmpl; ++ struct rte_flow_template_table *hw_tx_meta_cpy_tbl; ++ struct rte_flow_pattern_template *lacp_rx_items_tmpl; ++ struct rte_flow_actions_template *lacp_rx_actions_tmpl; ++ struct rte_flow_template_table *hw_lacp_rx_tbl; ++}; ++ + #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) + #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) + #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) +@@ -2967,6 +3011,9 @@ flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx) + void + mlx5_indirect_list_handles_release(struct rte_eth_dev *dev); + #ifdef HAVE_MLX5_HWS_SUPPORT ++ ++#define MLX5_REPR_STC_MEMORY_LOG 11 ++ + struct mlx5_mirror; + void + mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror); +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +index f311443472..b78d80ab44 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +@@ -489,7 +489,7 @@ mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe) + int i; + + DRV_LOG(ERR, "Error cqe:"); +- for (i = 0; i < 16; i += 4) ++ for (i = 0; i < (int)sizeof(struct mlx5_error_cqe) / 4; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], + cqe[i + 2], cqe[i + 3]); + DRV_LOG(ERR, "\nError wqe:"); +@@ -509,8 +509,8 @@ mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq) + { + struct mlx5_aso_cq *cq = &sq->cq; + uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1); +- volatile struct mlx5_err_cqe *cqe = +- (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx]; ++ volatile struct mlx5_error_cqe *cqe = ++ (volatile struct mlx5_error_cqe *)&cq->cq_obj.cqes[idx]; + + cq->errors++; + idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n); +@@ -792,7 +792,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, + struct mlx5_aso_mtr *aso_mtr, + struct mlx5_mtr_bulk *bulk, + bool need_lock, +- void *user_data, ++ struct mlx5_hw_q_job *job, + bool push) + { + volatile struct mlx5_aso_wqe *wqe = NULL; +@@ -819,7 +819,7 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, + rte_prefetch0(&sq->sq_obj.aso_wqes[(sq->head + 1) & mask]); + /* Fill next WQE. */ + fm = &aso_mtr->fm; +- sq->elts[sq->head & mask].mtr = user_data ? user_data : aso_mtr; ++ sq->elts[sq->head & mask].user_data = job ? 
job : (void *)aso_mtr; + if (aso_mtr->type == ASO_METER_INDIRECT) { + if (likely(sh->config.dv_flow_en == 2)) + pool = aso_mtr->pool; +@@ -897,24 +897,6 @@ mlx5_aso_mtr_sq_enqueue_single(struct mlx5_dev_ctx_shared *sh, + return 1; + } + +-static void +-mlx5_aso_mtrs_status_update(struct mlx5_aso_sq *sq, uint16_t aso_mtrs_nums) +-{ +- uint16_t size = 1 << sq->log_desc_n; +- uint16_t mask = size - 1; +- uint16_t i; +- struct mlx5_aso_mtr *aso_mtr = NULL; +- uint8_t exp_state = ASO_METER_WAIT; +- +- for (i = 0; i < aso_mtrs_nums; ++i) { +- aso_mtr = sq->elts[(sq->tail + i) & mask].mtr; +- MLX5_ASSERT(aso_mtr); +- (void)__atomic_compare_exchange_n(&aso_mtr->state, +- &exp_state, ASO_METER_READY, +- false, __ATOMIC_RELAXED, __ATOMIC_RELAXED); +- } +-} +- + static void + mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + { +@@ -925,7 +907,7 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + uint32_t idx; + uint32_t next_idx = cq->cq_ci & mask; + uint16_t max; +- uint16_t n = 0; ++ uint16_t i, n = 0; + int ret; + + if (need_lock) +@@ -957,7 +939,19 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + cq->cq_ci++; + } while (1); + if (likely(n)) { +- mlx5_aso_mtrs_status_update(sq, n); ++ uint8_t exp_state = ASO_METER_WAIT; ++ struct mlx5_aso_mtr *aso_mtr; ++ __rte_unused bool verdict; ++ ++ for (i = 0; i < n; ++i) { ++ aso_mtr = sq->elts[(sq->tail + i) & mask].mtr; ++ MLX5_ASSERT(aso_mtr); ++ verdict = __atomic_compare_exchange_n(&aso_mtr->state, ++ &exp_state, ASO_METER_READY, ++ false, __ATOMIC_RELAXED, ++ __ATOMIC_RELAXED); ++ MLX5_ASSERT(verdict); ++ } + sq->tail += n; + rte_io_wmb(); + cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci); +@@ -966,6 +960,82 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + rte_spinlock_unlock(&sq->sqsl); + } + ++static __rte_always_inline struct mlx5_aso_sq * ++mlx5_aso_mtr_select_sq(struct mlx5_dev_ctx_shared *sh, uint32_t queue, ++ struct mlx5_aso_mtr *mtr, bool *need_lock) ++{ ++ struct mlx5_aso_sq *sq; ++ ++ if (likely(sh->config.dv_flow_en == 2) && ++ mtr->type == ASO_METER_INDIRECT) { ++ if (queue == MLX5_HW_INV_QUEUE) { ++ sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; ++ *need_lock = true; ++ } else { ++ sq = &mtr->pool->sq[queue]; ++ *need_lock = false; ++ } ++ } else { ++ sq = &sh->mtrmng->pools_mng.sq; ++ *need_lock = true; ++ } ++ return sq; ++} ++ ++#if defined(HAVE_MLX5_HWS_SUPPORT) ++static void ++mlx5_aso_poll_cq_mtr_hws(struct mlx5_priv *priv, struct mlx5_aso_sq *sq) ++{ ++#define MLX5_HWS_MTR_CMPL_NUM 4 ++ ++ int i, ret; ++ struct mlx5_aso_mtr *mtr; ++ uint8_t exp_state = ASO_METER_WAIT; ++ struct rte_flow_op_result res[MLX5_HWS_MTR_CMPL_NUM]; ++ __rte_unused bool verdict; ++ ++ rte_spinlock_lock(&sq->sqsl); ++repeat: ++ ret = mlx5_aso_pull_completion(sq, res, MLX5_HWS_MTR_CMPL_NUM); ++ if (ret) { ++ for (i = 0; i < ret; i++) { ++ struct mlx5_hw_q_job *job = res[i].user_data; ++ ++ MLX5_ASSERT(job); ++ mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, ++ MLX5_INDIRECT_ACTION_IDX_GET(job->action)); ++ MLX5_ASSERT(mtr); ++ verdict = __atomic_compare_exchange_n(&mtr->state, ++ &exp_state, ASO_METER_READY, ++ false, __ATOMIC_RELAXED, ++ __ATOMIC_RELAXED); ++ MLX5_ASSERT(verdict); ++ flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv)); ++ } ++ if (ret == MLX5_HWS_MTR_CMPL_NUM) ++ goto repeat; ++ } ++ rte_spinlock_unlock(&sq->sqsl); ++ ++#undef MLX5_HWS_MTR_CMPL_NUM ++} ++#else ++static void ++mlx5_aso_poll_cq_mtr_hws(__rte_unused struct mlx5_priv *priv, 
__rte_unused struct mlx5_aso_sq *sq) ++{ ++ MLX5_ASSERT(false); ++} ++#endif ++ ++static void ++mlx5_aso_poll_cq_mtr_sws(__rte_unused struct mlx5_priv *priv, ++ struct mlx5_aso_sq *sq) ++{ ++ mlx5_aso_mtr_completion_handle(sq, true); ++} ++ ++typedef void (*poll_cq_t)(struct mlx5_priv *, struct mlx5_aso_sq *); ++ + /** + * Update meter parameter by send WQE. + * +@@ -980,39 +1050,29 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, +- struct mlx5_aso_mtr *mtr, +- struct mlx5_mtr_bulk *bulk, +- void *user_data, +- bool push) ++mlx5_aso_meter_update_by_wqe(struct mlx5_priv *priv, uint32_t queue, ++ struct mlx5_aso_mtr *mtr, ++ struct mlx5_mtr_bulk *bulk, ++ struct mlx5_hw_q_job *job, bool push) + { +- struct mlx5_aso_sq *sq; +- uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; + bool need_lock; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; ++ struct mlx5_aso_sq *sq = ++ mlx5_aso_mtr_select_sq(sh, queue, mtr, &need_lock); ++ uint32_t poll_wqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; ++ poll_cq_t poll_mtr_cq = ++ job ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws; + int ret; + +- if (likely(sh->config.dv_flow_en == 2) && +- mtr->type == ASO_METER_INDIRECT) { +- if (queue == MLX5_HW_INV_QUEUE) { +- sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; +- need_lock = true; +- } else { +- sq = &mtr->pool->sq[queue]; +- need_lock = false; +- } +- } else { +- sq = &sh->mtrmng->pools_mng.sq; +- need_lock = true; +- } + if (queue != MLX5_HW_INV_QUEUE) { + ret = mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk, +- need_lock, user_data, push); ++ need_lock, job, push); + return ret > 0 ? 0 : -1; + } + do { +- mlx5_aso_mtr_completion_handle(sq, need_lock); ++ poll_mtr_cq(priv, sq); + if (mlx5_aso_mtr_sq_enqueue_single(sh, sq, mtr, bulk, +- need_lock, NULL, true)) ++ need_lock, job, true)) + return 0; + /* Waiting for wqe resource. */ + rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY); +@@ -1036,32 +1096,22 @@ mlx5_aso_meter_update_by_wqe(struct mlx5_dev_ctx_shared *sh, uint32_t queue, + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_aso_mtr_wait(struct mlx5_dev_ctx_shared *sh, uint32_t queue, +- struct mlx5_aso_mtr *mtr) ++mlx5_aso_mtr_wait(struct mlx5_priv *priv, ++ struct mlx5_aso_mtr *mtr, bool is_tmpl_api) + { ++ bool need_lock; + struct mlx5_aso_sq *sq; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; + uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES; +- uint8_t state; +- bool need_lock; ++ uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED); ++ poll_cq_t poll_mtr_cq = ++ is_tmpl_api ? 
mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws; + +- if (likely(sh->config.dv_flow_en == 2) && +- mtr->type == ASO_METER_INDIRECT) { +- if (queue == MLX5_HW_INV_QUEUE) { +- sq = &mtr->pool->sq[mtr->pool->nb_sq - 1]; +- need_lock = true; +- } else { +- sq = &mtr->pool->sq[queue]; +- need_lock = false; +- } +- } else { +- sq = &sh->mtrmng->pools_mng.sq; +- need_lock = true; +- } +- state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED); + if (state == ASO_METER_READY || state == ASO_METER_WAIT_ASYNC) + return 0; ++ sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock); + do { +- mlx5_aso_mtr_completion_handle(sq, need_lock); ++ poll_mtr_cq(priv, sq); + if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) == + ASO_METER_READY) + return 0; +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +index 115d730317..09c7068339 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +@@ -267,21 +267,41 @@ struct field_modify_info modify_tcp[] = { + {0, 0, 0}, + }; + +-static void ++enum mlx5_l3_tunnel_detection { ++ l3_tunnel_none, ++ l3_tunnel_outer, ++ l3_tunnel_inner ++}; ++ ++static enum mlx5_l3_tunnel_detection + mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, +- uint8_t next_protocol, uint64_t *item_flags, +- int *tunnel) ++ uint8_t next_protocol, uint64_t item_flags, ++ uint64_t *l3_tunnel_flag) + { ++ enum mlx5_l3_tunnel_detection td = l3_tunnel_none; ++ + MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || + item->type == RTE_FLOW_ITEM_TYPE_IPV6); +- if (next_protocol == IPPROTO_IPIP) { +- *item_flags |= MLX5_FLOW_LAYER_IPIP; +- *tunnel = 1; +- } +- if (next_protocol == IPPROTO_IPV6) { +- *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; +- *tunnel = 1; ++ if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) { ++ switch (next_protocol) { ++ case IPPROTO_IPIP: ++ td = l3_tunnel_outer; ++ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP; ++ break; ++ case IPPROTO_IPV6: ++ td = l3_tunnel_outer; ++ *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP; ++ break; ++ default: ++ break; ++ } ++ } else { ++ td = l3_tunnel_inner; ++ *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ? ++ MLX5_FLOW_LAYER_IPIP : ++ MLX5_FLOW_LAYER_IPV6_ENCAP; + } ++ return td; + } + + static inline struct mlx5_hlist * +@@ -1461,7 +1481,6 @@ mlx5_modify_flex_item(const struct rte_eth_dev *dev, + const struct mlx5_flex_pattern_field *map; + uint32_t offset = data->offset; + uint32_t width_left = width; +- uint32_t def; + uint32_t cur_width = 0; + uint32_t tmp_ofs; + uint32_t idx = 0; +@@ -1486,7 +1505,7 @@ mlx5_modify_flex_item(const struct rte_eth_dev *dev, + tmp_ofs = pos < data->offset ? data->offset - pos : 0; + for (j = i; i < flex->mapnum && width_left > 0; ) { + map = flex->map + i; +- id = mlx5_flex_get_sample_id(flex, i, &pos, false, &def); ++ id = mlx5_flex_get_sample_id(flex, i, &pos, false); + if (id == -1) { + i++; + /* All left length is dummy */ +@@ -1505,7 +1524,7 @@ mlx5_modify_flex_item(const struct rte_eth_dev *dev, + * 2. Width has been covered. 
+ */ + for (j = i + 1; j < flex->mapnum; j++) { +- tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false, &def); ++ tmp_id = mlx5_flex_get_sample_id(flex, j, &pos, false); + if (tmp_id == -1) { + i = j; + pos -= flex->map[j].width; +@@ -1925,7 +1944,7 @@ mlx5_flow_field_id_to_modify_info + if (priv->sh->config.dv_flow_en == 2) + reg = flow_hw_get_reg_id(dev, + RTE_FLOW_ITEM_TYPE_TAG, +- data->level); ++ tag_index); + else + reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, + tag_index, error); +@@ -5484,13 +5503,6 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev, + &grp_info, error); + if (ret) + return ret; +- if (attributes->group == target_group && +- !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | +- MLX5_FLOW_ACTION_TUNNEL_MATCH))) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, NULL, +- "target group must be other than" +- " the current flow group"); + if (table == 0) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, +@@ -5952,7 +5964,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx) + "cannot allocate resource memory"); + return NULL; + } +- rte_memcpy(&entry->ft_type, ++ rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)), + RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), + key_len + data_len); + if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) +@@ -7062,11 +7074,13 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev, + } + + static int +-validate_integrity_bits(const struct rte_flow_item_integrity *mask, ++validate_integrity_bits(const void *arg, + int64_t pattern_flags, uint64_t l3_flags, + uint64_t l4_flags, uint64_t ip4_flag, + struct rte_flow_error *error) + { ++ const struct rte_flow_item_integrity *mask = arg; ++ + if (mask->l3_ok && !(pattern_flags & l3_flags)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, +@@ -7255,6 +7269,40 @@ flow_dv_validate_item_flex(struct rte_eth_dev *dev, + return 0; + } + ++static __rte_always_inline uint8_t ++mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item, ++ enum MLX5_SET_MATCHER key_type) ++{ ++#define MLX5_L3_NEXT_PROTOCOL(i, ms) \ ++ ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \ ++ ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \ ++ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \ ++ ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \ ++ (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \ ++ ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\ ++ 0xff) ++ ++ uint8_t next_protocol; ++ ++ if (l3_item->mask != NULL && l3_item->spec != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); ++ if (next_protocol) ++ next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec); ++ else ++ next_protocol = 0xff; ++ } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); ++ } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) { ++ next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec); ++ } else { ++ /* Reset for inner layer. */ ++ next_protocol = 0xff; ++ } ++ return next_protocol; ++ ++#undef MLX5_L3_NEXT_PROTOCOL ++} ++ + /** + * Validate IB BTH item. 
+ * +@@ -7451,6 +7499,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + is_root = (uint64_t)ret; + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ enum mlx5_l3_tunnel_detection l3_tunnel_detection; ++ uint64_t l3_tunnel_flag; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int type = items->type; + +@@ -7528,8 +7578,16 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + vlan_m = items->mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + ret = flow_dv_validate_item_ipv4(dev, items, item_flags, + last_item, ether_type, + error); +@@ -7537,23 +7595,20 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv4 *) +- items->mask)->hdr.next_proto_id) { +- next_protocol = +- ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- next_protocol &= +- ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + ret = mlx5_flow_validate_item_ipv6(items, item_flags, + last_item, + ether_type, +@@ -7563,22 +7618,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + return ret; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto) { +- item_ipv6_proto = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol &= +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + ret = flow_dv_validate_item_ipv6_frag_ext(items, +@@ -7589,19 +7630,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + last_item = tunnel ? + MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; +- if (items->mask != NULL && +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header) { +- next_protocol = +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->spec)->hdr.next_header; +- next_protocol &= +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header; +- } else { +- /* Reset for inner layer. 
*/ +- next_protocol = 0xff; +- } ++ next_protocol = mlx5_flow_l3_next_protocol ++ (items, (enum MLX5_SET_MATCHER)-1); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + ret = mlx5_flow_validate_item_tcp +@@ -7819,6 +7849,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + tunnel != 0, error); + if (ret < 0) + return ret; ++ /* Reset for next proto, it is unknown. */ ++ next_protocol = 0xff; + break; + case RTE_FLOW_ITEM_TYPE_METER_COLOR: + ret = flow_dv_validate_item_meter_color(dev, items, +@@ -9444,22 +9476,23 @@ flow_dv_translate_item_gre(void *key, const struct rte_flow_item *item, + } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; + uint16_t protocol_m, protocol_v; + +- if (key_type & MLX5_SET_MATCHER_M) ++ if (key_type & MLX5_SET_MATCHER_M) { + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff); +- else ++ if (!gre_m) ++ gre_m = &rte_flow_item_gre_mask; ++ gre_v = gre_m; ++ } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + IPPROTO_GRE); +- if (!gre_v) { +- gre_v = &empty_gre; +- gre_m = &empty_gre; +- } else { +- if (!gre_m) ++ if (!gre_v) { ++ gre_v = &empty_gre; ++ gre_m = &empty_gre; ++ } else if (!gre_m) { + gre_m = &rte_flow_item_gre_mask; ++ } ++ if (key_type == MLX5_SET_MATCHER_HS_V) ++ gre_m = gre_v; + } +- if (key_type & MLX5_SET_MATCHER_M) +- gre_v = gre_m; +- else if (key_type == MLX5_SET_MATCHER_HS_V) +- gre_m = gre_v; + gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); + gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); + MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, +@@ -9985,14 +10018,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, + { + const struct rte_flow_item_geneve_opt *geneve_opt_m; + const struct rte_flow_item_geneve_opt *geneve_opt_v; +- const struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec; +- void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); ++ const struct rte_flow_item_geneve_opt *orig_spec = item->spec; + void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); + rte_be32_t opt_data_key = 0, opt_data_mask = 0; +- uint32_t *data; ++ size_t option_byte_len; + int ret = 0; + +- if (MLX5_ITEM_VALID(item, key_type)) ++ if (MLX5_ITEM_VALID(item, key_type) || !orig_spec) + return -1; + MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m, + &rte_flow_item_geneve_opt_mask); +@@ -10005,36 +10037,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, + return ret; + } + } +- /* +- * Set the option length in GENEVE header if not requested. +- * The GENEVE TLV option length is expressed by the option length field +- * in the GENEVE header. +- * If the option length was not requested but the GENEVE TLV option item +- * is present we set the option length field implicitly. +- */ +- if (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) { +- if (key_type & MLX5_SET_MATCHER_M) +- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, +- MLX5_GENEVE_OPTLEN_MASK); +- else +- MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, +- geneve_opt_v->option_len + 1); +- } +- /* Set the data. 
*/ +- if (key_type == MLX5_SET_MATCHER_SW_V) +- data = geneve_opt_vv->data; +- else +- data = geneve_opt_v->data; +- if (data) { +- memcpy(&opt_data_key, data, +- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), +- sizeof(opt_data_key))); +- memcpy(&opt_data_mask, geneve_opt_m->data, +- RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), +- sizeof(opt_data_mask))); ++ /* Convert the option length from DW to bytes for using memcpy. */ ++ option_byte_len = RTE_MIN((size_t)(orig_spec->option_len * 4), ++ sizeof(rte_be32_t)); ++ if (geneve_opt_v->data) { ++ memcpy(&opt_data_key, geneve_opt_v->data, option_byte_len); ++ memcpy(&opt_data_mask, geneve_opt_m->data, option_byte_len); + MLX5_SET(fte_match_set_misc3, misc3_v, +- geneve_tlv_option_0_data, +- rte_be_to_cpu_32(opt_data_key & opt_data_mask)); ++ geneve_tlv_option_0_data, ++ rte_be_to_cpu_32(opt_data_key & opt_data_mask)); + } + return ret; + } +@@ -13658,6 +13669,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Connection is not supported"); ++ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "CT supports port indexes up to " ++ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); ++ return 0; ++ } + idx = flow_dv_aso_ct_alloc(dev, error); + if (!idx) + return rte_flow_error_set(error, rte_errno, +@@ -13707,6 +13725,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev, + int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + uint64_t last_item = wks->last_item; ++ enum mlx5_l3_tunnel_detection l3_tunnel_detection; ++ uint64_t l3_tunnel_flag; + int ret; + + switch (item_type) { +@@ -13750,94 +13770,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev, + MLX5_FLOW_LAYER_OUTER_VLAN); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &wks->item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ wks->item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ wks->item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + flow_dv_translate_item_ipv4(key, items, tunnel, + wks->group, key_type); + wks->priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv4 *) +- items->mask)->hdr.next_proto_id) { +- next_protocol = +- ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- next_protocol &= +- ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv4 *) +- (items->mask))->hdr.next_proto_id; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv4 *) +- (items->spec))->hdr.next_proto_id; +- } else { +- /* Reset for inner layer. 
*/ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ wks->item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: +- mlx5_flow_tunnel_ip_check(items, next_protocol, +- &wks->item_flags, &tunnel); ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); ++ l3_tunnel_detection = ++ mlx5_flow_tunnel_ip_check(items, next_protocol, ++ wks->item_flags, ++ &l3_tunnel_flag); ++ if (l3_tunnel_detection == l3_tunnel_inner) { ++ wks->item_flags |= l3_tunnel_flag; ++ tunnel = 1; ++ } + flow_dv_translate_item_ipv6(key, items, tunnel, + wks->group, key_type); + wks->priority = MLX5_PRIORITY_MAP_L3; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto) { +- next_protocol = +- ((const struct rte_flow_item_ipv6 *) +- items->spec)->hdr.proto; +- next_protocol &= +- ((const struct rte_flow_item_ipv6 *) +- items->mask)->hdr.proto; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6 *) +- (items->mask))->hdr.proto; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6 *) +- (items->spec))->hdr.proto; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ if (l3_tunnel_detection == l3_tunnel_outer) ++ wks->item_flags |= l3_tunnel_flag; + break; + case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: + flow_dv_translate_item_ipv6_frag_ext + (key, items, tunnel, key_type); + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : + MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; +- if (items->mask != NULL && +- items->spec != NULL && +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header) { +- next_protocol = +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->spec)->hdr.next_header; +- next_protocol &= +- ((const struct rte_flow_item_ipv6_frag_ext *) +- items->mask)->hdr.next_header; +- } else if (key_type == MLX5_SET_MATCHER_HS_M && +- items->mask != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) +- (items->mask))->hdr.next_header; +- } else if (key_type == MLX5_SET_MATCHER_HS_V && +- items->spec != NULL) { +- next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) +- (items->spec))->hdr.next_header; +- } else { +- /* Reset for inner layer. */ +- next_protocol = 0xff; +- } ++ next_protocol = mlx5_flow_l3_next_protocol(items, key_type); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + flow_dv_translate_item_tcp(key, items, tunnel, key_type); +@@ -14280,7 +14253,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, + * Avoid be overwritten by other sub mlx5_flows. + */ + if (wks.geneve_tlv_option) +- dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; ++ dev_flow->flow->geneve_tlv_option += wks.geneve_tlv_option; + return 0; + } + +@@ -15420,7 +15393,8 @@ error: + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, dh, next) { + /* hrxq is union, don't clear it if the flag is not set. 
*/ +- if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) { ++ if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq && ++ !dh->dvh.rix_sample && !dh->dvh.rix_dest_array) { + mlx5_hrxq_release(dev, dh->rix_hrxq); + dh->rix_hrxq = 0; + } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { +@@ -15884,9 +15858,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) + flow_dv_aso_ct_release(dev, flow->ct, NULL); + else if (flow->age) + flow_dv_aso_age_release(dev, flow->age); +- if (flow->geneve_tlv_option) { ++ while (flow->geneve_tlv_option) { + flow_dev_geneve_tlv_option_resource_release(priv->sh); +- flow->geneve_tlv_option = 0; ++ flow->geneve_tlv_option--; + } + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; +@@ -16350,6 +16324,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, + case RTE_FLOW_ACTION_TYPE_CONNTRACK: + ret = flow_dv_translate_create_conntrack(dev, action->conf, + err); ++ if (!ret) ++ break; + idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); + break; + default: +@@ -17675,9 +17651,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, + } + } + tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl); +- if (priority < RTE_COLOR_RED) +- flow_dv_match_meta_reg(matcher.mask.buf, +- (enum modify_reg)color_reg_c_idx, color_mask, color_mask); ++ flow_dv_match_meta_reg(matcher.mask.buf, ++ (enum modify_reg)color_reg_c_idx, color_mask, color_mask); + matcher.priority = priority; + matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, + matcher.mask.size); +@@ -17711,7 +17686,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, + static int + __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + struct mlx5_flow_meter_sub_policy *sub_policy, +- uint8_t egress, uint8_t transfer, bool match_src_port, ++ uint8_t egress, uint8_t transfer, bool *match_src_port, + struct mlx5_meter_policy_acts acts[RTE_COLORS]) + { + struct mlx5_priv *priv = dev->data->dev_private; +@@ -17726,9 +17701,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + .reserved = 0, + }; + int i; ++ uint16_t priority; + int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err); + struct mlx5_sub_policy_color_rule *color_rule; +- bool svport_match; + struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL}; + + if (ret < 0) +@@ -17761,13 +17736,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + TAILQ_INSERT_TAIL(&sub_policy->color_rules[i], + color_rule, next_port); + color_rule->src_port = priv->representor_id; +- /* No use. */ +- attr.priority = i; ++ priority = (match_src_port[i] == match_src_port[RTE_COLOR_GREEN]) ? ++ MLX5_MTR_POLICY_MATCHER_PRIO : (MLX5_MTR_POLICY_MATCHER_PRIO + 1); + /* Create matchers for colors. */ +- svport_match = (i != RTE_COLOR_RED) ? 
match_src_port : false; + if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx, +- MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy, +- &attr, svport_match, NULL, ++ priority, sub_policy, ++ &attr, match_src_port[i], NULL, + &color_rule->matcher, &flow_err)) { + DRV_LOG(ERR, "Failed to create color%u matcher.", i); + goto err_exit; +@@ -17777,7 +17751,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, + color_reg_c_idx, (enum rte_color)i, + color_rule->matcher, + acts[i].actions_n, acts[i].dv_actions, +- svport_match, NULL, &color_rule->rule, ++ match_src_port[i], NULL, &color_rule->rule, + &attr)) { + DRV_LOG(ERR, "Failed to create color%u rule.", i); + goto err_exit; +@@ -17825,7 +17799,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; + uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0; + bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX); +- bool match_src_port = false; ++ bool match_src_port[RTE_COLORS] = {false}; + int i; + + /* If RSS or Queue, no previous actions / rules is created. */ +@@ -17896,7 +17870,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + acts[i].dv_actions[acts[i].actions_n] = + port_action->action; + acts[i].actions_n++; +- match_src_port = true; ++ match_src_port[i] = true; + break; + case MLX5_FLOW_FATE_DROP: + case MLX5_FLOW_FATE_JUMP: +@@ -17948,7 +17922,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + acts[i].dv_actions[acts[i].actions_n++] = + tbl_data->jump.action; + if (mtr_policy->act_cnt[i].modify_hdr) +- match_src_port = !!transfer; ++ match_src_port[i] = !!transfer; + break; + default: + /*Queue action do nothing*/ +@@ -17962,9 +17936,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, + "Failed to create policy rules per domain."); + goto err_exit; + } +- if (match_src_port) { +- mtr_policy->match_port = match_src_port; +- mtr_policy->hierarchy_match_port = match_src_port; ++ if (match_src_port[RTE_COLOR_GREEN] || match_src_port[RTE_COLOR_YELLOW]) { ++ mtr_policy->match_port = 1; ++ mtr_policy->hierarchy_match_port = 1; + } + return 0; + err_exit: +@@ -18026,6 +18000,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) + uint8_t egress, transfer; + struct rte_flow_error error; + struct mlx5_meter_policy_acts acts[RTE_COLORS]; ++ bool match_src_port[RTE_COLORS] = {false}; + int ret; + + egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; +@@ -18101,7 +18076,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) + /* Create default policy rules. 
*/ + ret = __flow_dv_create_domain_policy_rules(dev, + &def_policy->sub_policy, +- egress, transfer, false, acts); ++ egress, transfer, match_src_port, acts); + if (ret) { + DRV_LOG(ERR, "Failed to create default policy rules."); + goto def_policy_error; +@@ -18660,7 +18635,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + struct { + struct mlx5_flow_meter_policy *fm_policy; + struct mlx5_flow_meter_info *next_fm; +- struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS]; ++ struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS]; + } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} }; + uint32_t fm_cnt = 0; + uint32_t i, j; +@@ -18694,14 +18669,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + mtr_policy = fm_info[i].fm_policy; + rte_spinlock_lock(&mtr_policy->sl); + sub_policy = mtr_policy->sub_policys[domain][0]; +- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { ++ for (j = 0; j < RTE_COLORS; j++) { + uint8_t act_n = 0; +- struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; ++ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL; + struct mlx5_flow_dv_port_id_action_resource *port_action; ++ uint8_t fate_action; + +- if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR && +- mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID) +- continue; ++ if (j == RTE_COLOR_RED) { ++ fate_action = MLX5_FLOW_FATE_DROP; ++ } else { ++ fate_action = mtr_policy->act_cnt[j].fate_action; ++ modify_hdr = mtr_policy->act_cnt[j].modify_hdr; ++ if (fate_action != MLX5_FLOW_FATE_MTR && ++ fate_action != MLX5_FLOW_FATE_PORT_ID && ++ fate_action != MLX5_FLOW_FATE_DROP) ++ continue; ++ } + color_rule = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(struct mlx5_sub_policy_color_rule), + 0, SOCKET_ID_ANY); +@@ -18713,9 +18696,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + goto err_exit; + } + color_rule->src_port = src_port; +- modify_hdr = mtr_policy->act_cnt[j].modify_hdr; + /* Prepare to create color rule. 
*/ +- if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) { ++ if (fate_action == MLX5_FLOW_FATE_MTR) { + next_fm = fm_info[i].next_fm; + if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) { + mlx5_free(color_rule); +@@ -18742,7 +18724,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + } + acts.dv_actions[act_n++] = tbl_data->jump.action; + acts.actions_n = act_n; +- } else { ++ } else if (fate_action == MLX5_FLOW_FATE_PORT_ID) { + port_action = + mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], + mtr_policy->act_cnt[j].rix_port_id_action); +@@ -18755,6 +18737,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, + acts.dv_actions[act_n++] = modify_hdr->action; + acts.dv_actions[act_n++] = port_action->action; + acts.actions_n = act_n; ++ } else { ++ acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain]; ++ acts.actions_n = act_n; + } + fm_info[i].tag_rule[j] = color_rule; + TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port); +@@ -18786,7 +18771,7 @@ err_exit: + mtr_policy = fm_info[i].fm_policy; + rte_spinlock_lock(&mtr_policy->sl); + sub_policy = mtr_policy->sub_policys[domain][0]; +- for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { ++ for (j = 0; j < RTE_COLORS; j++) { + color_rule = fm_info[i].tag_rule[j]; + if (!color_rule) + continue; +@@ -19116,8 +19101,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, + LIST_FOREACH(act, &age_info->aged_aso, next) { + nb_flows++; + if (nb_contexts) { +- context[nb_flows - 1] = +- act->age_params.context; ++ context[nb_flows - 1] = act->age_params.context; + if (!(--nb_contexts)) + break; + } +@@ -19675,11 +19659,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, + } + } + if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) { +- if (!(action_flags[RTE_COLOR_GREEN] & action_flags[RTE_COLOR_YELLOW] & +- MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)) ++ uint64_t hierarchy_type_flag = ++ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | MLX5_FLOW_ACTION_JUMP; ++ if (!(action_flags[RTE_COLOR_GREEN] & hierarchy_type_flag) || ++ !(action_flags[RTE_COLOR_YELLOW] & hierarchy_type_flag)) + return -rte_mtr_error_set(error, EINVAL, RTE_MTR_ERROR_TYPE_METER_POLICY, + NULL, +- "Meter hierarchy supports meter action only."); ++ "Unsupported action in meter hierarchy."); + } + /* If both colors have RSS, the attributes should be the same. */ + if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN], +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c +index 4ae03a23f1..58d8c61443 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c +@@ -118,28 +118,32 @@ mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item, + uint32_t pos, uint32_t width, uint32_t shift) + { + const uint8_t *ptr = item->pattern + pos / CHAR_BIT; +- uint32_t val, vbits; ++ uint32_t val, vbits, skip = pos % CHAR_BIT; + + /* Proceed the bitfield start byte. 
*/ + MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width); + MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT); + if (item->length <= pos / CHAR_BIT) + return 0; +- val = *ptr++ >> (pos % CHAR_BIT); ++ /* Bits are enumerated in byte in network order: 01234567 */ ++ val = *ptr++; + vbits = CHAR_BIT - pos % CHAR_BIT; +- pos = (pos + vbits) / CHAR_BIT; ++ pos = RTE_ALIGN_CEIL(pos, CHAR_BIT) / CHAR_BIT; + vbits = RTE_MIN(vbits, width); +- val &= RTE_BIT32(vbits) - 1; ++ /* Load bytes to cover the field width, checking pattern boundary */ + while (vbits < width && pos < item->length) { + uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT); + uint32_t tmp = *ptr++; + +- pos++; +- tmp &= RTE_BIT32(part) - 1; +- val |= tmp << vbits; ++ val |= tmp << RTE_ALIGN_CEIL(vbits, CHAR_BIT); + vbits += part; ++ pos++; + } +- return rte_bswap32(val <<= shift); ++ val = rte_cpu_to_be_32(val); ++ val <<= skip; ++ val >>= shift; ++ val &= (RTE_BIT64(width) - 1) << (sizeof(uint32_t) * CHAR_BIT - shift - width); ++ return val; + } + + #define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \ +@@ -211,21 +215,17 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v, + * Where to search the value and mask. + * @param[in] is_inner + * For inner matching or not. +- * @param[in, def] def +- * Mask generated by mapping shift and width. + * + * @return + * 0 on success, -1 to ignore. + */ + int + mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp, +- uint32_t idx, uint32_t *pos, +- bool is_inner, uint32_t *def) ++ uint32_t idx, uint32_t *pos, bool is_inner) + { + const struct mlx5_flex_pattern_field *map = tp->map + idx; + uint32_t id = map->reg_id; + +- *def = (RTE_BIT64(map->width) - 1) << map->shift; + /* Skip placeholders for DUMMY fields. */ + if (id == MLX5_INVALID_SAMPLE_REG_ID) { + *pos += map->width; +@@ -252,8 +252,6 @@ mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp, + * Mlx5 flex item sample mapping handle. + * @param[in] byte_off + * Mlx5 flex item format_select_dw. +- * @param[in] is_mask +- * Spec or mask. + * @param[in] tunnel + * Tunnel mode or not. + * @param[in, def] value +@@ -265,25 +263,23 @@ mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp, + int + mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item, + void *flex, uint32_t byte_off, +- bool is_mask, bool tunnel, uint32_t *value) ++ bool tunnel, uint32_t *value) + { + struct mlx5_flex_pattern_field *map; + struct mlx5_flex_item *tp = flex; +- uint32_t def, i, pos, val; ++ uint32_t i, pos, val; + int id; + + *value = 0; + for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) { + map = tp->map + i; +- id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def); ++ id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel); + if (id == -1) + continue; + if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM) + return -1; + if (byte_off == tp->devx_fp->sample_info[id].sample_dw_data * sizeof(uint32_t)) { + val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift); +- if (is_mask) +- val &= RTE_BE32(def); + *value |= val; + } + pos += map->width; +@@ -291,6 +287,33 @@ mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item, + return 0; + } + ++/** ++ * Get the flex parser tunnel mode. ++ * ++ * @param[in] item ++ * RTE Flex item. ++ * @param[in, out] tunnel_mode ++ * Pointer to return tunnel mode. ++ * ++ * @return ++ * 0 on success, otherwise negative error code. 
++ */ ++int ++mlx5_flex_get_tunnel_mode(const struct rte_flow_item *item, ++ enum rte_flow_item_flex_tunnel_mode *tunnel_mode) ++{ ++ if (item && item->spec && tunnel_mode) { ++ const struct rte_flow_item_flex *spec = item->spec; ++ struct mlx5_flex_item *flex = (struct mlx5_flex_item *)spec->handle; ++ ++ if (flex) { ++ *tunnel_mode = flex->tunnel_mode; ++ return 0; ++ } ++ } ++ return -EINVAL; ++} ++ + /** + * Translate item pattern into matcher fields according to translation + * array. +@@ -328,10 +351,10 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, + spec = item->spec; + mask = item->mask; + tp = (struct mlx5_flex_item *)spec->handle; +- for (i = 0; i < tp->mapnum; i++) { ++ for (i = 0; i < tp->mapnum && pos < (spec->length * CHAR_BIT); i++) { + struct mlx5_flex_pattern_field *map = tp->map + i; + uint32_t val, msk, def; +- int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def); ++ int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner); + + if (id == -1) + continue; +@@ -339,11 +362,14 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, + if (id >= (int)tp->devx_fp->num_samples || + id >= MLX5_GRAPH_NODE_SAMPLE_NUM) + return; ++ def = (uint32_t)(RTE_BIT64(map->width) - 1); ++ def <<= (sizeof(uint32_t) * CHAR_BIT - map->shift - map->width); + val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift); +- msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift); ++ msk = pos < (mask->length * CHAR_BIT) ? ++ mlx5_flex_get_bitfield(mask, pos, map->width, map->shift) : def; + sample_id = tp->devx_fp->sample_ids[id]; + mlx5_flex_set_match_sample(misc4_m, misc4_v, +- def, msk & def, val & msk & def, ++ def, msk, val & msk, + sample_id, id); + pos += map->width; + } +@@ -423,12 +449,14 @@ mlx5_flex_release_index(struct rte_eth_dev *dev, + * + * shift mask + * ------- --------------- +- * 0 b111100 0x3C +- * 1 b111110 0x3E +- * 2 b111111 0x3F +- * 3 b011111 0x1F +- * 4 b001111 0x0F +- * 5 b000111 0x07 ++ * 0 b11111100 0x3C ++ * 1 b01111110 0x3E ++ * 2 b00111111 0x3F ++ * 3 b00011111 0x1F ++ * 4 b00001111 0x0F ++ * 5 b00000111 0x07 ++ * 6 b00000011 0x03 ++ * 7 b00000001 0x01 + */ + static uint8_t + mlx5_flex_hdr_len_mask(uint8_t shift, +@@ -438,8 +466,7 @@ mlx5_flex_hdr_len_mask(uint8_t shift, + int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD; + + base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr); +- return diff == 0 ? base_mask : +- diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff; ++ return diff < 0 ? 
base_mask << -diff : base_mask >> diff; + } + + static int +@@ -450,7 +477,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + { + const struct rte_flow_item_flex_field *field = &conf->next_header; + struct mlx5_devx_graph_node_attr *node = &devx->devx_conf; +- uint32_t len_width, mask; + + if (field->field_base % CHAR_BIT) + return rte_flow_error_set +@@ -478,7 +504,14 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + "negative header length field base (FIXED)"); + node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED; + break; +- case FIELD_MODE_OFFSET: ++ case FIELD_MODE_OFFSET: { ++ uint32_t msb, lsb; ++ int32_t shift = field->offset_shift; ++ uint32_t offset = field->offset_base; ++ uint32_t mask = field->offset_mask; ++ uint32_t wmax = attr->header_length_mask_width + ++ MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD; ++ + if (!(attr->header_length_mode & + RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD))) + return rte_flow_error_set +@@ -488,47 +521,73 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "field size is a must for offset mode"); +- if (field->field_size + field->offset_base < attr->header_length_mask_width) ++ if ((offset ^ (field->field_size + offset)) >> 5) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "field size plus offset_base is too small"); +- node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD; +- if (field->offset_mask == 0 || +- !rte_is_power_of_2(field->offset_mask + 1)) ++ "field crosses the 32-bit word boundary"); ++ /* Hardware counts in dwords, all shifts done by offset within mask */ ++ if (shift < 0 || (uint32_t)shift >= wmax) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "header length field shift exceeds limits (OFFSET)"); ++ if (!mask) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "invalid length field offset mask (OFFSET)"); +- len_width = rte_fls_u32(field->offset_mask); +- if (len_width > attr->header_length_mask_width) ++ "zero length field offset mask (OFFSET)"); ++ msb = rte_fls_u32(mask) - 1; ++ lsb = rte_bsf32(mask); ++ if (!rte_is_power_of_2((mask >> lsb) + 1)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field offset mask too wide (OFFSET)"); +- mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr); +- if (mask < field->offset_mask) ++ "length field offset mask not contiguous (OFFSET)"); ++ if (msb >= field->field_size) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field shift too big (OFFSET)"); +- node->header_length_field_mask = RTE_MIN(mask, +- field->offset_mask); ++ "length field offset mask exceeds field size (OFFSET)"); ++ if (msb >= wmax) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "length field offset mask exceeds supported width (OFFSET)"); ++ if (mask & ~mlx5_flex_hdr_len_mask(shift, attr)) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "mask and shift combination not supported (OFFSET)"); ++ msb++; ++ offset += field->field_size - msb; ++ if (msb < attr->header_length_mask_width) { ++ if (attr->header_length_mask_width - msb > offset) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "field size plus offset_base is too small"); ++ offset += msb; ++ /* ++ * Here we can move to preceding dword. 
Hardware does ++ * cyclic left shift so we should avoid this and stay ++ * at current dword offset. ++ */ ++ offset = (offset & ~0x1Fu) | ++ ((offset - attr->header_length_mask_width) & 0x1F); ++ } ++ node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD; ++ node->header_length_field_mask = mask; ++ node->header_length_field_shift = shift; ++ node->header_length_field_offset = offset; + break; ++ } + case FIELD_MODE_BITMASK: + if (!(attr->header_length_mode & + RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK))) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "unsupported header length field mode (BITMASK)"); +- if (attr->header_length_mask_width < field->field_size) ++ if (field->offset_shift > 15 || field->offset_shift < 0) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "header length field width exceeds limit"); ++ "header length field shift exceeds limit (BITMASK)"); + node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK; +- mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr); +- if (mask < field->offset_mask) +- return rte_flow_error_set +- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field shift too big (BITMASK)"); +- node->header_length_field_mask = RTE_MIN(mask, +- field->offset_mask); ++ node->header_length_field_mask = field->offset_mask; ++ node->header_length_field_shift = field->offset_shift; ++ node->header_length_field_offset = field->offset_base; + break; + default: + return rte_flow_error_set +@@ -541,27 +600,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "header length field base exceeds limit"); + node->header_length_base_value = field->field_base / CHAR_BIT; +- if (field->field_mode == FIELD_MODE_OFFSET || +- field->field_mode == FIELD_MODE_BITMASK) { +- if (field->offset_shift > 15 || field->offset_shift < 0) +- return rte_flow_error_set +- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "header length field shift exceeds limit"); +- node->header_length_field_shift = field->offset_shift; +- node->header_length_field_offset = field->offset_base; +- } +- if (field->field_mode == FIELD_MODE_OFFSET) { +- if (field->field_size > attr->header_length_mask_width) { +- node->header_length_field_offset += +- field->field_size - attr->header_length_mask_width; +- } else if (field->field_size < attr->header_length_mask_width) { +- node->header_length_field_offset -= +- attr->header_length_mask_width - field->field_size; +- node->header_length_field_mask = +- RTE_MIN(node->header_length_field_mask, +- (1u << field->field_size) - 1); +- } +- } + return 0; + } + +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +index da873ae2e2..3dc26d5a0b 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +@@ -104,12 +104,40 @@ struct mlx5_tbl_multi_pattern_ctx { + + #define MLX5_EMPTY_MULTI_PATTERN_CTX {{{0,}},} + ++static __rte_always_inline struct mlx5_hw_q_job * ++flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, ++ const struct rte_flow_action_handle *handle, ++ void *user_data, void *query_data, ++ enum mlx5_hw_job_type type, ++ enum mlx5_hw_indirect_type indirect_type, ++ struct rte_flow_error *error); ++static void ++flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow, ++ struct rte_flow_error *error); ++ + static int + mlx5_tbl_multi_pattern_process(struct rte_eth_dev *dev, + struct rte_flow_template_table 
*tbl, + struct mlx5_tbl_multi_pattern_ctx *mpat, + struct rte_flow_error *error); + ++static __rte_always_inline enum mlx5_indirect_list_type ++flow_hw_inlist_type_get(const struct rte_flow_action *actions); ++ ++static bool ++mlx5_hw_ctx_validate(const struct rte_eth_dev *dev, struct rte_flow_error *error) ++{ ++ const struct mlx5_priv *priv = dev->data->dev_private; ++ ++ if (!priv->dr_ctx) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "non-template flow engine was not configured"); ++ return false; ++ } ++ return true; ++} ++ + static __rte_always_inline int + mlx5_multi_pattern_reformat_to_index(enum mlx5dr_action_type type) + { +@@ -274,21 +302,6 @@ static const struct rte_flow_item_eth ctrl_rx_eth_bcast_spec = { + .hdr.ether_type = 0, + }; + +-static __rte_always_inline struct mlx5_hw_q_job * +-flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue) +-{ +- MLX5_ASSERT(priv->hw_q[queue].job_idx <= priv->hw_q[queue].size); +- return priv->hw_q[queue].job_idx ? +- priv->hw_q[queue].job[--priv->hw_q[queue].job_idx] : NULL; +-} +- +-static __rte_always_inline void +-flow_hw_job_put(struct mlx5_priv *priv, struct mlx5_hw_q_job *job, uint32_t queue) +-{ +- MLX5_ASSERT(priv->hw_q[queue].job_idx < priv->hw_q[queue].size); +- priv->hw_q[queue].job[priv->hw_q[queue].job_idx++] = job; +-} +- + static inline enum mlx5dr_matcher_insert_mode + flow_hw_matcher_insert_mode_get(enum rte_flow_table_insertion_type insert_type) + { +@@ -381,6 +394,7 @@ flow_hw_matching_item_flags_get(const struct rte_flow_item items[]) + uint64_t last_item = 0; + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { ++ enum rte_flow_item_flex_tunnel_mode tunnel_mode = FLEX_TUNNEL_MODE_SINGLE; + int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); + int item_type = items->type; + +@@ -426,6 +440,13 @@ flow_hw_matching_item_flags_get(const struct rte_flow_item items[]) + case RTE_FLOW_ITEM_TYPE_GTP: + last_item = MLX5_FLOW_LAYER_GTP; + break; ++ break; ++ case RTE_FLOW_ITEM_TYPE_FLEX: ++ mlx5_flex_get_tunnel_mode(items, &tunnel_mode); ++ last_item = tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL ? ++ MLX5_FLOW_ITEM_FLEX_TUNNEL : ++ tunnel ? MLX5_FLOW_ITEM_INNER_FLEX : ++ MLX5_FLOW_ITEM_OUTER_FLEX; + default: + break; + } +@@ -1010,15 +1031,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, + if (!shared_rss || __flow_hw_act_data_shared_rss_append + (priv, acts, + (enum rte_flow_action_type)MLX5_RTE_FLOW_ACTION_TYPE_RSS, +- action_src, action_dst, idx, shared_rss)) ++ action_src, action_dst, idx, shared_rss)) { ++ DRV_LOG(WARNING, "Indirect RSS action index %d translate failed", act_idx); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_COUNT: + if (__flow_hw_act_data_shared_cnt_append(priv, acts, + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_COUNT, +- action_src, action_dst, act_idx)) ++ action_src, action_dst, act_idx)) { ++ DRV_LOG(WARNING, "Indirect count action translate failed"); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_AGE: + /* Not supported, prevent by validate function. 
*/ +@@ -1026,15 +1051,19 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev, + break; + case MLX5_INDIRECT_ACTION_TYPE_CT: + if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, +- idx, &acts->rule_acts[action_dst])) ++ idx, &acts->rule_acts[action_dst])) { ++ DRV_LOG(WARNING, "Indirect CT action translate failed"); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_METER_MARK: + if (__flow_hw_act_data_shared_mtr_append(priv, acts, + (enum rte_flow_action_type) + MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK, +- action_src, action_dst, idx)) ++ action_src, action_dst, idx)) { ++ DRV_LOG(WARNING, "Indirect meter mark action translate failed"); + return -1; ++ } + break; + case MLX5_INDIRECT_ACTION_TYPE_QUOTA: + flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx); +@@ -1455,7 +1484,7 @@ flow_hw_meter_compile(struct rte_eth_dev *dev, + acts->rule_acts[jump_pos].action = (!!group) ? + acts->jump->hws_action : + acts->jump->root_action; +- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) ++ if (mlx5_aso_mtr_wait(priv, aso_mtr, true)) + return -ENOMEM; + return 0; + } +@@ -1532,7 +1561,7 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions) + static __rte_always_inline struct mlx5_aso_mtr * + flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_action *action, +- void *user_data, bool push) ++ struct mlx5_hw_q_job *job, bool push) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; +@@ -1540,6 +1569,8 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, + struct mlx5_aso_mtr *aso_mtr; + struct mlx5_flow_meter_info *fm; + uint32_t mtr_id; ++ uintptr_t handle = (uintptr_t)MLX5_INDIRECT_ACTION_TYPE_METER_MARK << ++ MLX5_INDIRECT_ACTION_TYPE_OFFSET; + + if (meter_mark->profile == NULL) + return NULL; +@@ -1558,15 +1589,16 @@ flow_hw_meter_mark_alloc(struct rte_eth_dev *dev, uint32_t queue, + ASO_METER_WAIT : ASO_METER_WAIT_ASYNC; + aso_mtr->offset = mtr_id - 1; + aso_mtr->init_color = fm->color_aware ? RTE_COLORS : RTE_COLOR_GREEN; ++ job->action = (void *)(handle | mtr_id); + /* Update ASO flow meter by wqe. */ +- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr, +- &priv->mtr_bulk, user_data, push)) { ++ if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr, ++ &priv->mtr_bulk, job, push)) { + mlx5_ipool_free(pool->idx_pool, mtr_id); + return NULL; + } + /* Wait for ASO object completion. 
*/ + if (queue == MLX5_HW_INV_QUEUE && +- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { ++ mlx5_aso_mtr_wait(priv, aso_mtr, true)) { + mlx5_ipool_free(pool->idx_pool, mtr_id); + return NULL; + } +@@ -1584,10 +1616,18 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev, + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; + struct mlx5_aso_mtr *aso_mtr; ++ struct mlx5_hw_q_job *job = ++ flow_hw_action_job_init(priv, queue, NULL, NULL, NULL, ++ MLX5_HW_Q_JOB_TYPE_CREATE, ++ MLX5_HW_INDIRECT_TYPE_LEGACY, NULL); + +- aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, NULL, true); +- if (!aso_mtr) ++ if (!job) + return -1; ++ aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, true); ++ if (!aso_mtr) { ++ flow_hw_job_put(priv, job, queue); ++ return -1; ++ } + + /* Compile METER_MARK action */ + acts[aso_mtr_pos].action = pool->action; +@@ -1722,15 +1762,9 @@ flow_hw_translate_indirect_meter(struct rte_eth_dev *dev, + const struct rte_flow_indirect_update_flow_meter_mark **flow_conf = + (typeof(flow_conf))action_conf->conf; + +- /* +- * Masked indirect handle set dr5 action during template table +- * translation. +- */ +- if (!dr_rule->action) { +- ret = flow_dr_set_meter(priv, dr_rule, action_conf); +- if (ret) +- return ret; +- } ++ ret = flow_dr_set_meter(priv, dr_rule, action_conf); ++ if (ret) ++ return ret; + if (!act_data->shared_meter.conf_masked) { + if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS) + flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color); +@@ -2512,6 +2546,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev, + } + return 0; + err: ++ /* If rte_errno was not initialized and reached error state. */ ++ if (!rte_errno) ++ rte_errno = EINVAL; + err = rte_errno; + __flow_hw_action_template_destroy(dev, acts); + return rte_flow_error_set(error, err, +@@ -2865,6 +2902,30 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job, + return 0; + } + ++/** ++ * Release any actions allocated for the flow rule during actions construction. ++ * ++ * @param[in] flow ++ * Pointer to flow structure. ++ */ ++static void ++flow_hw_release_actions(struct rte_eth_dev *dev, ++ uint32_t queue, ++ struct rte_flow_hw *flow) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; ++ ++ if (flow->fate_type == MLX5_FLOW_FATE_JUMP) ++ flow_hw_jump_release(dev, flow->jump); ++ else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE) ++ mlx5_hrxq_obj_release(dev, flow->hrxq); ++ if (mlx5_hws_cnt_id_valid(flow->cnt_id)) ++ flow_hw_age_count_release(priv, queue, flow, NULL); ++ if (flow->mtr_id) ++ mlx5_ipool_free(pool->idx_pool, flow->mtr_id); ++} ++ + /** + * Construct flow action array. 
+ * +@@ -2972,7 +3033,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + (int)action->type == act_data->type); + switch ((int)act_data->type) { + case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST: +- act_data->indirect_list_cb(dev, act_data, actions, ++ act_data->indirect_list_cb(dev, act_data, action, + &rule_acts[act_data->action_dst]); + break; + case RTE_FLOW_ACTION_TYPE_INDIRECT: +@@ -2980,7 +3041,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + (dev, queue, action, table, it_idx, + at->action_flags, job->flow, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_VOID: + break; +@@ -3000,7 +3061,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + jump = flow_hw_jump_action_register + (dev, &table->cfg, jump_group, NULL); + if (!jump) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + (!!attr.group) ? jump->hws_action : jump->root_action; + job->flow->jump = jump; +@@ -3012,7 +3073,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + ft_flag, + action); + if (!hrxq) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = hrxq->action; + job->flow->hrxq = hrxq; + job->flow->fate_type = MLX5_FLOW_FATE_QUEUE; +@@ -3022,19 +3083,19 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + if (flow_hw_shared_action_get + (dev, act_data, item_flags, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + enc_item = ((const struct rte_flow_action_vxlan_encap *) + action->conf)->definition; + if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: + enc_item = ((const struct rte_flow_action_nvgre_encap *) + action->conf)->definition; + if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: + raw_encap_data = +@@ -3063,12 +3124,12 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + hw_acts, + action); + if (ret) +- return -1; ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: + port_action = action->conf; + if (!priv->hw_vport[port_action->port_id]) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + priv->hw_vport[port_action->port_id]; + break; +@@ -3088,7 +3149,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + jump = flow_hw_jump_action_register + (dev, &table->cfg, aso_mtr->fm.group, NULL); + if (!jump) +- return -1; ++ goto error; + MLX5_ASSERT + (!rule_acts[act_data->action_dst + 1].action); + rule_acts[act_data->action_dst + 1].action = +@@ -3096,8 +3157,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + jump->root_action; + job->flow->jump = jump; + job->flow->fate_type = MLX5_FLOW_FATE_JUMP; +- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) +- return -1; ++ if (mlx5_aso_mtr_wait(priv, aso_mtr, true)) ++ goto error; + break; + case RTE_FLOW_ACTION_TYPE_AGE: + age = action->conf; +@@ -3112,7 +3173,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + job->flow->res_idx, + error); + if (age_idx == 0) +- return -rte_errno; ++ goto error; + job->flow->age_idx = age_idx; + if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) + /* +@@ -3123,11 +3184,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + break; + /* Fall-through. */ + case RTE_FLOW_ACTION_TYPE_COUNT: +- /* If the port is engaged in resource sharing, do not use queue cache. 
*/ +- cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue; ++ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); + ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx); +- if (ret != 0) +- return ret; ++ if (ret != 0) { ++ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION, ++ action, "Failed to allocate flow counter"); ++ goto error; ++ } + ret = mlx5_hws_cnt_pool_get_action_offset + (priv->hws_cpool, + cnt_id, +@@ -3135,7 +3198,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + &rule_acts[act_data->action_dst].counter.offset + ); + if (ret != 0) +- return ret; ++ goto error; + job->flow->cnt_id = cnt_id; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_COUNT: +@@ -3146,7 +3209,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + &rule_acts[act_data->action_dst].counter.offset + ); + if (ret != 0) +- return ret; ++ goto error; + job->flow->cnt_id = act_data->shared_counter.id; + break; + case RTE_FLOW_ACTION_TYPE_CONNTRACK: +@@ -3154,7 +3217,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + ((uint32_t)(uintptr_t)action->conf); + if (flow_hw_ct_compile(dev, queue, ct_idx, + &rule_acts[act_data->action_dst])) +- return -1; ++ goto error; + break; + case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK: + mtr_id = act_data->shared_meter.id & +@@ -3162,7 +3225,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + /* Find ASO object. */ + aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id); + if (!aso_mtr) +- return -1; ++ goto error; + rule_acts[act_data->action_dst].action = + pool->action; + rule_acts[act_data->action_dst].aso_meter.offset = +@@ -3177,7 +3240,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + act_data->action_dst, action, + rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE); + if (ret != 0) +- return ret; ++ goto error; + break; + default: + break; +@@ -3215,6 +3278,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, + if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) + job->flow->cnt_id = hw_acts->cnt_id; + return 0; ++ ++error: ++ flow_hw_release_actions(dev, queue, job->flow); ++ rte_errno = EINVAL; ++ return -rte_errno; + } + + static const struct rte_flow_item * +@@ -3316,14 +3384,11 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, + struct rte_flow_hw *flow = NULL; + struct mlx5_hw_q_job *job = NULL; + const struct rte_flow_item *rule_items; ++ struct rte_flow_error sub_error = { 0 }; + uint32_t flow_idx = 0; + uint32_t res_idx = 0; + int ret; + +- if (unlikely((!dev->data->dev_started))) { +- rte_errno = EINVAL; +- goto error; +- } + job = flow_hw_job_get(priv, queue); + if (!job) { + rte_errno = ENOMEM; +@@ -3368,10 +3433,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, + if (flow_hw_actions_construct(dev, job, + &table->ats[action_template_index], + pattern_template_index, actions, +- rule_acts, queue, error)) { +- rte_errno = EINVAL; ++ rule_acts, queue, &sub_error)) + goto error; +- } + rule_items = flow_hw_get_rule_items(dev, table, items, + pattern_template_index, job); + if (!rule_items) +@@ -3389,9 +3452,12 @@ error: + mlx5_ipool_free(table->flow, flow_idx); + if (res_idx) + mlx5_ipool_free(table->resource, res_idx); +- rte_flow_error_set(error, rte_errno, +- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, +- "fail to create rte flow"); ++ if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL) ++ *error = sub_error; ++ else ++ rte_flow_error_set(error, rte_errno, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "fail to create rte flow"); + return NULL; + } + +@@ -3722,8 +3788,7 @@ 
flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, + } + return; + } +- /* If the port is engaged in resource sharing, do not use queue cache. */ +- cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue; ++ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); + /* Put the counter first to reduce the race risk in BG thread. */ + mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id); + flow->cnt_id = 0; +@@ -3780,13 +3845,6 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job + job->query.hw); + aso_ct->state = ASO_CONNTRACK_READY; + } +- } else { +- /* +- * rte_flow_op_result::user data can point to +- * struct mlx5_aso_mtr object as well +- */ +- if (queue != CTRL_QUEUE_ID(priv)) +- MLX5_ASSERT(false); + } + } + +@@ -4368,12 +4426,23 @@ flow_hw_table_create(struct rte_eth_dev *dev, + matcher_attr.rule.num_log = rte_log2_u32(nb_flows); + /* Parse hints information. */ + if (attr->specialize) { +- if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG) +- matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_WIRE; +- else if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG) +- matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_VPORT; +- else +- DRV_LOG(INFO, "Unsupported hint value %x", attr->specialize); ++ uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG | ++ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG; ++ ++ if ((attr->specialize & val) == val) { ++ DRV_LOG(INFO, "Invalid hint value %x", ++ attr->specialize); ++ rte_errno = EINVAL; ++ goto it_error; ++ } ++ if (attr->specialize & ++ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG) ++ matcher_attr.optimize_flow_src = ++ MLX5DR_MATCHER_FLOW_SRC_WIRE; ++ else if (attr->specialize & ++ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG) ++ matcher_attr.optimize_flow_src = ++ MLX5DR_MATCHER_FLOW_SRC_VPORT; + } + /* Build the item template. 
*/ + for (i = 0; i < nb_item_templates; i++) { +@@ -4623,7 +4692,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "table in use"); ++ "table is in use"); + } + LIST_REMOVE(table, next); + for (i = 0; i < table->nb_item_templates; i++) +@@ -4975,15 +5044,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev, + ret = flow_validate_modify_field_level(&action_conf->dst, error); + if (ret) + return ret; +- if (action_conf->dst.tag_index && +- !flow_modify_field_support_tag_array(action_conf->dst.field)) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, action, +- "destination tag index is not supported"); +- if (action_conf->dst.class_id) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, action, +- "destination class id is not supported"); ++ if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM) { ++ if (action_conf->dst.tag_index && ++ !flow_modify_field_support_tag_array(action_conf->dst.field)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, action, ++ "destination tag index is not supported"); ++ if (action_conf->dst.class_id) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, action, ++ "destination class id is not supported"); ++ } + if (mask_conf->dst.level != UINT8_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, +@@ -4998,15 +5069,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev, + "destination field mask and template are not equal"); + if (action_conf->src.field != RTE_FLOW_FIELD_POINTER && + action_conf->src.field != RTE_FLOW_FIELD_VALUE) { +- if (action_conf->src.tag_index && +- !flow_modify_field_support_tag_array(action_conf->src.field)) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, action, +- "source tag index is not supported"); +- if (action_conf->src.class_id) +- return rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_ACTION, action, +- "source class id is not supported"); ++ if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM) { ++ if (action_conf->src.tag_index && ++ !flow_modify_field_support_tag_array(action_conf->src.field)) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, action, ++ "source tag index is not supported"); ++ if (action_conf->src.class_id) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ACTION, action, ++ "source class id is not supported"); ++ } + if (mask_conf->src.level != UINT8_MAX) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, action, +@@ -5443,6 +5516,69 @@ mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions, + MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP; + } + ++enum mlx5_hw_indirect_list_relative_position { ++ MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1, ++ MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0, ++ MLX5_INDIRECT_LIST_POSITION_AFTER_MH, ++}; ++ ++static enum mlx5_hw_indirect_list_relative_position ++mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action) ++{ ++ const struct rte_flow_action_indirect_list *conf = action->conf; ++ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle); ++ enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; ++ const union { ++ struct mlx5_indlst_legacy *legacy; ++ struct mlx5_hw_encap_decap_action *reformat; ++ struct rte_flow_action_list_handle *handle; 
++ } h = { .handle = conf->handle}; ++ ++ switch (list_type) { ++ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY: ++ switch (h.legacy->legacy_type) { ++ case RTE_FLOW_ACTION_TYPE_AGE: ++ case RTE_FLOW_ACTION_TYPE_COUNT: ++ case RTE_FLOW_ACTION_TYPE_CONNTRACK: ++ case RTE_FLOW_ACTION_TYPE_METER_MARK: ++ case RTE_FLOW_ACTION_TYPE_QUOTA: ++ pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH; ++ break; ++ case RTE_FLOW_ACTION_TYPE_RSS: ++ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; ++ break; ++ default: ++ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; ++ break; ++ } ++ break; ++ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR: ++ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; ++ break; ++ case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT: ++ switch (h.reformat->action_type) { ++ case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: ++ case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: ++ pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH; ++ break; ++ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: ++ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: ++ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; ++ break; ++ default: ++ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; ++ break; ++ } ++ break; ++ default: ++ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; ++ break; ++ } ++ return pos; ++} ++ ++#define MLX5_HW_EXPAND_MH_FAILED 0xffff ++ + static inline uint16_t + flow_hw_template_expand_modify_field(struct rte_flow_action actions[], + struct rte_flow_action masks[], +@@ -5479,6 +5615,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[], + * @see action_order_arr[] + */ + for (i = act_num - 2; (int)i >= 0; i--) { ++ enum mlx5_hw_indirect_list_relative_position pos; + enum rte_flow_action_type type = actions[i].type; + uint64_t reformat_type; + +@@ -5509,6 +5646,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[], + if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) + i--; + break; ++ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST: ++ pos = mlx5_hw_indirect_list_mh_position(&actions[i]); ++ if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN) ++ return MLX5_HW_EXPAND_MH_FAILED; ++ if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH) ++ goto insert; ++ break; + default: + i++; /* new MF inserted AFTER actions[i] */ + goto insert; +@@ -5639,6 +5783,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + int ret; + const struct rte_flow_action_ipv6_ext_remove *remove_data; + ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return -rte_errno; + /* FDB actions are only valid to proxy port. */ + if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master)) + return rte_flow_error_set(error, EINVAL, +@@ -6151,7 +6297,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + rm[set_vlan_vid_ix].conf)->vlan_vid != 0); + const struct rte_flow_action_of_set_vlan_vid *conf = + ra[set_vlan_vid_ix].conf; +- rte_be16_t vid = masked ? conf->vlan_vid : 0; + int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0, + NULL, &error); + *spec = (typeof(*spec)) { +@@ -6162,8 +6307,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = vid, +- .offset = 0, + }, + .width = width, + }; +@@ -6175,11 +6318,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = masked ? 
(1U << width) - 1 : 0, +- .offset = 0, + }, + .width = 0xffffffff, + }; ++ if (masked) { ++ uint32_t mask_val = 0xffffffff; ++ ++ rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid)); ++ rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val)); ++ } + ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; + ra[set_vlan_vid_ix].conf = spec; + rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; +@@ -6206,8 +6353,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, + }, + .src = { + .field = RTE_FLOW_FIELD_VALUE, +- .level = vid, +- .offset = 0, + }, + .width = width, + }; +@@ -6216,6 +6361,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, + .conf = &conf + }; + ++ rte_memcpy(conf.src.value, &vid, sizeof(vid)); + return flow_hw_modify_field_construct(job, act_data, hw_acts, + &modify_action); + } +@@ -6387,8 +6533,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev, + uint32_t expand_mf_num = 0; + uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, }; + +- if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks, +- &action_flags, error)) ++ if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks, &action_flags, error)) + return NULL; + for (i = 0; ra[i].type != RTE_FLOW_ACTION_TYPE_END; ++i) { + switch (ra[i].type) { +@@ -6463,6 +6608,12 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev, + action_flags, + act_num, + expand_mf_num); ++ if (pos == MLX5_HW_EXPAND_MH_FAILED) { ++ rte_flow_error_set(error, ENOMEM, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "modify header expansion failed"); ++ return NULL; ++ } + act_num += expand_mf_num; + for (i = pos + expand_mf_num; i < act_num; i++) + src_off[i] += expand_mf_num; +@@ -6585,7 +6736,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "action template in using"); ++ "action template is in use"); + } + if (template->action_flags & flag) + mlx5_free_srh_flex_parser(dev); +@@ -6645,6 +6796,8 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, + bool items_end = false; + uint32_t tag_bitmap = 0; + ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return -rte_errno; + if (!attr->ingress && !attr->egress && !attr->transfer) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "at least one of the direction attributes" +@@ -7003,7 +7156,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev, + return rte_flow_error_set(error, EBUSY, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +- "item template in using"); ++ "item template is in use"); + } + if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT | + MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) +@@ -8366,6 +8519,72 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, + return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); + } + ++/** ++ * Cleans up all template tables and pattern, and actions templates used for ++ * FDB control flow rules. ++ * ++ * @param dev ++ * Pointer to Ethernet device. ++ */ ++static void ++flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev) ++{ ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; ++ ++ if (!priv->hw_ctrl_fdb) ++ return; ++ hw_ctrl_fdb = priv->hw_ctrl_fdb; ++ /* Clean up templates used for LACP default miss table. 
*/ ++ if (hw_ctrl_fdb->hw_lacp_rx_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL)); ++ if (hw_ctrl_fdb->lacp_rx_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->lacp_rx_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default Tx metadata copy. */ ++ if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL)); ++ if (hw_ctrl_fdb->tx_meta_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->tx_meta_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default FDB jump rule. */ ++ if (hw_ctrl_fdb->hw_esw_zero_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL)); ++ if (hw_ctrl_fdb->jump_one_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->port_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl, ++ NULL)); ++ /* Clean up templates used for default SQ miss flow rules - non-root table. */ ++ if (hw_ctrl_fdb->hw_esw_sq_miss_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL)); ++ if (hw_ctrl_fdb->regc_sq_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl, ++ NULL)); ++ if (hw_ctrl_fdb->port_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl, ++ NULL)); ++ /* Clean up templates used for default SQ miss flow rules - root table. */ ++ if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) ++ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL)); ++ if (hw_ctrl_fdb->regc_jump_actions_tmpl) ++ claim_zero(flow_hw_actions_template_destroy(dev, ++ hw_ctrl_fdb->regc_jump_actions_tmpl, NULL)); ++ if (hw_ctrl_fdb->esw_mgr_items_tmpl) ++ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl, ++ NULL)); ++ /* Clean up templates structure for FDB control flow rules. */ ++ mlx5_free(hw_ctrl_fdb); ++ priv->hw_ctrl_fdb = NULL; ++} ++ + /* + * Create a table on the root group to for the LACP traffic redirecting. 
+ * +@@ -8415,182 +8634,154 @@ flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev, + * @return + * 0 on success, negative values otherwise + */ +-static __rte_unused int ++static int + flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error) + { + struct mlx5_priv *priv = dev->data->dev_private; +- struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; +- struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; +- struct rte_flow_pattern_template *port_items_tmpl = NULL; +- struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; +- struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL; +- struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; +- struct rte_flow_actions_template *port_actions_tmpl = NULL; +- struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; +- struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; +- struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL; ++ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; + uint32_t xmeta = priv->sh->config.dv_xmeta_en; + uint32_t repr_matching = priv->sh->config.repr_matching; +- int ret; ++ uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule; + +- /* Create templates and table for default SQ miss flow rules - root table. */ +- esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); +- if (!esw_mgr_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" +- " template for control flows", dev->data->port_id); +- goto err; +- } +- regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error); +- if (!regc_jump_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" +- " for control flows", dev->data->port_id); +- goto err; +- } +- MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); +- priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table +- (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error); +- if (!priv->hw_esw_sq_miss_root_tbl) { +- DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" +- " for control flows", dev->data->port_id); +- goto err; +- } +- /* Create templates and table for default SQ miss flow rules - non-root table. */ +- regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); +- if (!regc_sq_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create SQ item template for" +- " control flows", dev->data->port_id); +- goto err; +- } +- port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); +- if (!port_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create port action template" +- " for control flows", dev->data->port_id); +- goto err; +- } +- MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); +- priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, +- port_actions_tmpl, error); +- if (!priv->hw_esw_sq_miss_tbl) { +- DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" +- " for control flows", dev->data->port_id); +- goto err; +- } +- /* Create templates and table for default FDB jump flow rules. 
*/ +- port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); +- if (!port_items_tmpl) { +- DRV_LOG(ERR, "port %u failed to create SQ item template for" +- " control flows", dev->data->port_id); +- goto err; +- } +- jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template +- (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); +- if (!jump_one_actions_tmpl) { +- DRV_LOG(ERR, "port %u failed to create jump action template" +- " for control flows", dev->data->port_id); ++ MLX5_ASSERT(priv->hw_ctrl_fdb == NULL); ++ hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY); ++ if (!hw_ctrl_fdb) { ++ DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates", ++ dev->data->port_id); ++ rte_errno = ENOMEM; + goto err; + } +- MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); +- priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, +- jump_one_actions_tmpl, +- error); +- if (!priv->hw_esw_zero_tbl) { +- DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" +- " for control flows", dev->data->port_id); +- goto err; ++ priv->hw_ctrl_fdb = hw_ctrl_fdb; ++ if (fdb_def_rule) { ++ /* Create templates and table for default SQ miss flow rules - root table. */ ++ hw_ctrl_fdb->esw_mgr_items_tmpl = ++ flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->esw_mgr_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" ++ " template for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->regc_jump_actions_tmpl = ++ flow_hw_create_ctrl_regc_jump_actions_template(dev, error); ++ if (!hw_ctrl_fdb->regc_jump_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_sq_miss_root_tbl = ++ flow_hw_create_ctrl_sq_miss_root_table ++ (dev, hw_ctrl_fdb->esw_mgr_items_tmpl, ++ hw_ctrl_fdb->regc_jump_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ /* Create templates and table for default SQ miss flow rules - non-root table. */ ++ hw_ctrl_fdb->regc_sq_items_tmpl = ++ flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->regc_sq_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create SQ item template for" ++ " control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->port_actions_tmpl = ++ flow_hw_create_ctrl_port_actions_template(dev, error); ++ if (!hw_ctrl_fdb->port_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create port action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_sq_miss_tbl = ++ flow_hw_create_ctrl_sq_miss_table ++ (dev, hw_ctrl_fdb->regc_sq_items_tmpl, ++ hw_ctrl_fdb->port_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ /* Create templates and table for default FDB jump flow rules. 
*/ ++ hw_ctrl_fdb->port_items_tmpl = ++ flow_hw_create_ctrl_port_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->port_items_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create SQ item template for" ++ " control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->jump_one_actions_tmpl = ++ flow_hw_create_ctrl_jump_actions_template ++ (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); ++ if (!hw_ctrl_fdb->jump_one_actions_tmpl) { ++ DRV_LOG(ERR, "port %u failed to create jump action template" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } ++ hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table ++ (dev, hw_ctrl_fdb->port_items_tmpl, ++ hw_ctrl_fdb->jump_one_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_esw_zero_tbl) { ++ DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" ++ " for control flows", dev->data->port_id); ++ goto err; ++ } + } + /* Create templates and table for default Tx metadata copy flow rule. */ + if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) { +- tx_meta_items_tmpl = ++ hw_ctrl_fdb->tx_meta_items_tmpl = + flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error); +- if (!tx_meta_items_tmpl) { ++ if (!hw_ctrl_fdb->tx_meta_items_tmpl) { + DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern" + " template for control flows", dev->data->port_id); + goto err; + } +- tx_meta_actions_tmpl = ++ hw_ctrl_fdb->tx_meta_actions_tmpl = + flow_hw_create_tx_default_mreg_copy_actions_template(dev, error); +- if (!tx_meta_actions_tmpl) { ++ if (!hw_ctrl_fdb->tx_meta_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to Tx metadata copy actions" + " template for control flows", dev->data->port_id); + goto err; + } +- MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); +- priv->hw_tx_meta_cpy_tbl = +- flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl, +- tx_meta_actions_tmpl, error); +- if (!priv->hw_tx_meta_cpy_tbl) { ++ hw_ctrl_fdb->hw_tx_meta_cpy_tbl = ++ flow_hw_create_tx_default_mreg_copy_table ++ (dev, hw_ctrl_fdb->tx_meta_items_tmpl, ++ hw_ctrl_fdb->tx_meta_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) { + DRV_LOG(ERR, "port %u failed to create table for default" + " Tx metadata copy flow rule", dev->data->port_id); + goto err; + } + } + /* Create LACP default miss table. 
*/ +- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { +- lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error); +- if (!lacp_rx_items_tmpl) { ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { ++ hw_ctrl_fdb->lacp_rx_items_tmpl = ++ flow_hw_create_lacp_rx_pattern_template(dev, error); ++ if (!hw_ctrl_fdb->lacp_rx_items_tmpl) { + DRV_LOG(ERR, "port %u failed to create pattern template" + " for LACP Rx traffic", dev->data->port_id); + goto err; + } +- lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error); +- if (!lacp_rx_actions_tmpl) { ++ hw_ctrl_fdb->lacp_rx_actions_tmpl = ++ flow_hw_create_lacp_rx_actions_template(dev, error); ++ if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) { + DRV_LOG(ERR, "port %u failed to create actions template" + " for LACP Rx traffic", dev->data->port_id); + goto err; + } +- priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl, +- lacp_rx_actions_tmpl, error); +- if (!priv->hw_lacp_rx_tbl) { ++ hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table ++ (dev, hw_ctrl_fdb->lacp_rx_items_tmpl, ++ hw_ctrl_fdb->lacp_rx_actions_tmpl, error); ++ if (!hw_ctrl_fdb->hw_lacp_rx_tbl) { + DRV_LOG(ERR, "port %u failed to create template table for" + " for LACP Rx traffic", dev->data->port_id); + goto err; + } + } return 0; + -+error: -+ flow_hw_release_actions(dev, queue, job->flow); -+ rte_errno = EINVAL; -+ return -rte_errno; + err: +- /* Do not overwrite the rte_errno. */ +- ret = -rte_errno; +- if (ret == 0) +- ret = rte_flow_error_set(error, EINVAL, +- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, +- "Failed to create control tables."); +- if (priv->hw_tx_meta_cpy_tbl) { +- flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL); +- priv->hw_tx_meta_cpy_tbl = NULL; +- } +- if (priv->hw_esw_zero_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); +- priv->hw_esw_zero_tbl = NULL; +- } +- if (priv->hw_esw_sq_miss_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL); +- priv->hw_esw_sq_miss_tbl = NULL; +- } +- if (priv->hw_esw_sq_miss_root_tbl) { +- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); +- priv->hw_esw_sq_miss_root_tbl = NULL; +- } +- if (lacp_rx_actions_tmpl) +- flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL); +- if (tx_meta_actions_tmpl) +- flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); +- if (jump_one_actions_tmpl) +- flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL); +- if (port_actions_tmpl) +- flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); +- if (regc_jump_actions_tmpl) +- flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); +- if (lacp_rx_items_tmpl) +- flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL); +- if (tx_meta_items_tmpl) +- flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); +- if (port_items_tmpl) +- flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL); +- if (regc_sq_items_tmpl) +- flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); +- if (esw_mgr_items_tmpl) +- flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); +- return ret; ++ flow_hw_cleanup_ctrl_fdb_tables(dev); ++ return -EINVAL; } - static const struct rte_flow_item * -@@ -3320,10 +3377,6 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, - uint32_t res_idx = 0; - int ret; + static void +@@ -9184,6 +9375,38 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr, + return true; + } -- if 
(unlikely((!dev->data->dev_started))) { ++static int ++flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr, ++ uint16_t nb_queue, ++ const struct rte_flow_queue_attr *queue_attr[], ++ struct rte_flow_error *error) ++{ ++ uint32_t size; ++ unsigned int i; ++ ++ if (port_attr == NULL) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Port attributes must be non-NULL"); ++ ++ if (nb_queue == 0) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "At least one flow queue is required"); ++ ++ if (queue_attr == NULL) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "Queue attributes must be non-NULL"); ++ ++ size = queue_attr[0]->size; ++ for (i = 1; i < nb_queue; ++i) { ++ if (queue_attr[i]->size != size) ++ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, ++ "All flow queues must have the same size"); ++ } ++ ++ return 0; ++} ++ + /** + * Configure port HWS resources. + * +@@ -9235,10 +9458,8 @@ flow_hw_configure(struct rte_eth_dev *dev, + int ret = 0; + uint32_t action_flags; + +- if (!port_attr || !nb_queue || !queue_attr) { - rte_errno = EINVAL; -- goto error; +- goto err; +- } ++ if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error)) ++ return -rte_errno; + /* + * Calling rte_flow_configure() again is allowed if and only if + * provided configuration matches the initially provided one. +@@ -9285,14 +9506,6 @@ flow_hw_configure(struct rte_eth_dev *dev, + /* Allocate the queue job descriptor LIFO. */ + mem_size = sizeof(priv->hw_q[0]) * nb_q_updated; + for (i = 0; i < nb_q_updated; i++) { +- /* +- * Check if the queues' size are all the same as the +- * limitation from HWS layer. +- */ +- if (_queue_attr[i]->size != _queue_attr[0]->size) { +- rte_errno = EINVAL; +- goto err; +- } + mem_size += (sizeof(struct mlx5_hw_q_job *) + + sizeof(struct mlx5_hw_q_job) + + sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + +@@ -9364,6 +9577,9 @@ flow_hw_configure(struct rte_eth_dev *dev, + } + dr_ctx_attr.pd = priv->sh->cdev->pd; + dr_ctx_attr.queues = nb_q_updated; ++ /* Assign initial value of STC numbers for representors. */ ++ if (priv->representor) ++ dr_ctx_attr.initial_log_stc_memory = MLX5_REPR_STC_MEMORY_LOG; + /* Queue size should all be the same. Take the first one. 
*/ + dr_ctx_attr.queue_size = _queue_attr[0]->size; + if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) { +@@ -9545,6 +9761,14 @@ flow_hw_configure(struct rte_eth_dev *dev, + priv->hws_strict_queue = 1; + return 0; + err: ++ priv->hws_strict_queue = 0; ++ flow_hw_destroy_vlan(dev); ++ if (priv->hws_age_req) ++ mlx5_hws_age_pool_destroy(priv); ++ if (priv->hws_cpool) { ++ mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); ++ priv->hws_cpool = NULL; ++ } + if (priv->hws_ctpool) { + flow_hw_ct_pool_destroy(dev, priv->hws_ctpool); + priv->hws_ctpool = NULL; +@@ -9553,44 +9777,54 @@ err: + flow_hw_ct_mng_destroy(dev, priv->ct_mng); + priv->ct_mng = NULL; + } +- if (priv->hws_age_req) +- mlx5_hws_age_pool_destroy(priv); +- if (priv->hws_cpool) { +- mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); +- priv->hws_cpool = NULL; +- } +- mlx5_flow_quota_destroy(dev); + flow_hw_destroy_send_to_kernel_action(priv); ++ flow_hw_cleanup_ctrl_fdb_tables(dev); + flow_hw_free_vport_actions(priv); ++ if (priv->hw_def_miss) { ++ mlx5dr_action_destroy(priv->hw_def_miss); ++ priv->hw_def_miss = NULL; ++ } ++ flow_hw_cleanup_tx_repr_tagging(dev); + for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { +- if (priv->hw_drop[i]) ++ if (priv->hw_drop[i]) { + mlx5dr_action_destroy(priv->hw_drop[i]); +- if (priv->hw_tag[i]) ++ priv->hw_drop[i] = NULL; ++ } ++ if (priv->hw_tag[i]) { + mlx5dr_action_destroy(priv->hw_tag[i]); ++ priv->hw_tag[i] = NULL; ++ } + } +- if (priv->hw_def_miss) +- mlx5dr_action_destroy(priv->hw_def_miss); +- flow_hw_destroy_vlan(dev); +- if (dr_ctx) ++ mlx5_flow_meter_uninit(dev); ++ mlx5_flow_quota_destroy(dev); ++ flow_hw_cleanup_ctrl_rx_tables(dev); ++ if (dr_ctx) { + claim_zero(mlx5dr_context_close(dr_ctx)); +- for (i = 0; i < nb_q_updated; i++) { +- rte_ring_free(priv->hw_q[i].indir_iq); +- rte_ring_free(priv->hw_q[i].indir_cq); ++ priv->dr_ctx = NULL; + } +- mlx5_free(priv->hw_q); +- priv->hw_q = NULL; +- if (priv->acts_ipool) { +- mlx5_ipool_destroy(priv->acts_ipool); +- priv->acts_ipool = NULL; +- } +- if (_queue_attr) +- mlx5_free(_queue_attr); + if (priv->shared_host) { ++ struct mlx5_priv *host_priv = priv->shared_host->data->dev_private; ++ + __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED); + priv->shared_host = NULL; + } ++ if (priv->hw_q) { ++ for (i = 0; i < nb_q_updated; i++) { ++ rte_ring_free(priv->hw_q[i].indir_iq); ++ rte_ring_free(priv->hw_q[i].indir_cq); ++ } ++ mlx5_free(priv->hw_q); ++ priv->hw_q = NULL; ++ } ++ if (priv->acts_ipool) { ++ mlx5_ipool_destroy(priv->acts_ipool); ++ priv->acts_ipool = NULL; ++ } + mlx5_free(priv->hw_attr); + priv->hw_attr = NULL; ++ priv->nb_queue = 0; ++ if (_queue_attr) ++ mlx5_free(_queue_attr); + /* Do not overwrite the internal errno information. 
*/ + if (ret) + return ret; +@@ -9609,37 +9843,48 @@ void + flow_hw_resource_release(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; +- struct rte_flow_template_table *tbl; +- struct rte_flow_pattern_template *it; +- struct rte_flow_actions_template *at; +- struct mlx5_flow_group *grp; ++ struct rte_flow_template_table *tbl, *temp_tbl; ++ struct rte_flow_pattern_template *it, *temp_it; ++ struct rte_flow_actions_template *at, *temp_at; ++ struct mlx5_flow_group *grp, *temp_grp; + uint32_t i; + + if (!priv->dr_ctx) + return; + flow_hw_rxq_flag_set(dev, false); + flow_hw_flush_all_ctrl_flows(dev); ++ flow_hw_cleanup_ctrl_fdb_tables(dev); + flow_hw_cleanup_tx_repr_tagging(dev); + flow_hw_cleanup_ctrl_rx_tables(dev); +- while (!LIST_EMPTY(&priv->flow_hw_grp)) { +- grp = LIST_FIRST(&priv->flow_hw_grp); +- flow_hw_group_unset_miss_group(dev, grp, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) { +- tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); +- flow_hw_table_destroy(dev, tbl, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_tbl)) { +- tbl = LIST_FIRST(&priv->flow_hw_tbl); +- flow_hw_table_destroy(dev, tbl, NULL); +- } +- while (!LIST_EMPTY(&priv->flow_hw_itt)) { +- it = LIST_FIRST(&priv->flow_hw_itt); +- flow_hw_pattern_template_destroy(dev, it, NULL); - } +- while (!LIST_EMPTY(&priv->flow_hw_at)) { +- at = LIST_FIRST(&priv->flow_hw_at); +- flow_hw_actions_template_destroy(dev, at, NULL); ++ grp = LIST_FIRST(&priv->flow_hw_grp); ++ while (grp) { ++ temp_grp = LIST_NEXT(grp, next); ++ claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL)); ++ grp = temp_grp; ++ } ++ tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); ++ while (tbl) { ++ temp_tbl = LIST_NEXT(tbl, next); ++ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); ++ tbl = temp_tbl; ++ } ++ tbl = LIST_FIRST(&priv->flow_hw_tbl); ++ while (tbl) { ++ temp_tbl = LIST_NEXT(tbl, next); ++ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); ++ tbl = temp_tbl; ++ } ++ it = LIST_FIRST(&priv->flow_hw_itt); ++ while (it) { ++ temp_it = LIST_NEXT(it, next); ++ claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL)); ++ it = temp_it; ++ } ++ at = LIST_FIRST(&priv->flow_hw_at); ++ while (at) { ++ temp_at = LIST_NEXT(at, next); ++ claim_zero(flow_hw_actions_template_destroy(dev, at, NULL)); ++ at = temp_at; + } + for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { + if (priv->hw_drop[i]) +@@ -9677,13 +9922,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev) + } + mlx5_free(priv->hw_q); + priv->hw_q = NULL; +- claim_zero(mlx5dr_context_close(priv->dr_ctx)); + if (priv->shared_host) { + struct mlx5_priv *host_priv = priv->shared_host->data->dev_private; + __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED); + priv->shared_host = NULL; + } +- priv->dr_ctx = NULL; + mlx5_free(priv->hw_attr); + priv->hw_attr = NULL; + priv->nb_queue = 0; +@@ -9853,6 +10096,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue, + "CT is not enabled"); + return 0; + } ++ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "CT supports port indexes up to " ++ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); ++ return 0; ++ } + ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx); + if (!ct) { + rte_flow_error_set(error, rte_errno, +@@ -9967,11 +10217,13 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, + const struct rte_flow_action_handle *handle, + void *user_data, void *query_data, + enum mlx5_hw_job_type type, ++ 
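The flow_hw_resource_release() hunk above switches from `while (!LIST_EMPTY(...)) { LIST_FIRST(...); destroy(); }` loops to caching LIST_NEXT() before destroying each element, so the walk no longer depends on the destroy callback unlinking the node. A small sketch of that safe-teardown idiom over a sys/queue.h list (generic element type, not the driver's structures) is:

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	LIST_ENTRY(node) next;
	int id;
};

LIST_HEAD(node_list, node);

/*
 * Destroy every element: remember the successor first, then release
 * the current node, so freeing (or unlinking) it cannot break the walk.
 */
void
node_list_flush(struct node_list *head)
{
	struct node *n, *tmp;

	n = LIST_FIRST(head);
	while (n != NULL) {
		tmp = LIST_NEXT(n, next);
		LIST_REMOVE(n, next);
		free(n);
		n = tmp;
	}
}

Where a *_FOREACH_SAFE macro is available it expresses the same thing; spelling it out keeps the teardown independent of which sys/queue.h variant is in use.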
enum mlx5_hw_indirect_type indirect_type, + struct rte_flow_error *error) + { + struct mlx5_hw_q_job *job; + +- MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE); ++ if (queue == MLX5_HW_INV_QUEUE) ++ queue = CTRL_QUEUE_ID(priv); job = flow_hw_job_get(priv, queue); if (!job) { - rte_errno = ENOMEM; -@@ -3368,10 +3421,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, - if (flow_hw_actions_construct(dev, job, - &table->ats[action_template_index], - pattern_template_index, actions, -- rule_acts, queue, error)) { -- rte_errno = EINVAL; -+ rule_acts, queue, error)) - goto error; -- } - rule_items = flow_hw_get_rule_items(dev, table, items, - pattern_template_index, job); - if (!rule_items) -@@ -3722,8 +3773,7 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, - } - return; + rte_flow_error_set(error, ENOMEM, +@@ -9983,9 +10235,21 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, + job->action = handle; + job->user_data = user_data; + job->query.user = query_data; ++ job->indirect_type = indirect_type; + return job; + } + ++struct mlx5_hw_q_job * ++mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue, ++ const struct rte_flow_action_handle *handle, ++ void *user_data, void *query_data, ++ enum mlx5_hw_job_type type, ++ struct rte_flow_error *error) ++{ ++ return flow_hw_action_job_init(priv, queue, handle, user_data, query_data, ++ type, MLX5_HW_INDIRECT_TYPE_LEGACY, error); ++} ++ + static __rte_always_inline void + flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue, + struct mlx5_hw_q_job *job, +@@ -10045,15 +10309,17 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + const struct rte_flow_action_age *age; + struct mlx5_aso_mtr *aso_mtr; + cnt_id_t cnt_id; +- uint32_t mtr_id; + uint32_t age_idx; + bool push = flow_hw_action_push(attr); + bool aso = false; ++ bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK; + +- if (attr) { ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return NULL; ++ if (attr || force_job) { + job = flow_hw_action_job_init(priv, queue, NULL, user_data, + NULL, MLX5_HW_Q_JOB_TYPE_CREATE, +- error); ++ MLX5_HW_INDIRECT_TYPE_LEGACY, error); + if (!job) + return NULL; } -- /* If the port is engaged in resource sharing, do not use queue cache. */ -- cnt_queue = mlx5_hws_cnt_is_pool_shared(priv) ? NULL : &queue; -+ cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); - /* Put the counter first to reduce the race risk in BG thread. 
*/ - mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id); - flow->cnt_id = 0; -@@ -3780,13 +3830,6 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job - job->query.hw); - aso_ct->state = ASO_CONNTRACK_READY; +@@ -10105,9 +10371,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push); + if (!aso_mtr) + break; +- mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK << +- MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id); +- handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id; ++ handle = (void *)(uintptr_t)job->action; + break; + case RTE_FLOW_ACTION_TYPE_RSS: + handle = flow_dv_action_create(dev, conf, action, error); +@@ -10122,9 +10386,8 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, + NULL, "action type not supported"); + break; + } +- if (job) { ++ if (job && !force_job) { + job->action = handle; +- job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY; + flow_hw_action_finalize(dev, queue, job, push, aso, + handle != NULL); + } +@@ -10155,15 +10418,17 @@ mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue, + fm->color_aware = meter_mark->color_mode; + if (upd_meter_mark->state_valid) + fm->is_enable = meter_mark->state; ++ aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ? ++ ASO_METER_WAIT : ASO_METER_WAIT_ASYNC; + /* Update ASO flow meter by wqe. */ +- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, ++ if (mlx5_aso_meter_update_by_wqe(priv, queue, + aso_mtr, &priv->mtr_bulk, job, push)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Unable to update ASO meter WQE"); + /* Wait for ASO object completion. */ + if (queue == MLX5_HW_INV_QUEUE && +- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) ++ mlx5_aso_mtr_wait(priv, aso_mtr, true)) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Unable to wait for ASO meter CQE"); +@@ -10209,11 +10474,12 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, + int ret = 0; + bool push = flow_hw_action_push(attr); + bool aso = false; ++ bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK; + +- if (attr) { ++ if (attr || force_job) { + job = flow_hw_action_job_init(priv, queue, handle, user_data, + NULL, MLX5_HW_Q_JOB_TYPE_UPDATE, +- error); ++ MLX5_HW_INDIRECT_TYPE_LEGACY, error); + if (!job) + return -rte_errno; + } +@@ -10247,7 +10513,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, + "action type not supported"); + break; + } +- if (job) ++ if (job && !force_job) + flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0); + return ret; + } +@@ -10290,11 +10556,12 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, + bool push = flow_hw_action_push(attr); + bool aso = false; + int ret = 0; ++ bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK; + +- if (attr) { ++ if (attr || force_job) { + job = flow_hw_action_job_init(priv, queue, handle, user_data, + NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, +- error); ++ MLX5_HW_INDIRECT_TYPE_LEGACY, error); + if (!job) + return -rte_errno; + } +@@ -10327,7 +10594,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, + fm = &aso_mtr->fm; + fm->is_enable = 0; + /* Update ASO flow meter by wqe. 
*/ +- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr, ++ if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr, + &priv->mtr_bulk, job, push)) { + ret = -EINVAL; + rte_flow_error_set(error, EINVAL, +@@ -10337,17 +10604,14 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, } -- } else { -- /* -- * rte_flow_op_result::user data can point to -- * struct mlx5_aso_mtr object as well -- */ -- if (queue != CTRL_QUEUE_ID(priv)) -- MLX5_ASSERT(false); + /* Wait for ASO object completion. */ + if (queue == MLX5_HW_INV_QUEUE && +- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { ++ mlx5_aso_mtr_wait(priv, aso_mtr, true)) { + ret = -EINVAL; + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "Unable to wait for ASO meter CQE"); + break; + } +- if (!job) +- mlx5_ipool_free(pool->idx_pool, idx); +- else +- aso = true; ++ aso = true; + break; + case MLX5_INDIRECT_ACTION_TYPE_RSS: + ret = flow_dv_action_destroy(dev, handle, error); +@@ -10361,7 +10625,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, + "action type not supported"); + break; } +- if (job) ++ if (job && !force_job) + flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0); + return ret; } - -@@ -4368,12 +4411,23 @@ flow_hw_table_create(struct rte_eth_dev *dev, - matcher_attr.rule.num_log = rte_log2_u32(nb_flows); - /* Parse hints information. */ - if (attr->specialize) { -- if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG) -- matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_WIRE; -- else if (attr->specialize == RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG) -- matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_VPORT; -- else -- DRV_LOG(INFO, "Unsupported hint value %x", attr->specialize); -+ uint32_t val = RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG | -+ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG; -+ -+ if ((attr->specialize & val) == val) { -+ DRV_LOG(INFO, "Invalid hint value %x", -+ attr->specialize); -+ rte_errno = EINVAL; -+ goto it_error; -+ } -+ if (attr->specialize & -+ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_WIRE_ORIG) -+ matcher_attr.optimize_flow_src = -+ MLX5DR_MATCHER_FLOW_SRC_WIRE; -+ else if (attr->specialize & -+ RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG) -+ matcher_attr.optimize_flow_src = -+ MLX5DR_MATCHER_FLOW_SRC_VPORT; +@@ -10607,7 +10871,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue, + if (attr) { + job = flow_hw_action_job_init(priv, queue, handle, user_data, + data, MLX5_HW_Q_JOB_TYPE_QUERY, +- error); ++ MLX5_HW_INDIRECT_TYPE_LEGACY, error); + if (!job) + return -rte_errno; } - /* Build the item template. 
*/ - for (i = 0; i < nb_item_templates; i++) { -@@ -4623,7 +4677,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, - return rte_flow_error_set(error, EBUSY, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, -- "table in use"); -+ "table is in use"); +@@ -10661,7 +10925,7 @@ flow_hw_async_action_handle_query_update + job = flow_hw_action_job_init(priv, queue, handle, user_data, + query, + MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, +- error); ++ MLX5_HW_INDIRECT_TYPE_LEGACY, error); + if (!job) + return -rte_errno; } - LIST_REMOVE(table, next); - for (i = 0; i < table->nb_item_templates; i++) -@@ -4975,15 +5029,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev, - ret = flow_validate_modify_field_level(&action_conf->dst, error); - if (ret) - return ret; -- if (action_conf->dst.tag_index && -- !flow_modify_field_support_tag_array(action_conf->dst.field)) -- return rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_ACTION, action, -- "destination tag index is not supported"); -- if (action_conf->dst.class_id) -- return rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_ACTION, action, -- "destination class id is not supported"); -+ if (action_conf->dst.field != RTE_FLOW_FIELD_FLEX_ITEM) { -+ if (action_conf->dst.tag_index && -+ !flow_modify_field_support_tag_array(action_conf->dst.field)) -+ return rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_ACTION, action, -+ "destination tag index is not supported"); -+ if (action_conf->dst.class_id) -+ return rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_ACTION, action, -+ "destination class id is not supported"); -+ } - if (mask_conf->dst.level != UINT8_MAX) +@@ -10742,6 +11006,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id, return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, action, -@@ -4998,15 +5054,17 @@ flow_hw_validate_action_modify_field(struct rte_eth_dev *dev, - "destination field mask and template are not equal"); - if (action_conf->src.field != RTE_FLOW_FIELD_POINTER && - action_conf->src.field != RTE_FLOW_FIELD_VALUE) { -- if (action_conf->src.tag_index && -- !flow_modify_field_support_tag_array(action_conf->src.field)) -- return rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_ACTION, action, -- "source tag index is not supported"); -- if (action_conf->src.class_id) -- return rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_ACTION, action, -- "source class id is not supported"); -+ if (action_conf->src.field != RTE_FLOW_FIELD_FLEX_ITEM) { -+ if (action_conf->src.tag_index && -+ !flow_modify_field_support_tag_array(action_conf->src.field)) -+ return rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_ACTION, action, -+ "source tag index is not supported"); -+ if (action_conf->src.class_id) -+ return rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_ACTION, action, -+ "source class id is not supported"); -+ } - if (mask_conf->src.level != UINT8_MAX) + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "empty context"); ++ if (!priv->hws_age_req) ++ return rte_flow_error_set(error, ENOENT, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, ++ NULL, "No aging initialized"); + if (priv->hws_strict_queue) { + if (queue_id >= age_info->hw_q_age->nb_rings) return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, action, -@@ -5443,6 +5501,69 @@ mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions, - MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP; +@@ -11319,6 +11587,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, 
uint32_t queue, + } + }; + ++ if (!mlx5_hw_ctx_validate(dev, error)) ++ return NULL; + if (!actions) { + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + NULL, "No action list"); +@@ -11337,7 +11607,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, + if (attr) { + job = flow_hw_action_job_init(priv, queue, NULL, user_data, + NULL, MLX5_HW_Q_JOB_TYPE_CREATE, +- error); ++ MLX5_HW_INDIRECT_TYPE_LIST, error); + if (!job) + return NULL; + } +@@ -11357,7 +11627,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, + } + if (job) { + job->action = handle; +- job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST; + flow_hw_action_finalize(dev, queue, job, push, false, + handle != NULL); + } +@@ -11402,7 +11671,7 @@ flow_hw_async_action_list_handle_destroy + if (attr) { + job = flow_hw_action_job_init(priv, queue, NULL, user_data, + NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, +- error); ++ MLX5_HW_INDIRECT_TYPE_LIST, error); + if (!job) + return rte_errno; + } +@@ -11881,8 +12150,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool + proxy_port_id, port_id); + return 0; + } +- if (!proxy_priv->hw_esw_sq_miss_root_tbl || +- !proxy_priv->hw_esw_sq_miss_tbl) { ++ if (!proxy_priv->hw_ctrl_fdb || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) { + DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " + "default flow tables were not created.", + proxy_port_id, port_id); +@@ -11914,7 +12184,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool + actions[2] = (struct rte_flow_action) { + .type = RTE_FLOW_ACTION_TYPE_END, + }; +- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, ++ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, ++ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, + items, 0, actions, 0, &flow_info, external); + if (ret) { + DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", +@@ -11945,7 +12216,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool + .type = RTE_FLOW_ACTION_TYPE_END, + }; + flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; +- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, ++ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, ++ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl, + items, 0, actions, 0, &flow_info, external); + if (ret) { + DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d", +@@ -11989,10 +12261,13 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) + } + proxy_dev = &rte_eth_devices[proxy_port_id]; + proxy_priv = proxy_dev->data->dev_private; ++ /* FDB default flow rules must be enabled. */ ++ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); + if (!proxy_priv->dr_ctx) + return 0; +- if (!proxy_priv->hw_esw_sq_miss_root_tbl || +- !proxy_priv->hw_esw_sq_miss_tbl) ++ if (!proxy_priv->hw_ctrl_fdb || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || ++ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) + return 0; + cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows); + while (cf != NULL) { +@@ -12052,6 +12327,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + } + proxy_dev = &rte_eth_devices[proxy_port_id]; + proxy_priv = proxy_dev->data->dev_private; ++ /* FDB default flow rules must be enabled. 
*/ ++ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); + if (!proxy_priv->dr_ctx) { + DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured " + "for HWS to create default FDB jump rule. Default rule will " +@@ -12059,7 +12336,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + proxy_port_id, port_id); + return 0; + } +- if (!proxy_priv->hw_esw_zero_tbl) { ++ if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) { + DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " + "default flow tables were not created.", + proxy_port_id, port_id); +@@ -12067,7 +12344,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) + return -rte_errno; + } + return flow_hw_create_ctrl_flow(dev, proxy_dev, +- proxy_priv->hw_esw_zero_tbl, ++ proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl, + items, 0, actions, 0, &flow_info, false); + } + +@@ -12119,10 +12396,12 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) + }; + + MLX5_ASSERT(priv->master); +- if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) ++ if (!priv->dr_ctx || ++ !priv->hw_ctrl_fdb || ++ !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl) + return 0; + return flow_hw_create_ctrl_flow(dev, dev, +- priv->hw_tx_meta_cpy_tbl, ++ priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl, + eth_all, 0, copy_reg_action, 0, &flow_info, false); + } + +@@ -12214,11 +12493,11 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev) + .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, + }; + +- MLX5_ASSERT(priv->master); +- if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl) ++ if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl) + return 0; +- return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0, +- miss_action, 0, &flow_info, false); ++ return flow_hw_create_ctrl_flow(dev, dev, ++ priv->hw_ctrl_fdb->hw_lacp_rx_tbl, ++ eth_lacp, 0, miss_action, 0, &flow_info, false); + } + + static uint32_t +@@ -12533,7 +12812,7 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx; + unsigned int i; +- unsigned int j; ++ int j; + int ret = 0; + + RTE_SET_USED(priv); +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +index 7cbf772ea4..1376533604 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +@@ -100,8 +100,8 @@ mlx5_flow_meter_profile_find(struct mlx5_priv *priv, uint32_t meter_profile_id) + + if (priv->mtr_profile_arr) + return &priv->mtr_profile_arr[meter_profile_id]; +- if (mlx5_l3t_get_entry(priv->mtr_profile_tbl, +- meter_profile_id, &data) || !data.ptr) ++ if (!priv->mtr_profile_tbl || ++ mlx5_l3t_get_entry(priv->mtr_profile_tbl, meter_profile_id, &data) || !data.ptr) + return NULL; + fmp = data.ptr; + /* Remove reference taken by the mlx5_l3t_get_entry. 
*/ +@@ -618,6 +618,7 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev, + meter_profile_id); } -+enum mlx5_hw_indirect_list_relative_position { -+ MLX5_INDIRECT_LIST_POSITION_UNKNOWN = -1, -+ MLX5_INDIRECT_LIST_POSITION_BEFORE_MH = 0, -+ MLX5_INDIRECT_LIST_POSITION_AFTER_MH, -+}; -+ -+static enum mlx5_hw_indirect_list_relative_position -+mlx5_hw_indirect_list_mh_position(const struct rte_flow_action *action) -+{ -+ const struct rte_flow_action_indirect_list *conf = action->conf; -+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(conf->handle); -+ enum mlx5_hw_indirect_list_relative_position pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; -+ const union { -+ struct mlx5_indlst_legacy *legacy; -+ struct mlx5_hw_encap_decap_action *reformat; -+ struct rte_flow_action_list_handle *handle; -+ } h = { .handle = conf->handle}; -+ -+ switch (list_type) { -+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY: -+ switch (h.legacy->legacy_type) { -+ case RTE_FLOW_ACTION_TYPE_AGE: -+ case RTE_FLOW_ACTION_TYPE_COUNT: -+ case RTE_FLOW_ACTION_TYPE_CONNTRACK: -+ case RTE_FLOW_ACTION_TYPE_METER_MARK: -+ case RTE_FLOW_ACTION_TYPE_QUOTA: -+ pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH; -+ break; -+ case RTE_FLOW_ACTION_TYPE_RSS: -+ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; -+ break; -+ default: -+ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; -+ break; -+ } -+ break; -+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR: -+ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; -+ break; -+ case MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT: -+ switch (h.reformat->action_type) { -+ case MLX5DR_ACTION_TYP_REFORMAT_TNL_L2_TO_L2: -+ case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2: -+ pos = MLX5_INDIRECT_LIST_POSITION_BEFORE_MH; -+ break; -+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2: -+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3: -+ pos = MLX5_INDIRECT_LIST_POSITION_AFTER_MH; -+ break; -+ default: -+ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; -+ break; -+ } -+ break; -+ default: -+ pos = MLX5_INDIRECT_LIST_POSITION_UNKNOWN; -+ break; -+ } -+ return pos; -+} -+ -+#define MLX5_HW_EXPAND_MH_FAILED 0xffff -+ - static inline uint16_t - flow_hw_template_expand_modify_field(struct rte_flow_action actions[], - struct rte_flow_action masks[], -@@ -5479,6 +5600,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[], - * @see action_order_arr[] - */ - for (i = act_num - 2; (int)i >= 0; i--) { -+ enum mlx5_hw_indirect_list_relative_position pos; - enum rte_flow_action_type type = actions[i].type; - uint64_t reformat_type; ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to add MTR profile with HWS. + * +@@ -697,6 +698,7 @@ mlx5_flow_meter_profile_hws_delete(struct rte_eth_dev *dev, + memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile)); + return 0; + } ++#endif -@@ -5509,6 +5631,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[], - if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP) - i--; - break; -+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST: -+ pos = mlx5_hw_indirect_list_mh_position(&actions[i]); -+ if (pos == MLX5_INDIRECT_LIST_POSITION_UNKNOWN) -+ return MLX5_HW_EXPAND_MH_FAILED; -+ if (pos == MLX5_INDIRECT_LIST_POSITION_BEFORE_MH) -+ goto insert; -+ break; - default: - i++; /* new MF inserted AFTER actions[i] */ - goto insert; -@@ -5639,6 +5768,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, + /** + * Find policy by id. 
+@@ -839,6 +841,7 @@ mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev, + return 0; + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to check MTR policy action validate for HWS + * +@@ -875,6 +878,7 @@ mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev, + } + return 0; + } ++#endif + + static int + __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev, +@@ -1201,6 +1205,7 @@ mlx5_flow_meter_policy_get(struct rte_eth_dev *dev, + &policy_idx); + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Callback to delete MTR policy for HWS. + * +@@ -1523,7 +1528,7 @@ policy_add_err: + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, "Failed to create meter policy."); + } +- ++#endif + /** + * Check meter validation. + * +@@ -1608,12 +1613,13 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv, + if (sh->meter_aso_en) { + fm->is_enable = !!is_enable; + aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); +- ret = mlx5_aso_meter_update_by_wqe(sh, MLX5_HW_INV_QUEUE, ++ aso_mtr->state = ASO_METER_WAIT; ++ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, + aso_mtr, &priv->mtr_bulk, + NULL, true); + if (ret) + return ret; +- ret = mlx5_aso_mtr_wait(sh, MLX5_HW_INV_QUEUE, aso_mtr); ++ ret = mlx5_aso_mtr_wait(priv, aso_mtr, false); + if (ret) + return ret; + } else { +@@ -1859,7 +1865,8 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, + /* If ASO meter supported, update ASO flow meter by wqe. */ + if (priv->sh->meter_aso_en) { + aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); +- ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, ++ aso_mtr->state = ASO_METER_WAIT; ++ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, + aso_mtr, &priv->mtr_bulk, NULL, true); + if (ret) + goto error; +@@ -1893,6 +1900,7 @@ error: + NULL, "Failed to create devx meter."); + } + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + /** + * Create meter rules. + * +@@ -1920,6 +1928,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, + struct mlx5_flow_meter_info *fm; + struct mlx5_flow_meter_policy *policy = NULL; + struct mlx5_aso_mtr *aso_mtr; ++ struct mlx5_hw_q_job *job; int ret; - const struct rte_flow_action_ipv6_ext_remove *remove_data; -+ if (!mlx5_hw_ctx_validate(dev, error)) -+ return -rte_errno; - /* FDB actions are only valid to proxy port. */ - if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master)) - return rte_flow_error_set(error, EINVAL, -@@ -6151,7 +6282,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, - rm[set_vlan_vid_ix].conf)->vlan_vid != 0); - const struct rte_flow_action_of_set_vlan_vid *conf = - ra[set_vlan_vid_ix].conf; -- rte_be16_t vid = masked ? conf->vlan_vid : 0; - int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0, - NULL, &error); - *spec = (typeof(*spec)) { -@@ -6162,8 +6292,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, - }, - .src = { - .field = RTE_FLOW_FIELD_VALUE, -- .level = vid, -- .offset = 0, - }, - .width = width, - }; -@@ -6175,11 +6303,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, - }, - .src = { - .field = RTE_FLOW_FIELD_VALUE, -- .level = masked ? 
(1U << width) - 1 : 0, -- .offset = 0, - }, - .width = 0xffffffff, - }; -+ if (masked) { -+ uint32_t mask_val = 0xffffffff; -+ -+ rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid)); -+ rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val)); + if (!priv->mtr_profile_arr || +@@ -1965,17 +1974,26 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, + fm->shared = !!shared; + fm->initialized = 1; + /* Update ASO flow meter by wqe. */ +- ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr, +- &priv->mtr_bulk, NULL, true); +- if (ret) ++ job = mlx5_flow_action_job_init(priv, MLX5_HW_INV_QUEUE, NULL, NULL, ++ NULL, MLX5_HW_Q_JOB_TYPE_CREATE, NULL); ++ if (!job) ++ return -rte_mtr_error_set(error, ENOMEM, ++ RTE_MTR_ERROR_TYPE_MTR_ID, ++ NULL, "No job context."); ++ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr, ++ &priv->mtr_bulk, job, true); ++ if (ret) { ++ flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv)); + return -rte_mtr_error_set(error, ENOTSUP, +- RTE_MTR_ERROR_TYPE_UNSPECIFIED, +- NULL, "Failed to create devx meter."); ++ RTE_MTR_ERROR_TYPE_UNSPECIFIED, ++ NULL, "Failed to create devx meter."); + } - ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; - ra[set_vlan_vid_ix].conf = spec; - rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; -@@ -6206,8 +6338,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, - }, - .src = { - .field = RTE_FLOW_FIELD_VALUE, -- .level = vid, -- .offset = 0, - }, - .width = width, - }; -@@ -6216,6 +6346,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, - .conf = &conf - }; + fm->active_state = params->meter_enable; + __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED); + __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED); + return 0; + } ++#endif + + static int + mlx5_flow_meter_params_flush(struct rte_eth_dev *dev, +@@ -2460,6 +2478,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = { + .stats_read = mlx5_flow_meter_stats_read, + }; + ++#if defined(HAVE_MLX5_HWS_SUPPORT) + static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + .capabilities_get = mlx5_flow_mtr_cap_get, + .meter_profile_add = mlx5_flow_meter_profile_hws_add, +@@ -2478,6 +2497,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + .stats_update = NULL, + .stats_read = NULL, + }; ++#endif + + /** + * Get meter operations. 
+@@ -2493,12 +2513,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { + int + mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) + { ++#if defined(HAVE_MLX5_HWS_SUPPORT) + struct mlx5_priv *priv = dev->data->dev_private; -+ rte_memcpy(conf.src.value, &vid, sizeof(vid)); - return flow_hw_modify_field_construct(job, act_data, hw_acts, - &modify_action); + if (priv->sh->config.dv_flow_en == 2) + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops; + else + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; ++#else ++ *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; ++#endif + return 0; } -@@ -6463,6 +6594,12 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev, - action_flags, - act_num, - expand_mf_num); -+ if (pos == MLX5_HW_EXPAND_MH_FAILED) { -+ rte_flow_error_set(error, ENOMEM, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, -+ NULL, "modify header expansion failed"); -+ return NULL; -+ } - act_num += expand_mf_num; - for (i = pos + expand_mf_num; i < act_num; i++) - src_off[i] += expand_mf_num; -@@ -6585,7 +6722,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev, - return rte_flow_error_set(error, EBUSY, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, -- "action template in using"); -+ "action template is in use"); - } - if (template->action_flags & flag) - mlx5_free_srh_flex_parser(dev); -@@ -6645,6 +6782,8 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, - bool items_end = false; - uint32_t tag_bitmap = 0; -+ if (!mlx5_hw_ctx_validate(dev, error)) -+ return -rte_errno; - if (!attr->ingress && !attr->egress && !attr->transfer) - return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, - "at least one of the direction attributes" -@@ -7003,7 +7142,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev, - return rte_flow_error_set(error, EBUSY, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, -- "item template in using"); -+ "item template is in use"); +@@ -2614,7 +2638,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv, + struct mlx5_aso_mtr *aso_mtr; + + aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); +- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { ++ if (mlx5_aso_mtr_wait(priv, aso_mtr, false)) { + return rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, +@@ -2877,7 +2901,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + struct mlx5_flow_meter_profile *fmp; + struct mlx5_legacy_flow_meter *legacy_fm; + struct mlx5_flow_meter_info *fm; +- struct mlx5_flow_meter_policy *policy; + struct mlx5_flow_meter_sub_policy *sub_policy; + void *tmp; + uint32_t i, mtr_idx, policy_idx; +@@ -2945,15 +2968,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + mlx5_l3t_destroy(priv->policy_idx_tbl); + priv->policy_idx_tbl = NULL; } - if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT | - MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) -@@ -8366,6 +8505,72 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, - return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); ++#if defined(HAVE_MLX5_HWS_SUPPORT) + if (priv->mtr_policy_arr) { ++ struct mlx5_flow_meter_policy *policy; ++ + for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) { + policy = mlx5_flow_meter_policy_find(dev, i, + &policy_idx); +- if (policy->initialized) ++ if (policy->initialized) { + mlx5_flow_meter_policy_hws_delete(dev, i, + error); ++ } + } + } ++#endif + if (priv->mtr_profile_tbl) { + MLX5_L3T_FOREACH(priv->mtr_profile_tbl, 
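The hunk above wraps the HWS meter callbacks in this file in #if defined(HAVE_MLX5_HWS_SUPPORT), so builds without hardware steering always fall back to the legacy rte_mtr_ops table, while HWS-enabled builds still pick the HWS table only when the runtime configuration (dv_flow_en == 2) asks for it. A stripped-down sketch of that compile-time plus runtime selection, with placeholder names for the macro and ops tables, is:

#include <stddef.h>

struct mtr_ops { int (*create)(int id); };

static int legacy_create(int id) { return id; }
static const struct mtr_ops legacy_ops = { .create = legacy_create };

#if defined(HAVE_HWS_SUPPORT)
static int hws_create(int id) { return -id; }
static const struct mtr_ops hws_ops = { .create = hws_create };
#endif

/*
 * Pick the ops table: hardware-steering ops only exist when the
 * feature is compiled in, and are used only when the runtime
 * configuration selects them.
 */
const struct mtr_ops *
mtr_ops_get(int dv_flow_en)
{
#if defined(HAVE_HWS_SUPPORT)
	if (dv_flow_en == 2)
		return &hws_ops;
#else
	(void)dv_flow_en;
#endif
	return &legacy_ops;
}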
i, entry) { + fmp = entry; +@@ -2967,14 +2995,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) + mlx5_l3t_destroy(priv->mtr_profile_tbl); + priv->mtr_profile_tbl = NULL; + } ++#if defined(HAVE_MLX5_HWS_SUPPORT) + if (priv->mtr_profile_arr) { + for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) { + fmp = mlx5_flow_meter_profile_find(priv, i); +- if (fmp->initialized) ++ if (fmp->initialized) { + mlx5_flow_meter_profile_hws_delete(dev, i, + error); ++ } + } + } ++#endif + /* Delete default policy table. */ + mlx5_flow_destroy_def_policy(dev); + if (priv->sh->refcnt == 1) +diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +index a3bea94811..7a88a4001a 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c ++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +@@ -56,26 +56,29 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh, + uint32_t ret __rte_unused; + + reset_cnt_num = rte_ring_count(reset_list); +- do { +- cpool->query_gen++; +- mlx5_aso_cnt_query(sh, cpool); +- zcdr.n1 = 0; +- zcdu.n1 = 0; +- ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list, +- sizeof(cnt_id_t), +- reset_cnt_num, &zcdu, +- NULL); +- MLX5_ASSERT(ret == reset_cnt_num); +- ret = rte_ring_dequeue_zc_burst_elem_start(reset_list, +- sizeof(cnt_id_t), +- reset_cnt_num, &zcdr, +- NULL); +- MLX5_ASSERT(ret == reset_cnt_num); +- __hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num); +- rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num); +- rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num); ++ cpool->query_gen++; ++ mlx5_aso_cnt_query(sh, cpool); ++ zcdr.n1 = 0; ++ zcdu.n1 = 0; ++ ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list, ++ sizeof(cnt_id_t), ++ reset_cnt_num, &zcdu, ++ NULL); ++ MLX5_ASSERT(ret == reset_cnt_num); ++ ret = rte_ring_dequeue_zc_burst_elem_start(reset_list, ++ sizeof(cnt_id_t), ++ reset_cnt_num, &zcdr, ++ NULL); ++ MLX5_ASSERT(ret == reset_cnt_num); ++ __hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num); ++ rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num); ++ rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num); ++ ++ if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) { + reset_cnt_num = rte_ring_count(reset_list); +- } while (reset_cnt_num > 0); ++ DRV_LOG(DEBUG, "ibdev %s cpool %p wait_reset_cnt=%" PRIu32, ++ sh->ibdev_name, (void *)cpool, reset_cnt_num); ++ } } -+/** -+ * Cleans up all template tables and pattern, and actions templates used for -+ * FDB control flow rules. -+ * -+ * @param dev -+ * Pointer to Ethernet device. -+ */ -+static void -+flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev) + /** +@@ -315,6 +318,11 @@ mlx5_hws_cnt_svc(void *opaque) + rte_spinlock_unlock(&sh->cpool_lock); + query_us = query_cycle / (rte_get_timer_hz() / US_PER_S); + sleep_us = interval - query_us; ++ DRV_LOG(DEBUG, "ibdev %s counter service thread: " ++ "interval_us=%" PRIu64 " query_us=%" PRIu64 " " ++ "sleep_us=%" PRIu64, ++ sh->ibdev_name, interval, query_us, ++ interval > query_us ? sleep_us : 0); + if (interval > query_us) + rte_delay_us_sleep(sleep_us); + } +@@ -340,6 +348,55 @@ mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp) + mlx5_free(cntp); + } + ++static bool ++mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg, ++ const struct mlx5_hws_cache_param *ccfg) +{ -+ struct mlx5_priv *priv = dev->data->dev_private; -+ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; ++ /* ++ * Enable cache if and only if there are enough counters requested ++ * to populate all of the caches. 
++ */ ++ return pcfg->request_num >= ccfg->q_num * ccfg->size; ++} + -+ if (!priv->hw_ctrl_fdb) -+ return; -+ hw_ctrl_fdb = priv->hw_ctrl_fdb; -+ /* Clean up templates used for LACP default miss table. */ -+ if (hw_ctrl_fdb->hw_lacp_rx_tbl) -+ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL)); -+ if (hw_ctrl_fdb->lacp_rx_actions_tmpl) -+ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl, -+ NULL)); -+ if (hw_ctrl_fdb->lacp_rx_items_tmpl) -+ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl, -+ NULL)); -+ /* Clean up templates used for default Tx metadata copy. */ -+ if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl) -+ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL)); -+ if (hw_ctrl_fdb->tx_meta_actions_tmpl) -+ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl, -+ NULL)); -+ if (hw_ctrl_fdb->tx_meta_items_tmpl) -+ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl, -+ NULL)); -+ /* Clean up templates used for default FDB jump rule. */ -+ if (hw_ctrl_fdb->hw_esw_zero_tbl) -+ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL)); -+ if (hw_ctrl_fdb->jump_one_actions_tmpl) -+ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl, -+ NULL)); -+ if (hw_ctrl_fdb->port_items_tmpl) -+ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl, -+ NULL)); -+ /* Clean up templates used for default SQ miss flow rules - non-root table. */ -+ if (hw_ctrl_fdb->hw_esw_sq_miss_tbl) -+ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL)); -+ if (hw_ctrl_fdb->regc_sq_items_tmpl) -+ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl, -+ NULL)); -+ if (hw_ctrl_fdb->port_actions_tmpl) -+ claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl, -+ NULL)); -+ /* Clean up templates used for default SQ miss flow rules - root table. */ -+ if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) -+ claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL)); -+ if (hw_ctrl_fdb->regc_jump_actions_tmpl) -+ claim_zero(flow_hw_actions_template_destroy(dev, -+ hw_ctrl_fdb->regc_jump_actions_tmpl, NULL)); -+ if (hw_ctrl_fdb->esw_mgr_items_tmpl) -+ claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl, -+ NULL)); -+ /* Clean up templates structure for FDB control flow rules. */ -+ mlx5_free(hw_ctrl_fdb); -+ priv->hw_ctrl_fdb = NULL; ++static struct mlx5_hws_cnt_pool_caches * ++mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg, ++ const struct mlx5_hws_cache_param *ccfg) ++{ ++ struct mlx5_hws_cnt_pool_caches *cache; ++ char mz_name[RTE_MEMZONE_NAMESIZE]; ++ uint32_t qidx; ++ ++ /* If counter pool is big enough, setup the counter pool cache. */ ++ cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, ++ sizeof(*cache) + ++ sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) ++ * ccfg->q_num, 0, SOCKET_ID_ANY); ++ if (cache == NULL) ++ return NULL; ++ /* Store the necessary cache parameters. 
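The new mlx5_hws_cnt_should_enable_cache()/mlx5_hws_cnt_cache_init() pair above only builds the per-queue rings when the requested counter count can fill every cache (request_num >= q_num * size); smaller pools skip the cache entirely and work from the shared rings. A rough, self-contained sketch of that sizing decision, outside the rte_ring machinery, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A per-queue cache only pays off when every queue can keep its cache
 * full; otherwise a few queues would hoard the counters and the rest
 * would always fall back to the slower shared pool.
 */
static bool
should_enable_cache(uint32_t requested, uint32_t q_num, uint32_t cache_size)
{
	return requested >= q_num * cache_size;
}

int
main(void)
{
	/* 4 queues with 256-entry caches need at least 1024 counters. */
	printf("small pool: %d\n", should_enable_cache(512, 4, 256));
	printf("large pool: %d\n", should_enable_cache(4096, 4, 256));
	return 0;
}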
*/ ++ cache->fetch_sz = ccfg->fetch_sz; ++ cache->preload_sz = ccfg->preload_sz; ++ cache->threshold = ccfg->threshold; ++ cache->q_num = ccfg->q_num; ++ for (qidx = 0; qidx < ccfg->q_num; qidx++) { ++ snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); ++ cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, ++ SOCKET_ID_ANY, ++ RING_F_SP_ENQ | RING_F_SC_DEQ | ++ RING_F_EXACT_SZ); ++ if (cache->qcache[qidx] == NULL) ++ goto error; ++ } ++ return cache; ++ ++error: ++ while (qidx--) ++ rte_ring_free(cache->qcache[qidx]); ++ mlx5_free(cache); ++ return NULL; +} + - /* - * Create a table on the root group to for the LACP traffic redirecting. + static struct mlx5_hws_cnt_pool * + mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + const struct mlx5_hws_cnt_pool_cfg *pcfg, +@@ -348,7 +405,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + char mz_name[RTE_MEMZONE_NAMESIZE]; + struct mlx5_hws_cnt_pool *cntp; + uint64_t cnt_num = 0; +- uint32_t qidx; + + MLX5_ASSERT(pcfg); + MLX5_ASSERT(ccfg); +@@ -360,17 +416,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + cntp->cfg = *pcfg; + if (cntp->cfg.host_cpool) + return cntp; +- cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, +- sizeof(*cntp->cache) + +- sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) +- * ccfg->q_num, 0, SOCKET_ID_ANY); +- if (cntp->cache == NULL) +- goto error; +- /* store the necessary cache parameters. */ +- cntp->cache->fetch_sz = ccfg->fetch_sz; +- cntp->cache->preload_sz = ccfg->preload_sz; +- cntp->cache->threshold = ccfg->threshold; +- cntp->cache->q_num = ccfg->q_num; + if (pcfg->request_num > sh->hws_max_nb_counters) { + DRV_LOG(ERR, "Counter number %u " + "is greater than the maximum supported (%u).", +@@ -418,13 +463,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, + DRV_LOG(ERR, "failed to create reuse list ring"); + goto error; + } +- for (qidx = 0; qidx < ccfg->q_num; qidx++) { +- snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); +- cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, +- SOCKET_ID_ANY, +- RING_F_SP_ENQ | RING_F_SC_DEQ | +- RING_F_EXACT_SZ); +- if (cntp->cache->qcache[qidx] == NULL) ++ /* Allocate counter cache only if needed. */ ++ if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) { ++ cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg); ++ if (cntp->cache == NULL) + goto error; + } + /* Initialize the time for aging-out calculation. */ +@@ -685,7 +727,9 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, + * Maybe blocked for at most 200ms here. + */ + rte_spinlock_lock(&sh->cpool_lock); +- LIST_REMOVE(cpool, next); ++ /* Try to remove cpool before it was added to list caused segfault. */ ++ if (!LIST_EMPTY(&sh->hws_cpool_list) && cpool->next.le_prev) ++ LIST_REMOVE(cpool, next); + rte_spinlock_unlock(&sh->cpool_lock); + if (cpool->cfg.host_cpool == NULL) { + if (--sh->cnt_svc->refcnt == 0) +diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h +index 585b5a83ad..e00596088f 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h ++++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h +@@ -557,19 +557,32 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue, + } + + /** +- * Check if counter pool allocated for HWS is shared between ports. ++ * Decide if the given queue can be used to perform counter allocation/deallcation ++ * based on counter configuration + * + * @param[in] priv + * Pointer to the port private data structure. 
++ * @param[in] queue ++ * Pointer to the queue index. * -@@ -8415,182 +8620,154 @@ flow_hw_create_lacp_rx_table(struct rte_eth_dev *dev, * @return - * 0 on success, negative values otherwise +- * True if counter pools is shared between ports. False otherwise. ++ * @p queue if cache related to the queue can be used. NULL otherwise. */ --static __rte_unused int -+static int - flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error) +-static __rte_always_inline bool +-mlx5_hws_cnt_is_pool_shared(struct mlx5_priv *priv) ++static __rte_always_inline uint32_t * ++mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue) { - struct mlx5_priv *priv = dev->data->dev_private; -- struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; -- struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; -- struct rte_flow_pattern_template *port_items_tmpl = NULL; -- struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; -- struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL; -- struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; -- struct rte_flow_actions_template *port_actions_tmpl = NULL; -- struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; -- struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; -- struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL; -+ struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; - uint32_t xmeta = priv->sh->config.dv_xmeta_en; - uint32_t repr_matching = priv->sh->config.repr_matching; -- int ret; -+ uint32_t fdb_def_rule = priv->sh->config.fdb_def_rule; +- return priv && priv->hws_cpool && +- (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL); ++ if (priv && priv->hws_cpool) { ++ /* Do not use queue cache if counter pool is shared. */ ++ if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL) ++ return NULL; ++ /* Do not use queue cache if counter cache is disabled. */ ++ if (priv->hws_cpool->cache == NULL) ++ return NULL; ++ return queue; ++ } ++ /* This case should not be reached if counter pool was successfully configured. */ ++ MLX5_ASSERT(false); ++ return NULL; + } -- /* Create templates and table for default SQ miss flow rules - root table. */ -- esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); -- if (!esw_mgr_items_tmpl) { -- DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" -- " template for control flows", dev->data->port_id); -- goto err; -- } -- regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error); -- if (!regc_jump_actions_tmpl) { -- DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" -- " for control flows", dev->data->port_id); -- goto err; -- } -- MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); -- priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table -- (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error); -- if (!priv->hw_esw_sq_miss_root_tbl) { -- DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" -- " for control flows", dev->data->port_id); -- goto err; -- } -- /* Create templates and table for default SQ miss flow rules - non-root table. 
*/ -- regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); -- if (!regc_sq_items_tmpl) { -- DRV_LOG(ERR, "port %u failed to create SQ item template for" -- " control flows", dev->data->port_id); -- goto err; -- } -- port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); -- if (!port_actions_tmpl) { -- DRV_LOG(ERR, "port %u failed to create port action template" -- " for control flows", dev->data->port_id); -- goto err; -- } -- MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); -- priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, -- port_actions_tmpl, error); -- if (!priv->hw_esw_sq_miss_tbl) { -- DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" -- " for control flows", dev->data->port_id); -- goto err; -- } -- /* Create templates and table for default FDB jump flow rules. */ -- port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); -- if (!port_items_tmpl) { -- DRV_LOG(ERR, "port %u failed to create SQ item template for" -- " control flows", dev->data->port_id); -- goto err; -- } -- jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template -- (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); -- if (!jump_one_actions_tmpl) { -- DRV_LOG(ERR, "port %u failed to create jump action template" -- " for control flows", dev->data->port_id); -+ MLX5_ASSERT(priv->hw_ctrl_fdb == NULL); -+ hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY); -+ if (!hw_ctrl_fdb) { -+ DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates", -+ dev->data->port_id); -+ rte_errno = ENOMEM; - goto err; - } -- MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); -- priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, -- jump_one_actions_tmpl, -- error); -- if (!priv->hw_esw_zero_tbl) { -- DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" -- " for control flows", dev->data->port_id); -- goto err; -+ priv->hw_ctrl_fdb = hw_ctrl_fdb; -+ if (fdb_def_rule) { -+ /* Create templates and table for default SQ miss flow rules - root table. */ -+ hw_ctrl_fdb->esw_mgr_items_tmpl = -+ flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); -+ if (!hw_ctrl_fdb->esw_mgr_items_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" -+ " template for control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->regc_jump_actions_tmpl = -+ flow_hw_create_ctrl_regc_jump_actions_template(dev, error); -+ if (!hw_ctrl_fdb->regc_jump_actions_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->hw_esw_sq_miss_root_tbl = -+ flow_hw_create_ctrl_sq_miss_root_table -+ (dev, hw_ctrl_fdb->esw_mgr_items_tmpl, -+ hw_ctrl_fdb->regc_jump_actions_tmpl, error); -+ if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) { -+ DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } -+ /* Create templates and table for default SQ miss flow rules - non-root table. 
*/ -+ hw_ctrl_fdb->regc_sq_items_tmpl = -+ flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); -+ if (!hw_ctrl_fdb->regc_sq_items_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create SQ item template for" -+ " control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->port_actions_tmpl = -+ flow_hw_create_ctrl_port_actions_template(dev, error); -+ if (!hw_ctrl_fdb->port_actions_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create port action template" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->hw_esw_sq_miss_tbl = -+ flow_hw_create_ctrl_sq_miss_table -+ (dev, hw_ctrl_fdb->regc_sq_items_tmpl, -+ hw_ctrl_fdb->port_actions_tmpl, error); -+ if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) { -+ DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } -+ /* Create templates and table for default FDB jump flow rules. */ -+ hw_ctrl_fdb->port_items_tmpl = -+ flow_hw_create_ctrl_port_pattern_template(dev, error); -+ if (!hw_ctrl_fdb->port_items_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create SQ item template for" -+ " control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->jump_one_actions_tmpl = -+ flow_hw_create_ctrl_jump_actions_template -+ (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); -+ if (!hw_ctrl_fdb->jump_one_actions_tmpl) { -+ DRV_LOG(ERR, "port %u failed to create jump action template" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } -+ hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table -+ (dev, hw_ctrl_fdb->port_items_tmpl, -+ hw_ctrl_fdb->jump_one_actions_tmpl, error); -+ if (!hw_ctrl_fdb->hw_esw_zero_tbl) { -+ DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" -+ " for control flows", dev->data->port_id); -+ goto err; -+ } - } - /* Create templates and table for default Tx metadata copy flow rule. */ - if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) { -- tx_meta_items_tmpl = -+ hw_ctrl_fdb->tx_meta_items_tmpl = - flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error); -- if (!tx_meta_items_tmpl) { -+ if (!hw_ctrl_fdb->tx_meta_items_tmpl) { - DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern" - " template for control flows", dev->data->port_id); - goto err; - } -- tx_meta_actions_tmpl = -+ hw_ctrl_fdb->tx_meta_actions_tmpl = - flow_hw_create_tx_default_mreg_copy_actions_template(dev, error); -- if (!tx_meta_actions_tmpl) { -+ if (!hw_ctrl_fdb->tx_meta_actions_tmpl) { - DRV_LOG(ERR, "port %u failed to Tx metadata copy actions" - " template for control flows", dev->data->port_id); - goto err; + static __rte_always_inline unsigned int +diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c +index 5bf1a679b2..86a7e090a1 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rx.c +@@ -459,7 +459,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + union { + volatile struct mlx5_cqe *cqe; +- volatile struct mlx5_err_cqe *err_cqe; ++ volatile struct mlx5_error_cqe *err_cqe; + } u = { + .cqe = &(*rxq->cqes)[(rxq->cq_ci - vec) & cqe_mask], + }; +@@ -613,7 +613,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, + * @param mprq + * Indication if it is called from MPRQ. 
+ * @return +- * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, ++ * 0 in case of empty CQE, ++ * MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, + * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, + * otherwise the packet size in regular RxQ, + * and striding byte count format in mprq case. +@@ -697,6 +698,11 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, + if (ret == MLX5_RECOVERY_ERROR_RET || + ret == MLX5_RECOVERY_COMPLETED_RET) + return MLX5_CRITICAL_ERROR_CQE_RET; ++ if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) { ++ *skip_cnt = 1; ++ ++rxq->cq_ci; ++ return MLX5_ERROR_CQE_MASK; ++ } + } else { + return 0; + } +@@ -971,19 +977,18 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; + len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask, &mcqe, &skip_cnt, false); + if (unlikely(len & MLX5_ERROR_CQE_MASK)) { ++ /* We drop packets with non-critical errors */ ++ rte_mbuf_raw_free(rep); + if (len == MLX5_CRITICAL_ERROR_CQE_RET) { +- rte_mbuf_raw_free(rep); + rq_ci = rxq->rq_ci << sges_n; + break; + } ++ /* Skip specified amount of error CQEs packets */ + rq_ci >>= sges_n; + rq_ci += skip_cnt; + rq_ci <<= sges_n; +- idx = rq_ci & wqe_mask; +- wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; +- seg = (*rxq->elts)[idx]; +- cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; +- len = len & ~MLX5_ERROR_CQE_MASK; ++ MLX5_ASSERT(!pkt); ++ continue; + } + if (len == 0) { + rte_mbuf_raw_free(rep); +diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h +index 2fce908499..db912adf2a 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rx.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rx.h +@@ -101,14 +101,14 @@ struct mlx5_rxq_data { + unsigned int shared:1; /* Shared RXQ. */ + unsigned int delay_drop:1; /* Enable delay drop. */ + unsigned int cqe_comp_layout:1; /* CQE Compression Layout*/ +- unsigned int cq_ci:24; ++ uint16_t port_id; + volatile uint32_t *rq_db; + volatile uint32_t *cq_db; +- uint16_t port_id; + uint32_t elts_ci; + uint32_t rq_ci; + uint16_t consumed_strd; /* Number of consumed strides in WQE. */ + uint32_t rq_pi; ++ uint32_t cq_ci:24; + uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */ + uint32_t byte_mask; + union { +@@ -151,13 +151,13 @@ struct mlx5_rxq_data { + /* RX queue control descriptor. */ + struct mlx5_rxq_ctrl { + struct mlx5_rxq_data rxq; /* Data path structure. */ +- LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */ + LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */ + struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */ + struct mlx5_dev_ctx_shared *sh; /* Shared context. */ + bool is_hairpin; /* Whether RxQ type is Hairpin. */ + unsigned int socket; /* CPU socket ID for allocations. */ + LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */ ++ RTE_ATOMIC(int32_t) ctrl_ref; /* Reference counter. */ + uint32_t share_group; /* Group ID of shared RXQ. */ + uint16_t share_qid; /* Shared RxQ ID in group. */ + unsigned int started:1; /* Whether (shared) RXQ has been started. 
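The mlx5_rx.h hunk above adds an atomic ctrl_ref counter to mlx5_rxq_ctrl, and the mlx5_rxq.c hunks that follow free the shared control structure only when the last owner drops its reference (the driver additionally folds in a LIST_EMPTY() check on the owners list). A minimal sketch of the plain last-reference test with C11 atomics, using generic names rather than the rte_atomic wrappers, is:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_ctrl {
	atomic_int refcnt;
	int payload;
};

/*
 * Drop one reference; only the caller whose decrement takes the counter
 * to zero (previous value 1) frees the shared structure, so concurrent
 * owners never double-free it.
 */
static void
shared_ctrl_put(struct shared_ctrl *c)
{
	if (atomic_fetch_sub_explicit(&c->refcnt, 1, memory_order_acq_rel) == 1)
		free(c);
}

int
main(void)
{
	struct shared_ctrl *c = malloc(sizeof(*c));

	if (c == NULL)
		return 1;
	atomic_init(&c->refcnt, 2);	/* two owners share the control block */
	c->payload = 42;
	shared_ctrl_put(c);		/* first owner: 2 -> 1, no free */
	shared_ctrl_put(c);		/* last owner: 1 -> 0, frees */
	printf("released\n");
	return 0;
}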
*/ +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c +index 1bb036afeb..dccfc4eb36 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c +@@ -655,6 +655,14 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc, + struct mlx5_rxq_priv *rxq; + bool empty; + ++ if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { ++ DRV_LOG(ERR, ++ "port %u number of descriptors requested for Rx queue" ++ " %u is more than supported", ++ dev->data->port_id, idx); ++ rte_errno = EINVAL; ++ return -EINVAL; ++ } + if (!rte_is_power_of_2(*desc)) { + *desc = 1 << log2above(*desc); + DRV_LOG(WARNING, +@@ -945,6 +953,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + /* Join owner list. */ + LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry); + rxq->ctrl = rxq_ctrl; ++ rte_atomic_fetch_add_explicit(&rxq_ctrl->ctrl_ref, 1, rte_memory_order_relaxed); + mlx5_rxq_ref(dev, idx); + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); +@@ -1962,9 +1971,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + tmpl->rxq.shared = 1; + tmpl->share_group = conf->share_group; + tmpl->share_qid = conf->share_qid; +- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); + } +- LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); ++ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); ++ rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed); + return tmpl; + error: + mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh); +@@ -2017,7 +2026,8 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, + tmpl->rxq.idx = idx; + rxq->hairpin_conf = *hairpin_conf; + mlx5_rxq_ref(dev, idx); +- LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); ++ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); ++ rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed); + return tmpl; + } + +@@ -2259,6 +2269,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + struct mlx5_rxq_priv *rxq; + struct mlx5_rxq_ctrl *rxq_ctrl; + uint32_t refcnt; ++ int32_t ctrl_ref; + + if (priv->rxq_privs == NULL) + return 0; +@@ -2283,14 +2294,14 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) + RTE_ETH_QUEUE_STATE_STOPPED; } -- MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); -- priv->hw_tx_meta_cpy_tbl = -- flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl, -- tx_meta_actions_tmpl, error); -- if (!priv->hw_tx_meta_cpy_tbl) { -+ hw_ctrl_fdb->hw_tx_meta_cpy_tbl = -+ flow_hw_create_tx_default_mreg_copy_table -+ (dev, hw_ctrl_fdb->tx_meta_items_tmpl, -+ hw_ctrl_fdb->tx_meta_actions_tmpl, error); -+ if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) { - DRV_LOG(ERR, "port %u failed to create table for default" - " Tx metadata copy flow rule", dev->data->port_id); - goto err; + } else { /* Refcnt zero, closing device. */ +- LIST_REMOVE(rxq_ctrl, next); + LIST_REMOVE(rxq, owner_entry); +- if (LIST_EMPTY(&rxq_ctrl->owners)) { ++ ctrl_ref = rte_atomic_fetch_sub_explicit(&rxq_ctrl->ctrl_ref, 1, ++ rte_memory_order_relaxed) - 1; ++ if (ctrl_ref == 1 && LIST_EMPTY(&rxq_ctrl->owners)) { + if (!rxq_ctrl->is_hairpin) + mlx5_mr_btree_free + (&rxq_ctrl->rxq.mr_ctrl.cache_bh); +- if (rxq_ctrl->rxq.shared) +- LIST_REMOVE(rxq_ctrl, share_entry); ++ LIST_REMOVE(rxq_ctrl, share_entry); + mlx5_free(rxq_ctrl); } - } - /* Create LACP default miss table. 
*/ -- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { -- lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error); -- if (!lacp_rx_items_tmpl) { -+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { -+ hw_ctrl_fdb->lacp_rx_items_tmpl = -+ flow_hw_create_lacp_rx_pattern_template(dev, error); -+ if (!hw_ctrl_fdb->lacp_rx_items_tmpl) { - DRV_LOG(ERR, "port %u failed to create pattern template" - " for LACP Rx traffic", dev->data->port_id); - goto err; + dev->data->rx_queues[idx] = NULL; +@@ -2316,7 +2327,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev) + struct mlx5_rxq_ctrl *rxq_ctrl; + int ret = 0; + +- LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { ++ LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) { + DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", + dev->data->port_id, rxq_ctrl->rxq.idx); + ++ret; +@@ -2884,6 +2895,7 @@ static void + __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ bool deref_rxqs = true; + + #ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (hrxq->hws_flags) +@@ -2893,9 +2905,10 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) + #endif + priv->obj_ops.hrxq_destroy(hrxq); + if (!hrxq->standalone) { +- mlx5_ind_table_obj_release(dev, hrxq->ind_table, +- hrxq->hws_flags ? +- (!!dev->data->dev_started) : true); ++ if (!dev->data->dev_started && hrxq->hws_flags && ++ !priv->hws_rule_flushing) ++ deref_rxqs = false; ++ mlx5_ind_table_obj_release(dev, hrxq->ind_table, deref_rxqs); + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +index cccfa7f2d3..efe0db4ca5 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +@@ -96,8 +96,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + 11, 10, 9, 8}; /* bswap32, rss */ + /* Restore the compressed count. Must be 16 bits. */ + uint16_t mcqe_n = (rxq->cqe_comp_layout) ? +- (MLX5_CQE_NUM_MINIS(cq->op_own) + 1) : +- t_pkt->data_len + (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ (MLX5_CQE_NUM_MINIS(cq->op_own) + 1U) : rte_be_to_cpu_32(cq->byte_cnt); + uint16_t pkts_n = mcqe_n; + const __vector unsigned char rearm = + (__vector unsigned char)vec_vsx_ld(0, +@@ -1249,9 +1248,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (unlikely(rxq->shared)) { + pkts[pos]->port = cq[pos].user_index_low; +- pkts[pos + p1]->port = cq[pos + p1].user_index_low; +- pkts[pos + p2]->port = cq[pos + p2].user_index_low; +- pkts[pos + p3]->port = cq[pos + p3].user_index_low; ++ pkts[pos + 1]->port = cq[pos + p1].user_index_low; ++ pkts[pos + 2]->port = cq[pos + p2].user_index_low; ++ pkts[pos + 3]->port = cq[pos + p3].user_index_low; } -- lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error); -- if (!lacp_rx_actions_tmpl) { -+ hw_ctrl_fdb->lacp_rx_actions_tmpl = -+ flow_hw_create_lacp_rx_actions_template(dev, error); -+ if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) { - DRV_LOG(ERR, "port %u failed to create actions template" - " for LACP Rx traffic", dev->data->port_id); - goto err; + if (rxq->hw_timestamp) { + int offset = rxq->timestamp_offset; +@@ -1295,17 +1294,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + metadata; + pkts[pos]->ol_flags |= metadata ? 
flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 1].flow_table_metadata) & mask; ++ (cq[pos + p1].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = + metadata; + pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 2].flow_table_metadata) & mask; ++ (cq[pos + p2].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = + metadata; + pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 3].flow_table_metadata) & mask; ++ (cq[pos + p3].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = + metadata; + pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL; +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +index 3ed688191f..02817a9645 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +@@ -95,8 +95,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + }; + /* Restore the compressed count. Must be 16 bits. */ + uint16_t mcqe_n = (rxq->cqe_comp_layout) ? +- (MLX5_CQE_NUM_MINIS(cq->op_own) + 1) : +- t_pkt->data_len + (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ (MLX5_CQE_NUM_MINIS(cq->op_own) + 1U) : rte_be_to_cpu_32(cq->byte_cnt); + uint16_t pkts_n = mcqe_n; + const uint64x2_t rearm = + vld1q_u64((void *)&t_pkt->rearm_data); +@@ -835,13 +834,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag, + opcode, &elts[pos]); + if (unlikely(rxq->shared)) { +- elts[pos]->port = container_of(p0, struct mlx5_cqe, ++ pkts[pos]->port = container_of(p0, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 1]->port = container_of(p1, struct mlx5_cqe, ++ pkts[pos + 1]->port = container_of(p1, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 2]->port = container_of(p2, struct mlx5_cqe, ++ pkts[pos + 2]->port = container_of(p2, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 3]->port = container_of(p3, struct mlx5_cqe, ++ pkts[pos + 3]->port = container_of(p3, struct mlx5_cqe, + pkt_info)->user_index_low; } -- priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl, -- lacp_rx_actions_tmpl, error); -- if (!priv->hw_lacp_rx_tbl) { -+ hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table -+ (dev, hw_ctrl_fdb->lacp_rx_items_tmpl, -+ hw_ctrl_fdb->lacp_rx_actions_tmpl, error); -+ if (!hw_ctrl_fdb->hw_lacp_rx_tbl) { - DRV_LOG(ERR, "port %u failed to create template table for" - " for LACP Rx traffic", dev->data->port_id); - goto err; + if (unlikely(rxq->hw_timestamp)) { +@@ -853,34 +852,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + ts = rte_be_to_cpu_64 + (container_of(p0, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos], offset, ++ mlx5_timestamp_set(pkts[pos], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p1, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos + 1], offset, ++ mlx5_timestamp_set(pkts[pos + 1], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p2, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos + 2], offset, ++ mlx5_timestamp_set(pkts[pos + 2], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p3, struct mlx5_cqe, + pkt_info)->timestamp); +- 
mlx5_timestamp_set(elts[pos + 3], offset, ++ mlx5_timestamp_set(pkts[pos + 3], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + } else { +- mlx5_timestamp_set(elts[pos], offset, ++ mlx5_timestamp_set(pkts[pos], offset, + rte_be_to_cpu_64(container_of(p0, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 1], offset, ++ mlx5_timestamp_set(pkts[pos + 1], offset, + rte_be_to_cpu_64(container_of(p1, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 2], offset, ++ mlx5_timestamp_set(pkts[pos + 2], offset, + rte_be_to_cpu_64(container_of(p2, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 3], offset, ++ mlx5_timestamp_set(pkts[pos + 3], offset, + rte_be_to_cpu_64(container_of(p3, + struct mlx5_cqe, pkt_info)->timestamp)); + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +index 2bdd1f676d..e7271abef6 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h ++++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +@@ -94,8 +94,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + -1, -1, -1, -1 /* skip packet_type */); + /* Restore the compressed count. Must be 16 bits. */ + uint16_t mcqe_n = (rxq->cqe_comp_layout) ? +- (MLX5_CQE_NUM_MINIS(cq->op_own) + 1) : +- t_pkt->data_len + (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ (MLX5_CQE_NUM_MINIS(cq->op_own) + 1U) : rte_be_to_cpu_32(cq->byte_cnt); + uint16_t pkts_n = mcqe_n; + const __m128i rearm = + _mm_loadu_si128((__m128i *)&t_pkt->rearm_data); +@@ -783,9 +782,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (unlikely(rxq->shared)) { + pkts[pos]->port = cq[pos].user_index_low; +- pkts[pos + p1]->port = cq[pos + p1].user_index_low; +- pkts[pos + p2]->port = cq[pos + p2].user_index_low; +- pkts[pos + p3]->port = cq[pos + p3].user_index_low; ++ pkts[pos + 1]->port = cq[pos + p1].user_index_low; ++ pkts[pos + 2]->port = cq[pos + p2].user_index_low; ++ pkts[pos + 3]->port = cq[pos + p3].user_index_low; } + if (unlikely(rxq->hw_timestamp)) { + int offset = rxq->timestamp_offset; +diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c +index 615e1d073d..f4ac58e2f9 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_stats.c ++++ b/dpdk/drivers/net/mlx5/mlx5_stats.c +@@ -39,24 +39,36 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, + unsigned int n) + { + struct mlx5_priv *priv = dev->data->dev_private; +- unsigned int i; +- uint64_t counters[n]; ++ uint64_t counters[MLX5_MAX_XSTATS]; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; ++ unsigned int i; ++ uint16_t stats_n = 0; ++ uint16_t stats_n_2nd = 0; + uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + + if (n >= mlx5_stats_n && stats) { +- int stats_n; + int ret; + +- stats_n = mlx5_os_get_stats_n(dev); +- if (stats_n < 0) +- return stats_n; +- if (xstats_ctrl->stats_n != stats_n) ++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); ++ if (ret < 0) ++ return ret; ++ /* ++ * The number of statistics fetched via "ETH_SS_STATS" may vary because ++ * of the port configuration each time. This is also true between 2 ++ * ports. There might be a case that the numbers are the same even if ++ * configurations are different. ++ * It is not recommended to change the configuration without using ++ * RTE API. 
The port(traffic) restart may trigger another initialization ++ * to make sure the map are correct. ++ */ ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- ret = mlx5_os_read_dev_counters(dev, counters); +- if (ret) ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); ++ if (ret < 0) + return ret; +- for (i = 0; i != mlx5_stats_n; ++i) { ++ for (i = 0; i != mlx5_stats_n; i++) { + stats[i].id = i; + if (xstats_ctrl->info[i].dev) { + uint64_t wrap_n; +@@ -225,30 +237,32 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; +- int stats_n; + unsigned int i; + uint64_t *counters; + int ret; ++ uint16_t stats_n = 0; ++ uint16_t stats_n_2nd = 0; ++ bool bond_master = (priv->master && priv->pf_bond >= 0); + +- stats_n = mlx5_os_get_stats_n(dev); +- if (stats_n < 0) { ++ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); ++ if (ret < 0) { + DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, +- strerror(-stats_n)); +- return stats_n; ++ strerror(-ret)); ++ return ret; } - return 0; -+ - err: -- /* Do not overwrite the rte_errno. */ -- ret = -rte_errno; -- if (ret == 0) -- ret = rte_flow_error_set(error, EINVAL, -- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -- "Failed to create control tables."); -- if (priv->hw_tx_meta_cpy_tbl) { -- flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL); -- priv->hw_tx_meta_cpy_tbl = NULL; -- } -- if (priv->hw_esw_zero_tbl) { -- flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); -- priv->hw_esw_zero_tbl = NULL; -- } -- if (priv->hw_esw_sq_miss_tbl) { -- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL); -- priv->hw_esw_sq_miss_tbl = NULL; -- } -- if (priv->hw_esw_sq_miss_root_tbl) { -- flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); -- priv->hw_esw_sq_miss_root_tbl = NULL; -- } -- if (lacp_rx_actions_tmpl) -- flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL); -- if (tx_meta_actions_tmpl) -- flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); -- if (jump_one_actions_tmpl) -- flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL); -- if (port_actions_tmpl) -- flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); -- if (regc_jump_actions_tmpl) -- flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); -- if (lacp_rx_items_tmpl) -- flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL); -- if (tx_meta_items_tmpl) -- flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); -- if (port_items_tmpl) -- flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL); -- if (regc_sq_items_tmpl) -- flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); -- if (esw_mgr_items_tmpl) -- flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); -- return ret; -+ flow_hw_cleanup_ctrl_fdb_tables(dev); -+ return -EINVAL; - } +- if (xstats_ctrl->stats_n != stats_n) ++ if (xstats_ctrl->stats_n != stats_n || ++ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) + mlx5_os_stats_init(dev); +- counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * +- xstats_ctrl->mlx5_stats_n, 0, +- SOCKET_ID_ANY); ++ /* Considering to use stack directly. 
*/ ++ counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n, ++ 0, SOCKET_ID_ANY); + if (!counters) { +- DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " +- "counters", ++ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; + } +- ret = mlx5_os_read_dev_counters(dev, counters); ++ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); + if (ret) { + DRV_LOG(ERR, "port %u cannot read device counters: %s", + dev->data->port_id, strerror(rte_errno)); +diff --git a/dpdk/drivers/net/mlx5/mlx5_trace.h b/dpdk/drivers/net/mlx5/mlx5_trace.h +index 888d96f60b..656dbb1a4f 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_trace.h ++++ b/dpdk/drivers/net/mlx5/mlx5_trace.h +@@ -22,21 +22,24 @@ extern "C" { + /* TX burst subroutines trace points. */ + RTE_TRACE_POINT_FP( + rte_pmd_mlx5_trace_tx_entry, +- RTE_TRACE_POINT_ARGS(uint16_t port_id, uint16_t queue_id), ++ RTE_TRACE_POINT_ARGS(uint64_t real_time, uint16_t port_id, uint16_t queue_id), ++ rte_trace_point_emit_u64(real_time); + rte_trace_point_emit_u16(port_id); + rte_trace_point_emit_u16(queue_id); + ) - static void -@@ -9184,6 +9361,38 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr, - return true; + RTE_TRACE_POINT_FP( + rte_pmd_mlx5_trace_tx_exit, +- RTE_TRACE_POINT_ARGS(uint16_t nb_sent, uint16_t nb_req), ++ RTE_TRACE_POINT_ARGS(uint64_t real_time, uint16_t nb_sent, uint16_t nb_req), ++ rte_trace_point_emit_u64(real_time); + rte_trace_point_emit_u16(nb_sent); + rte_trace_point_emit_u16(nb_req); + ) + + RTE_TRACE_POINT_FP( + rte_pmd_mlx5_trace_tx_wqe, +- RTE_TRACE_POINT_ARGS(uint32_t opcode), ++ RTE_TRACE_POINT_ARGS(uint64_t real_time, uint32_t opcode), ++ rte_trace_point_emit_u64(real_time); + rte_trace_point_emit_u32(opcode); + ) + +diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c +index 5ac25d7e2d..fe2c512c5c 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_trigger.c ++++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c +@@ -1498,7 +1498,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) + if (!txq) + continue; + queue = mlx5_txq_get_sqn(txq); +- if ((priv->representor || priv->master) && config->dv_esw_en) { ++ if ((priv->representor || priv->master) && ++ config->dv_esw_en && ++ config->fdb_def_rule) { + if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) { + mlx5_txq_release(dev, i); + goto error; +@@ -1524,7 +1526,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) + } + if (priv->isolated) + return 0; +- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) + if (mlx5_flow_hw_lacp_rx_flow(dev)) + goto error; + if (dev->data->promiscuous) +@@ -1632,14 +1634,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) + DRV_LOG(INFO, "port %u FDB default rule is disabled", + dev->data->port_id); + } +- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { ++ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { + ret = mlx5_flow_lacp_miss(dev); + if (ret) + DRV_LOG(INFO, "port %u LACP rule cannot be created - " + "forward LACP to kernel.", dev->data->port_id); + else +- DRV_LOG(INFO, "LACP traffic will be missed in port %u." 
+- , dev->data->port_id); ++ DRV_LOG(INFO, "LACP traffic will be missed in port %u.", ++ dev->data->port_id); + } + if (priv->isolated) + return 0; +diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c +index 1fe9521dfc..4148d6d899 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_tx.c ++++ b/dpdk/drivers/net/mlx5/mlx5_tx.c +@@ -55,7 +55,7 @@ tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl) + + /* Return 1 if the error CQE is signed otherwise, sign it and return 0. */ + static int +-check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) ++check_err_cqe_seen(volatile struct mlx5_error_cqe *err_cqe) + { + static const uint8_t magic[] = "seen"; + int ret = 1; +@@ -83,7 +83,7 @@ check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) + */ + static int + mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, +- volatile struct mlx5_err_cqe *err_cqe) ++ volatile struct mlx5_error_cqe *err_cqe) + { + if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) { + const uint16_t wqe_m = ((1 << txq->wqe_n) - 1); +@@ -107,7 +107,7 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, + mlx5_dump_debug_information(name, "MLX5 Error CQ:", + (const void *)((uintptr_t) + txq->cqes), +- sizeof(struct mlx5_cqe) * ++ sizeof(struct mlx5_error_cqe) * + (1 << txq->cqe_n)); + mlx5_dump_debug_information(name, "MLX5 Error SQ:", + (const void *)((uintptr_t) +@@ -206,7 +206,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq, + */ + rte_wmb(); + ret = mlx5_tx_error_cqe_handle +- (txq, (volatile struct mlx5_err_cqe *)cqe); ++ (txq, (volatile struct mlx5_error_cqe *)cqe); + if (unlikely(ret < 0)) { + /* + * Some error occurred on queue error +diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h +index e59ce37667..46559426fe 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_tx.h ++++ b/dpdk/drivers/net/mlx5/mlx5_tx.h +@@ -369,6 +369,46 @@ mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts) + return ci; } -+static int -+flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr, -+ uint16_t nb_queue, -+ const struct rte_flow_queue_attr *queue_attr[], -+ struct rte_flow_error *error) ++/** ++ * Read real time clock counter directly from the device PCI BAR area. ++ * The PCI BAR must be mapped to the process memory space at initialization. ++ * ++ * @param dev ++ * Device to read clock counter from ++ * ++ * @return ++ * 0 - if HCA BAR is not supported or not mapped. 
++ * !=0 - read 64-bit value of real-time in UTC formatv (nanoseconds) ++ */ ++static __rte_always_inline uint64_t mlx5_read_pcibar_clock(struct rte_eth_dev *dev) +{ -+ uint32_t size; -+ unsigned int i; -+ -+ if (port_attr == NULL) -+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "Port attributes must be non-NULL"); -+ -+ if (nb_queue == 0) -+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "At least one flow queue is required"); -+ -+ if (queue_attr == NULL) -+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "Queue attributes must be non-NULL"); -+ -+ size = queue_attr[0]->size; -+ for (i = 1; i < nb_queue; ++i) { -+ if (queue_attr[i]->size != size) -+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, -+ NULL, -+ "All flow queues must have the same size"); ++ struct mlx5_proc_priv *ppriv = dev->process_private; ++ ++ if (ppriv && ppriv->hca_bar) { ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; ++ uint64_t *hca_ptr = (uint64_t *)(ppriv->hca_bar) + ++ __mlx5_64_off(initial_seg, real_time); ++ uint64_t __rte_atomic *ts_addr; ++ uint64_t ts; ++ ++ ts_addr = (uint64_t __rte_atomic *)hca_ptr; ++ ts = rte_atomic_load_explicit(ts_addr, rte_memory_order_seq_cst); ++ ts = rte_be_to_cpu_64(ts); ++ ts = mlx5_txpp_convert_rx_ts(sh, ts); ++ return ts; + } -+ + return 0; +} + ++static __rte_always_inline uint64_t mlx5_read_pcibar_clock_from_txq(struct mlx5_txq_data *txq) ++{ ++ struct mlx5_txq_ctrl *txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq); ++ struct rte_eth_dev *dev = ETH_DEV(txq_ctrl->priv); ++ ++ return mlx5_read_pcibar_clock(dev); ++} ++ /** - * Configure port HWS resources. - * -@@ -9235,10 +9444,8 @@ flow_hw_configure(struct rte_eth_dev *dev, - int ret = 0; - uint32_t action_flags; + * Set Software Parser flags and offsets in Ethernet Segment of WQE. + * Flags must be preliminary initialized to zero. +@@ -806,6 +846,7 @@ mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq, + unsigned int olx) + { + struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg; ++ uint64_t real_time; + + /* For legacy MPW replace the EMPW by TSO with modifier. */ + if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW) +@@ -819,9 +860,12 @@ mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq, + cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << + MLX5_COMP_MODE_OFFSET); + cs->misc = RTE_BE32(0); +- if (__rte_trace_point_fp_is_enabled() && !loc->pkts_sent) +- rte_pmd_mlx5_trace_tx_entry(txq->port_id, txq->idx); +- rte_pmd_mlx5_trace_tx_wqe((txq->wqe_ci << 8) | opcode); ++ if (__rte_trace_point_fp_is_enabled()) { ++ real_time = mlx5_read_pcibar_clock_from_txq(txq); ++ if (!loc->pkts_sent) ++ rte_pmd_mlx5_trace_tx_entry(real_time, txq->port_id, txq->idx); ++ rte_pmd_mlx5_trace_tx_wqe(real_time, (txq->wqe_ci << 8) | opcode); ++ } + } -- if (!port_attr || !nb_queue || !queue_attr) { -- rte_errno = EINVAL; -- goto err; -- } -+ if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error)) -+ return -rte_errno; - /* - * Calling rte_flow_configure() again is allowed if and only if - * provided configuration matches the initially provided one. -@@ -9285,14 +9492,6 @@ flow_hw_configure(struct rte_eth_dev *dev, - /* Allocate the queue job descriptor LIFO. 
*/ - mem_size = sizeof(priv->hw_q[0]) * nb_q_updated; - for (i = 0; i < nb_q_updated; i++) { -- /* -- * Check if the queues' size are all the same as the -- * limitation from HWS layer. -- */ -- if (_queue_attr[i]->size != _queue_attr[0]->size) { -- rte_errno = EINVAL; -- goto err; -- } - mem_size += (sizeof(struct mlx5_hw_q_job *) + - sizeof(struct mlx5_hw_q_job) + - sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + -@@ -9545,6 +9744,14 @@ flow_hw_configure(struct rte_eth_dev *dev, - priv->hws_strict_queue = 1; - return 0; - err: -+ priv->hws_strict_queue = 0; -+ flow_hw_destroy_vlan(dev); -+ if (priv->hws_age_req) -+ mlx5_hws_age_pool_destroy(priv); -+ if (priv->hws_cpool) { -+ mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); -+ priv->hws_cpool = NULL; + /** +@@ -3783,7 +3827,8 @@ burst_exit: + __mlx5_tx_free_mbuf(txq, pkts, loc.mbuf_free, olx); + /* Trace productive bursts only. */ + if (__rte_trace_point_fp_is_enabled() && loc.pkts_sent) +- rte_pmd_mlx5_trace_tx_exit(loc.pkts_sent, pkts_n); ++ rte_pmd_mlx5_trace_tx_exit(mlx5_read_pcibar_clock_from_txq(txq), ++ loc.pkts_sent, pkts_n); + return loc.pkts_sent; + } + +diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c +index 5a5df2d1bb..0184060c3f 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txpp.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txpp.c +@@ -971,7 +971,6 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) + { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; +- struct mlx5_proc_priv *ppriv; + uint64_t ts; + int ret; + +@@ -997,15 +996,9 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) + *timestamp = ts; + return 0; + } +- /* Check and try to map HCA PIC BAR to allow reading real time. */ +- ppriv = dev->process_private; +- if (ppriv && !ppriv->hca_bar && +- sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) +- mlx5_txpp_map_hca_bar(dev); + /* Check if we can read timestamp directly from hardware. 
*/ +- if (ppriv && ppriv->hca_bar) { +- ts = MLX5_GET64(initial_seg, ppriv->hca_bar, real_time); +- ts = mlx5_txpp_convert_rx_ts(sh, ts); ++ ts = mlx5_read_pcibar_clock(dev); ++ if (ts != 0) { + *timestamp = ts; + return 0; + } +diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c +index 1ac43548b2..52a39ae073 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_txq.c ++++ b/dpdk/drivers/net/mlx5/mlx5_txq.c +@@ -332,6 +332,14 @@ mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) + { + struct mlx5_priv *priv = dev->data->dev_private; + ++ if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { ++ DRV_LOG(ERR, ++ "port %u number of descriptors requested for Tx queue" ++ " %u is more than supported", ++ dev->data->port_id, idx); ++ rte_errno = EINVAL; ++ return -EINVAL; + } - if (priv->hws_ctpool) { - flow_hw_ct_pool_destroy(dev, priv->hws_ctpool); - priv->hws_ctpool = NULL; -@@ -9553,44 +9760,54 @@ err: - flow_hw_ct_mng_destroy(dev, priv->ct_mng); - priv->ct_mng = NULL; + if (*desc <= MLX5_TX_COMP_THRESH) { + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" +@@ -1311,11 +1319,18 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) } -- if (priv->hws_age_req) -- mlx5_hws_age_pool_destroy(priv); -- if (priv->hws_cpool) { -- mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); -- priv->hws_cpool = NULL; -- } -- mlx5_flow_quota_destroy(dev); - flow_hw_destroy_send_to_kernel_action(priv); -+ flow_hw_cleanup_ctrl_fdb_tables(dev); - flow_hw_free_vport_actions(priv); -+ if (priv->hw_def_miss) { -+ mlx5dr_action_destroy(priv->hw_def_miss); -+ priv->hw_def_miss = NULL; + #ifdef HAVE_MLX5_HWS_SUPPORT + if (priv->sh->config.dv_flow_en == 2) { +- if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) +- return -rte_errno; ++ bool sq_miss_created = false; ++ ++ if (priv->sh->config.fdb_def_rule) { ++ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) ++ return -rte_errno; ++ sq_miss_created = true; ++ } ++ + if (priv->sh->config.repr_matching && + mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) { +- mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); ++ if (sq_miss_created) ++ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); + return -rte_errno; + } + return 0; +diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c +index 4db738785f..b5b6c7c728 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_utils.c ++++ b/dpdk/drivers/net/mlx5/mlx5_utils.c +@@ -379,7 +379,8 @@ _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) + idx -= 1; + trunk_idx = mlx5_trunk_idx_get(pool, idx); + trunk = lc->trunks[trunk_idx]; +- MLX5_ASSERT(trunk); ++ if (!trunk) ++ return NULL; + entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx); + return &trunk->data[entry_idx * pool->cfg.size]; + } +diff --git a/dpdk/drivers/net/mlx5/tools/mlx5_trace.py b/dpdk/drivers/net/mlx5/tools/mlx5_trace.py +index 8c1fd0a350..5eb634a490 100755 +--- a/dpdk/drivers/net/mlx5/tools/mlx5_trace.py ++++ b/dpdk/drivers/net/mlx5/tools/mlx5_trace.py +@@ -174,7 +174,9 @@ def do_tx_entry(msg, trace): + return + # allocate the new burst and append to the queue + burst = MlxBurst() +- burst.call_ts = msg.default_clock_snapshot.ns_from_origin ++ burst.call_ts = event["real_time"] ++ if burst.call_ts == 0: ++ burst.call_ts = msg.default_clock_snapshot.ns_from_origin + trace.tx_blst[cpu_id] = burst + pq_id = event["port_id"] << 16 | event["queue_id"] + queue = trace.tx_qlst.get(pq_id) +@@ 
-194,7 +196,9 @@ def do_tx_exit(msg, trace): + burst = trace.tx_blst.get(cpu_id) + if burst is None: + return +- burst.done_ts = msg.default_clock_snapshot.ns_from_origin ++ burst.done_ts = event["real_time"] ++ if burst.done_ts == 0: ++ burst.done_ts = msg.default_clock_snapshot.ns_from_origin + burst.req = event["nb_req"] + burst.done = event["nb_sent"] + trace.tx_blst.pop(cpu_id) +@@ -210,7 +214,9 @@ def do_tx_wqe(msg, trace): + wqe = MlxWqe() + wqe.wait_ts = trace.tx_wlst.get(cpu_id) + if wqe.wait_ts is None: +- wqe.wait_ts = msg.default_clock_snapshot.ns_from_origin ++ wqe.wait_ts = event["real_time"] ++ if wqe.wait_ts == 0: ++ wqe.wait_ts = msg.default_clock_snapshot.ns_from_origin + wqe.opcode = event["opcode"] + burst.wqes.append(wqe) + +@@ -258,13 +264,14 @@ def do_tx_complete(msg, trace): + if burst.comp(wqe_id, wqe_ts) == 0: + break + rmv += 1 +- # mode completed burst to done list ++ # move completed burst(s) to done list + if rmv != 0: + idx = 0 + while idx < rmv: ++ burst = queue.wait_burst[idx] + queue.done_burst.append(burst) + idx += 1 +- del queue.wait_burst[0:rmv] ++ queue.wait_burst = queue.wait_burst[rmv:] + + + def do_tx(msg, trace): +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +index a31e1b5494..49f750be68 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c +@@ -178,20 +178,29 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) + return -ENOTSUP; + } + +-/** ++/* + * Query the number of statistics provided by ETHTOOL. + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. ++ * @param n_stats ++ * Pointer to number of stats to store. ++ * @param n_stats_sec ++ * Pointer to number of stats to store for the 2nd port of the bond. + * + * @return +- * Number of statistics on success, negative errno value otherwise and +- * rte_errno is set. ++ * 0 on success, negative errno value otherwise and rte_errno is set. + */ + int +-mlx5_os_get_stats_n(struct rte_eth_dev *dev) ++mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, ++ uint16_t *n_stats, uint16_t *n_stats_sec) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); ++ RTE_SET_USED(n_stats); ++ RTE_SET_USED(n_stats_sec); + return -ENOTSUP; + } + +@@ -221,6 +230,8 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * + * @param dev + * Pointer to Ethernet device. ++ * @param bond_master ++ * Indicate if the device is a bond master. + * @param[out] stats + * Counters table output buffer. + * +@@ -229,9 +240,10 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) + * rte_errno is set. 
+ */ + int +-mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) ++mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) + { + RTE_SET_USED(dev); ++ RTE_SET_USED(bond_master); + RTE_SET_USED(stats); + return -ENOTSUP; + } +diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_os.c +index b731bdff06..a9614b125b 100644 +--- a/dpdk/drivers/net/mlx5/windows/mlx5_os.c ++++ b/dpdk/drivers/net/mlx5/windows/mlx5_os.c +@@ -518,9 +518,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + priv->ctrl_flows = 0; + TAILQ_INIT(&priv->flow_meters); +- priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); +- if (!priv->mtr_profile_tbl) +- goto error; ++ if (priv->mtr_en) { ++ priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); ++ if (!priv->mtr_profile_tbl) ++ goto error; ++ } + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", + eth_dev->data->port_id); +diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c +index daa69e533a..7700a63071 100644 +--- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c ++++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c +@@ -91,6 +91,12 @@ mvneta_ifnames_get(const char *key __rte_unused, const char *value, + { + struct mvneta_ifnames *ifnames = extra_args; + ++ if (ifnames->idx >= NETA_NUM_ETH_PPIO) { ++ MVNETA_LOG(ERR, "Too many ifnames specified (max %u)", ++ NETA_NUM_ETH_PPIO); ++ return -EINVAL; + } -+ flow_hw_cleanup_tx_repr_tagging(dev); - for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { -- if (priv->hw_drop[i]) -+ if (priv->hw_drop[i]) { - mlx5dr_action_destroy(priv->hw_drop[i]); -- if (priv->hw_tag[i]) -+ priv->hw_drop[i] = NULL; -+ } -+ if (priv->hw_tag[i]) { - mlx5dr_action_destroy(priv->hw_tag[i]); -+ priv->hw_tag[i] = NULL; -+ } - } -- if (priv->hw_def_miss) -- mlx5dr_action_destroy(priv->hw_def_miss); -- flow_hw_destroy_vlan(dev); -- if (dr_ctx) -+ mlx5_flow_meter_uninit(dev); -+ mlx5_flow_quota_destroy(dev); -+ flow_hw_cleanup_ctrl_rx_tables(dev); -+ if (dr_ctx) { - claim_zero(mlx5dr_context_close(dr_ctx)); -- for (i = 0; i < nb_q_updated; i++) { -- rte_ring_free(priv->hw_q[i].indir_iq); -- rte_ring_free(priv->hw_q[i].indir_cq); -+ priv->dr_ctx = NULL; - } -- mlx5_free(priv->hw_q); -- priv->hw_q = NULL; -- if (priv->acts_ipool) { -- mlx5_ipool_destroy(priv->acts_ipool); -- priv->acts_ipool = NULL; -- } -- if (_queue_attr) -- mlx5_free(_queue_attr); - if (priv->shared_host) { -+ struct mlx5_priv *host_priv = priv->shared_host->data->dev_private; + - __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED); - priv->shared_host = NULL; - } -+ if (priv->hw_q) { -+ for (i = 0; i < nb_q_updated; i++) { -+ rte_ring_free(priv->hw_q[i].indir_iq); -+ rte_ring_free(priv->hw_q[i].indir_cq); + ifnames->names[ifnames->idx++] = value; + + return 0; +@@ -198,7 +204,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L4_TCP, +- RTE_PTYPE_L4_UDP ++ RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +index c12364941d..4cc64c7cad 100644 +--- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c ++++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c +@@ -1777,7 +1777,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L4_TCP, 
+- RTE_PTYPE_L4_UDP ++ RTE_PTYPE_L4_UDP, ++ RTE_PTYPE_UNKNOWN + }; + + return ptypes; +diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c +index b8a32832d7..1736cb5d07 100644 +--- a/dpdk/drivers/net/netvsc/hn_ethdev.c ++++ b/dpdk/drivers/net/netvsc/hn_ethdev.c +@@ -313,6 +313,15 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev, + + if (reta_conf[idx].mask & mask) + hv->rss_ind[i] = reta_conf[idx].reta[shift]; ++ ++ /* ++ * Ensure we don't allow config that directs traffic to an Rx ++ * queue that we aren't going to poll ++ */ ++ if (hv->rss_ind[i] >= dev->data->nb_rx_queues) { ++ PMD_DRV_LOG(ERR, "RSS distributing traffic to invalid Rx queue"); ++ return -EINVAL; + } -+ mlx5_free(priv->hw_q); -+ priv->hw_q = NULL; -+ } -+ if (priv->acts_ipool) { -+ mlx5_ipool_destroy(priv->acts_ipool); -+ priv->acts_ipool = NULL; + } + + err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); +@@ -1127,8 +1136,10 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu) + int i, ret = 0; + + /* Point primary queues at new primary channel */ +- rxqs[0]->chan = hv->channels[0]; +- txqs[0]->chan = hv->channels[0]; ++ if (rxqs[0]) { ++ rxqs[0]->chan = hv->channels[0]; ++ txqs[0]->chan = hv->channels[0]; + } - mlx5_free(priv->hw_attr); - priv->hw_attr = NULL; -+ priv->nb_queue = 0; -+ if (_queue_attr) -+ mlx5_free(_queue_attr); - /* Do not overwrite the internal errno information. */ + + ret = hn_attach(hv, mtu); if (ret) +@@ -1140,10 +1151,12 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu) return ret; -@@ -9609,37 +9826,48 @@ void - flow_hw_resource_release(struct rte_eth_dev *dev) - { - struct mlx5_priv *priv = dev->data->dev_private; -- struct rte_flow_template_table *tbl; -- struct rte_flow_pattern_template *it; -- struct rte_flow_actions_template *at; -- struct mlx5_flow_group *grp; -+ struct rte_flow_template_table *tbl, *temp_tbl; -+ struct rte_flow_pattern_template *it, *temp_it; -+ struct rte_flow_actions_template *at, *temp_at; -+ struct mlx5_flow_group *grp, *temp_grp; - uint32_t i; - if (!priv->dr_ctx) - return; - flow_hw_rxq_flag_set(dev, false); - flow_hw_flush_all_ctrl_flows(dev); -+ flow_hw_cleanup_ctrl_fdb_tables(dev); - flow_hw_cleanup_tx_repr_tagging(dev); - flow_hw_cleanup_ctrl_rx_tables(dev); -- while (!LIST_EMPTY(&priv->flow_hw_grp)) { -- grp = LIST_FIRST(&priv->flow_hw_grp); -- flow_hw_group_unset_miss_group(dev, grp, NULL); -- } -- while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) { -- tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); -- flow_hw_table_destroy(dev, tbl, NULL); -- } -- while (!LIST_EMPTY(&priv->flow_hw_tbl)) { -- tbl = LIST_FIRST(&priv->flow_hw_tbl); -- flow_hw_table_destroy(dev, tbl, NULL); -- } -- while (!LIST_EMPTY(&priv->flow_hw_itt)) { -- it = LIST_FIRST(&priv->flow_hw_itt); -- flow_hw_pattern_template_destroy(dev, it, NULL); -- } -- while (!LIST_EMPTY(&priv->flow_hw_at)) { -- at = LIST_FIRST(&priv->flow_hw_at); -- flow_hw_actions_template_destroy(dev, at, NULL); -+ grp = LIST_FIRST(&priv->flow_hw_grp); -+ while (grp) { -+ temp_grp = LIST_NEXT(grp, next); -+ claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL)); -+ grp = temp_grp; -+ } -+ tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo); -+ while (tbl) { -+ temp_tbl = LIST_NEXT(tbl, next); -+ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); -+ tbl = temp_tbl; -+ } -+ tbl = LIST_FIRST(&priv->flow_hw_tbl); -+ while (tbl) { -+ temp_tbl = LIST_NEXT(tbl, next); -+ claim_zero(flow_hw_table_destroy(dev, tbl, NULL)); -+ tbl = temp_tbl; -+ } -+ it = LIST_FIRST(&priv->flow_hw_itt); -+ while (it) { -+ 
temp_it = LIST_NEXT(it, next); -+ claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL)); -+ it = temp_it; + /* Point any additional queues at new subchannels */ +- for (i = 1; i < dev->data->nb_rx_queues; i++) +- rxqs[i]->chan = hv->channels[i]; +- for (i = 1; i < dev->data->nb_tx_queues; i++) +- txqs[i]->chan = hv->channels[i]; ++ if (rxqs[0]) { ++ for (i = 1; i < dev->data->nb_rx_queues; i++) ++ rxqs[i]->chan = hv->channels[i]; ++ for (i = 1; i < dev->data->nb_tx_queues; i++) ++ txqs[i]->chan = hv->channels[i]; + } -+ at = LIST_FIRST(&priv->flow_hw_at); -+ while (at) { -+ temp_at = LIST_NEXT(at, next); -+ claim_zero(flow_hw_actions_template_destroy(dev, at, NULL)); -+ at = temp_at; - } - for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { - if (priv->hw_drop[i]) -@@ -9677,13 +9905,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev) - } - mlx5_free(priv->hw_q); - priv->hw_q = NULL; -- claim_zero(mlx5dr_context_close(priv->dr_ctx)); - if (priv->shared_host) { - struct mlx5_priv *host_priv = priv->shared_host->data->dev_private; - __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED); - priv->shared_host = NULL; - } -- priv->dr_ctx = NULL; - mlx5_free(priv->hw_attr); - priv->hw_attr = NULL; - priv->nb_queue = 0; -@@ -9853,6 +10079,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue, - "CT is not enabled"); - return 0; + + return ret; + } +diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c +index e4f5015aa3..eea120ae82 100644 +--- a/dpdk/drivers/net/netvsc/hn_rxtx.c ++++ b/dpdk/drivers/net/netvsc/hn_rxtx.c +@@ -234,6 +234,17 @@ static void hn_reset_txagg(struct hn_tx_queue *txq) + txq->agg_prevpkt = NULL; + } + ++static void ++hn_rx_queue_free_common(struct hn_rx_queue *rxq) ++{ ++ if (!rxq) ++ return; ++ ++ rte_free(rxq->rxbuf_info); ++ rte_free(rxq->event_buf); ++ rte_free(rxq); ++} ++ + int + hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, +@@ -243,6 +254,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + { + struct hn_data *hv = dev->data->dev_private; + struct hn_tx_queue *txq; ++ struct hn_rx_queue *rxq = NULL; + char name[RTE_MEMPOOL_NAMESIZE]; + uint32_t tx_free_thresh; + int err = -ENOMEM; +@@ -257,7 +269,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + if (tx_free_thresh + 3 >= nb_desc) { + PMD_INIT_LOG(ERR, + "tx_free_thresh must be less than the number of TX entries minus 3(%u)." +- " (tx_free_thresh=%u port=%u queue=%u)\n", ++ " (tx_free_thresh=%u port=%u queue=%u)", + nb_desc - 3, + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; +@@ -301,6 +313,27 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + goto error; + } + ++ /* ++ * If there are more Tx queues than Rx queues, allocate rx_queues ++ * with event buffer so that Tx completion messages can still be ++ * received ++ */ ++ if (queue_idx >= dev->data->nb_rx_queues) { ++ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ ++ if (!rxq) { ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ /* ++ * Don't allocate mbuf pool or rx ring. RSS is always configured ++ * to ensure packets aren't received by this Rx queue. 
++ */ ++ rxq->mb_pool = NULL; ++ rxq->rx_ring = NULL; ++ } ++ + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); + txq->agg_pktmax = hv->rndis_agg_pkts; + txq->agg_align = hv->rndis_agg_align; +@@ -311,12 +344,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + socket_id, tx_conf); + if (err == 0) { + dev->data->tx_queues[queue_idx] = txq; ++ if (rxq != NULL) ++ dev->data->rx_queues[queue_idx] = rxq; + return 0; } -+ if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { -+ rte_flow_error_set(error, EINVAL, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -+ "CT supports port indexes up to " -+ RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); -+ return 0; -+ } - ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx); - if (!ct) { - rte_flow_error_set(error, rte_errno, -@@ -9967,11 +10200,13 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, - const struct rte_flow_action_handle *handle, - void *user_data, void *query_data, - enum mlx5_hw_job_type type, -+ enum mlx5_hw_indirect_type indirect_type, - struct rte_flow_error *error) - { - struct mlx5_hw_q_job *job; -- MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE); -+ if (queue == MLX5_HW_INV_QUEUE) -+ queue = CTRL_QUEUE_ID(priv); - job = flow_hw_job_get(priv, queue); - if (!job) { - rte_flow_error_set(error, ENOMEM, -@@ -9983,9 +10218,21 @@ flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue, - job->action = handle; - job->user_data = user_data; - job->query.user = query_data; -+ job->indirect_type = indirect_type; - return job; + error: + rte_mempool_free(txq->txdesc_pool); + rte_memzone_free(txq->tx_rndis_mz); ++ hn_rx_queue_free_common(rxq); + rte_free(txq); + return err; } +@@ -363,6 +399,12 @@ hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) -+struct mlx5_hw_q_job * -+mlx5_flow_action_job_init(struct mlx5_priv *priv, uint32_t queue, -+ const struct rte_flow_action_handle *handle, -+ void *user_data, void *query_data, -+ enum mlx5_hw_job_type type, -+ struct rte_flow_error *error) -+{ -+ return flow_hw_action_job_init(priv, queue, handle, user_data, query_data, -+ type, MLX5_HW_INDIRECT_TYPE_LEGACY, error); -+} + if (!txq) + return; ++ /* ++ * Free any Rx queues allocated for a Tx queue without a corresponding ++ * Rx queue ++ */ ++ if (qid >= dev->data->nb_rx_queues) ++ hn_rx_queue_free_common(dev->data->rx_queues[qid]); + + rte_mempool_free(txq->txdesc_pool); + +@@ -552,10 +594,12 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + const struct hn_rxinfo *info) + { + struct hn_data *hv = rxq->hv; +- struct rte_mbuf *m; ++ struct rte_mbuf *m = NULL; + bool use_extbuf = false; + +- m = rte_pktmbuf_alloc(rxq->mb_pool); ++ if (likely(rxq->mb_pool != NULL)) ++ m = rte_pktmbuf_alloc(rxq->mb_pool); + - static __rte_always_inline void - flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue, - struct mlx5_hw_q_job *job, -@@ -10045,15 +10292,17 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, - const struct rte_flow_action_age *age; - struct mlx5_aso_mtr *aso_mtr; - cnt_id_t cnt_id; -- uint32_t mtr_id; - uint32_t age_idx; - bool push = flow_hw_action_push(attr); - bool aso = false; -+ bool force_job = action->type == RTE_FLOW_ACTION_TYPE_METER_MARK; + if (unlikely(!m)) { + struct rte_eth_dev *dev = + &rte_eth_devices[rxq->port_id]; +@@ -612,7 +656,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + RTE_PTYPE_L4_MASK); -- if (attr) { -+ if (!mlx5_hw_ctx_validate(dev, error)) -+ return NULL; -+ if (attr || force_job) { - job = 
flow_hw_action_job_init(priv, queue, NULL, user_data, - NULL, MLX5_HW_Q_JOB_TYPE_CREATE, -- error); -+ MLX5_HW_INDIRECT_TYPE_LEGACY, error); - if (!job) - return NULL; + if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { +- m->vlan_tci = info->vlan_info; ++ m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info), ++ NDIS_VLAN_INFO_PRI(info->vlan_info), ++ NDIS_VLAN_INFO_CFI(info->vlan_info)); + m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; + + /* NDIS always strips tag, put it back if necessary */ +@@ -900,7 +946,7 @@ struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv, + + if (!rxq->rxbuf_info) { + PMD_DRV_LOG(ERR, +- "Could not allocate rxbuf info for queue %d\n", ++ "Could not allocate rxbuf info for queue %d", + queue_id); + rte_free(rxq->event_buf); + rte_free(rxq); +@@ -940,7 +986,15 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + if (queue_idx == 0) { + rxq = hv->primary; + } else { +- rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ /* ++ * If the number of Tx queues was previously greater than the ++ * number of Rx queues, we may already have allocated an rxq. ++ */ ++ if (!dev->data->rx_queues[queue_idx]) ++ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ else ++ rxq = dev->data->rx_queues[queue_idx]; ++ + if (!rxq) + return -ENOMEM; } -@@ -10105,9 +10354,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, - aso_mtr = flow_hw_meter_mark_alloc(dev, queue, action, job, push); - if (!aso_mtr) +@@ -973,9 +1027,10 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + + fail: + rte_ring_free(rxq->rx_ring); +- rte_free(rxq->rxbuf_info); +- rte_free(rxq->event_buf); +- rte_free(rxq); ++ /* Only free rxq if it was created in this function. */ ++ if (!dev->data->rx_queues[queue_idx]) ++ hn_rx_queue_free_common(rxq); ++ + return error; + } + +@@ -996,9 +1051,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary) + if (keep_primary && rxq == rxq->hv->primary) + return; + +- rte_free(rxq->rxbuf_info); +- rte_free(rxq->event_buf); +- rte_free(rxq); ++ hn_rx_queue_free_common(rxq); + } + + void +@@ -1332,7 +1385,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, + if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { + pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, + NDIS_PKTINFO_TYPE_VLAN); +- *pi_data = m->vlan_tci; ++ *pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci), ++ RTE_VLAN_TCI_PRI(m->vlan_tci), ++ RTE_VLAN_TCI_DEI(m->vlan_tci)); + } + + if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { +@@ -1514,14 +1569,32 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; +- uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; + struct rndis_packet_msg *pkt; + struct hn_txdesc *txd; ++ uint32_t pkt_size; + + txd = hn_txd_get(txq); + if (txd == NULL) break; -- mtr_id = (MLX5_INDIRECT_ACTION_TYPE_METER_MARK << -- MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (aso_mtr->fm.meter_id); -- handle = (struct rte_flow_action_handle *)(uintptr_t)mtr_id; -+ handle = (void *)(uintptr_t)job->action; - break; - case RTE_FLOW_ACTION_TYPE_RSS: - handle = flow_dv_action_create(dev, conf, action, error); -@@ -10122,9 +10369,8 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, - NULL, "action type not supported"); - break; + ++ if (!(m->ol_flags & RTE_MBUF_F_TX_VLAN)) { ++ struct rte_ether_hdr *eh = ++ rte_pktmbuf_mtod(m, struct rte_ether_hdr *); ++ struct rte_vlan_hdr *vh; ++ ++ /* Force TX vlan offloading for 
801.2Q packet */ ++ if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { ++ vh = (struct rte_vlan_hdr *)(eh + 1); ++ m->ol_flags |= RTE_MBUF_F_TX_VLAN; ++ m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); ++ ++ /* Copy ether header over */ ++ memmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)), ++ eh, 2 * RTE_ETHER_ADDR_LEN); ++ } ++ } ++ pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; ++ + /* For small packets aggregate them in chimney buffer */ + if (m->pkt_len <= hv->tx_copybreak && + pkt_size <= txq->agg_szmax) { +diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c +index 90cb6f6923..a4e958419d 100644 +--- a/dpdk/drivers/net/netvsc/hn_vf.c ++++ b/dpdk/drivers/net/netvsc/hn_vf.c +@@ -264,7 +264,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) + goto exit; + } + +- ret = hn_vf_mtu_set(dev, dev->data->mtu); ++ ret = rte_eth_dev_set_mtu(port, dev->data->mtu); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to set VF MTU"); + goto exit; +@@ -794,7 +794,7 @@ int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) + rte_rwlock_read_lock(&hv->vf_lock); + vf_dev = hn_get_vf_dev(hv); + if (hv->vf_ctx.vf_vsc_switched && vf_dev) +- ret = vf_dev->dev_ops->mtu_set(vf_dev, mtu); ++ ret = rte_eth_dev_set_mtu(vf_dev->data->port_id, mtu); + rte_rwlock_read_unlock(&hv->vf_lock); + + return ret; +diff --git a/dpdk/drivers/net/nfb/nfb_rx.c b/dpdk/drivers/net/nfb/nfb_rx.c +index 8a9b232305..7941197b77 100644 +--- a/dpdk/drivers/net/nfb/nfb_rx.c ++++ b/dpdk/drivers/net/nfb/nfb_rx.c +@@ -129,7 +129,7 @@ nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + if (rxq->queue != NULL) { + ndp_close_rx_queue(rxq->queue); +- rte_free(rxq); + rxq->queue = NULL; ++ rte_free(rxq); } -- if (job) { -+ if (job && !force_job) { - job->action = handle; -- job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY; - flow_hw_action_finalize(dev, queue, job, push, aso, - handle != NULL); + } +diff --git a/dpdk/drivers/net/nfb/nfb_tx.c b/dpdk/drivers/net/nfb/nfb_tx.c +index d49fc324e7..5c38d69934 100644 +--- a/dpdk/drivers/net/nfb/nfb_tx.c ++++ b/dpdk/drivers/net/nfb/nfb_tx.c +@@ -108,7 +108,7 @@ nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + if (txq->queue != NULL) { + ndp_close_tx_queue(txq->queue); +- rte_free(txq); + txq->queue = NULL; ++ rte_free(txq); } -@@ -10155,15 +10401,17 @@ mlx5_flow_update_meter_mark(struct rte_eth_dev *dev, uint32_t queue, - fm->color_aware = meter_mark->color_mode; - if (upd_meter_mark->state_valid) - fm->is_enable = meter_mark->state; -+ aso_mtr->state = (queue == MLX5_HW_INV_QUEUE) ? -+ ASO_METER_WAIT : ASO_METER_WAIT_ASYNC; - /* Update ASO flow meter by wqe. */ -- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, -+ if (mlx5_aso_meter_update_by_wqe(priv, queue, - aso_mtr, &priv->mtr_bulk, job, push)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Unable to update ASO meter WQE"); - /* Wait for ASO object completion. 
*/ - if (queue == MLX5_HW_INV_QUEUE && -- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) -+ mlx5_aso_mtr_wait(priv, aso_mtr, true)) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Unable to wait for ASO meter CQE"); -@@ -10209,11 +10457,12 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, - int ret = 0; - bool push = flow_hw_action_push(attr); - bool aso = false; -+ bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK; + } +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c +index 6b523d98b0..9ecd5f49c7 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c +@@ -82,63 +82,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) + return 0; + } + +-/* Reset and stop device. The device can not be restarted. */ +-static int +-nfp_flower_pf_close(struct rte_eth_dev *dev) +-{ +- uint16_t i; +- struct nfp_net_hw *hw; +- struct nfp_pf_dev *pf_dev; +- struct nfp_net_txq *this_tx_q; +- struct nfp_net_rxq *this_rx_q; +- struct nfp_flower_representor *repr; +- struct nfp_app_fw_flower *app_fw_flower; +- +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) +- return 0; +- +- repr = dev->data->dev_private; +- hw = repr->app_fw_flower->pf_hw; +- pf_dev = hw->pf_dev; +- app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv); +- +- nfp_mtr_priv_uninit(pf_dev); +- +- /* +- * We assume that the DPDK application is stopping all the +- * threads/queues before calling the device close function. +- */ +- nfp_net_disable_queues(dev); +- +- /* Clear queues */ +- for (i = 0; i < dev->data->nb_tx_queues; i++) { +- this_tx_q = dev->data->tx_queues[i]; +- nfp_net_reset_tx_queue(this_tx_q); +- } +- +- for (i = 0; i < dev->data->nb_rx_queues; i++) { +- this_rx_q = dev->data->rx_queues[i]; +- nfp_net_reset_rx_queue(this_rx_q); +- } +- +- /* Cancel possible impending LSC work here before releasing the port */ +- rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); +- +- nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); +- +- /* Now it is safe to free all PF resources */ +- PMD_DRV_LOG(INFO, "Freeing PF resources"); +- nfp_cpp_area_free(pf_dev->ctrl_area); +- nfp_cpp_area_free(pf_dev->qc_area); +- free(pf_dev->hwinfo); +- free(pf_dev->sym_tbl); +- nfp_cpp_free(pf_dev->cpp); +- rte_free(app_fw_flower); +- rte_free(pf_dev); +- +- return 0; +-} +- + static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { + .dev_infos_get = nfp_net_infos_get, + .link_update = nfp_net_link_update, +@@ -146,7 +89,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { + + .dev_start = nfp_flower_pf_start, + .dev_stop = nfp_net_stop, +- .dev_close = nfp_flower_pf_close, + }; + + static inline struct nfp_flower_representor * +@@ -191,7 +133,9 @@ nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw, + return false; + } + +- rte_ring_enqueue(repr->ring, (void *)mbuf); ++ if (rte_ring_enqueue(repr->ring, (void *)mbuf) != 0) ++ return false; ++ + return true; + } + +@@ -567,6 +511,8 @@ nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw) + + pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1; + ++ nfp_net_disable_queues(eth_dev); ++ + snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); + for (i = 0; i < hw->max_tx_queues; i++) { + txq = eth_dev->data->tx_queues[i]; +@@ -858,6 +804,23 @@ app_cleanup: + return ret; + } -- if (attr) { -+ if (attr || force_job) { - job = flow_hw_action_job_init(priv, queue, 
handle, user_data, - NULL, MLX5_HW_Q_JOB_TYPE_UPDATE, -- error); -+ MLX5_HW_INDIRECT_TYPE_LEGACY, error); - if (!job) - return -rte_errno; - } -@@ -10247,7 +10496,7 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue, - "action type not supported"); - break; ++void ++nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev) ++{ ++ struct nfp_app_fw_flower *app_fw_flower; ++ ++ app_fw_flower = pf_dev->app_fw_priv; ++ nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw); ++ nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area); ++ nfp_cpp_area_free(pf_dev->ctrl_area); ++ rte_free(app_fw_flower->pf_hw); ++ nfp_mtr_priv_uninit(pf_dev); ++ nfp_flow_priv_uninit(pf_dev); ++ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) ++ PMD_DRV_LOG(WARNING, "Failed to free switch domain for device"); ++ rte_free(app_fw_flower); ++} ++ + int + nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev) + { +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.h b/dpdk/drivers/net/nfp/flower/nfp_flower.h +index 6f27c06acc..8393de66c5 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower.h ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower.h +@@ -106,6 +106,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower) + + int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev, + const struct nfp_dev_info *dev_info); ++void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev); + int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev); + bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw, + struct rte_mbuf *mbuf, +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c +index c25487c277..102daa3d70 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c +@@ -441,6 +441,11 @@ nfp_flower_cmsg_port_mod_rx(struct nfp_app_fw_flower *app_fw_flower, + return -EINVAL; } -- if (job) -+ if (job && !force_job) - flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0); + ++ if (repr == NULL) { ++ PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x", port); ++ return -EINVAL; ++ } ++ + repr->link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; + if ((msg->info & NFP_FLOWER_CMSG_PORT_MOD_INFO_LINK) != 0) + repr->link.link_status = RTE_ETH_LINK_UP; +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +index 0f0e63aae0..ada28d07c6 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c +@@ -23,7 +23,6 @@ nfp_flower_repr_link_update(struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) + { + int ret; +- uint32_t nn_link_status; + struct nfp_net_hw *pf_hw; + struct rte_eth_link *link; + struct nfp_flower_representor *repr; +@@ -32,9 +31,10 @@ nfp_flower_repr_link_update(struct rte_eth_dev *dev, + link = &repr->link; + + pf_hw = repr->app_fw_flower->pf_hw; +- nn_link_status = nn_cfg_readw(&pf_hw->super, NFP_NET_CFG_STS); ++ ret = nfp_net_link_update_common(dev, pf_hw, link, link->link_status); + +- ret = nfp_net_link_update_common(dev, pf_hw, link, nn_link_status); ++ if (repr->repr_type == NFP_REPR_TYPE_PF) ++ nfp_net_notify_port_speed(repr->app_fw_flower->pf_hw, link); + return ret; } -@@ -10290,11 +10539,12 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, - bool push = flow_hw_action_push(attr); - bool aso = false; - int ret = 0; -+ bool force_job = type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK; +@@ 
-291,12 +291,156 @@ nfp_flower_repr_tx_burst(void *tx_queue, + return sent; + } -- if (attr) { -+ if (attr || force_job) { - job = flow_hw_action_job_init(priv, queue, handle, user_data, - NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, -- error); -+ MLX5_HW_INDIRECT_TYPE_LEGACY, error); - if (!job) - return -rte_errno; - } -@@ -10327,7 +10577,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, - fm = &aso_mtr->fm; - fm->is_enable = 0; - /* Update ASO flow meter by wqe. */ -- if (mlx5_aso_meter_update_by_wqe(priv->sh, queue, aso_mtr, -+ if (mlx5_aso_meter_update_by_wqe(priv, queue, aso_mtr, - &priv->mtr_bulk, job, push)) { - ret = -EINVAL; - rte_flow_error_set(error, EINVAL, -@@ -10337,17 +10587,14 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, - } - /* Wait for ASO object completion. */ - if (queue == MLX5_HW_INV_QUEUE && -- mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { -+ mlx5_aso_mtr_wait(priv, aso_mtr, true)) { - ret = -EINVAL; - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "Unable to wait for ASO meter CQE"); - break; - } -- if (!job) -- mlx5_ipool_free(pool->idx_pool, idx); -- else -- aso = true; -+ aso = true; - break; - case MLX5_INDIRECT_ACTION_TYPE_RSS: - ret = flow_dv_action_destroy(dev, handle, error); -@@ -10361,7 +10608,7 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue, - "action type not supported"); - break; ++static void ++nfp_flower_repr_free_queue(struct nfp_flower_representor *repr) ++{ ++ uint16_t i; ++ struct rte_eth_dev *eth_dev = repr->eth_dev; ++ ++ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) ++ rte_free(eth_dev->data->tx_queues[i]); ++ ++ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) ++ rte_free(eth_dev->data->rx_queues[i]); ++} ++ ++static void ++nfp_flower_pf_repr_close_queue(struct nfp_flower_representor *repr) ++{ ++ struct rte_eth_dev *eth_dev = repr->eth_dev; ++ ++ /* ++ * We assume that the DPDK application is stopping all the ++ * threads/queues before calling the device close function. 
++ */ ++ nfp_net_disable_queues(eth_dev); ++ ++ /* Clear queues */ ++ nfp_net_close_tx_queue(eth_dev); ++ nfp_net_close_rx_queue(eth_dev); ++} ++ ++static void ++nfp_flower_repr_close_queue(struct nfp_flower_representor *repr) ++{ ++ switch (repr->repr_type) { ++ case NFP_REPR_TYPE_PHYS_PORT: ++ nfp_flower_repr_free_queue(repr); ++ break; ++ case NFP_REPR_TYPE_PF: ++ nfp_flower_pf_repr_close_queue(repr); ++ break; ++ case NFP_REPR_TYPE_VF: ++ nfp_flower_repr_free_queue(repr); ++ break; ++ default: ++ PMD_DRV_LOG(ERR, "Unsupported repr port type."); ++ break; ++ } ++} ++ ++static int ++nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) ++{ ++ uint16_t index; ++ struct nfp_flower_representor *repr; ++ ++ repr = eth_dev->data->dev_private; ++ rte_ring_free(repr->ring); ++ ++ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { ++ index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); ++ repr->app_fw_flower->phy_reprs[index] = NULL; ++ } else { ++ index = repr->vf_id; ++ repr->app_fw_flower->vf_reprs[index] = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++nfp_flower_pf_repr_uninit(struct rte_eth_dev *eth_dev) ++{ ++ struct nfp_flower_representor *repr = eth_dev->data->dev_private; ++ ++ repr->app_fw_flower->pf_repr = NULL; ++ ++ return 0; ++} ++ ++static void ++nfp_flower_repr_free(struct nfp_flower_representor *repr, ++ enum nfp_repr_type repr_type) ++{ ++ switch (repr_type) { ++ case NFP_REPR_TYPE_PHYS_PORT: ++ nfp_flower_repr_uninit(repr->eth_dev); ++ break; ++ case NFP_REPR_TYPE_PF: ++ nfp_flower_pf_repr_uninit(repr->eth_dev); ++ break; ++ case NFP_REPR_TYPE_VF: ++ nfp_flower_repr_uninit(repr->eth_dev); ++ break; ++ default: ++ PMD_DRV_LOG(ERR, "Unsupported repr port type."); ++ break; ++ } ++} ++ ++/* Reset and stop device. The device can not be restarted. 
*/ ++static int ++nfp_flower_repr_dev_close(struct rte_eth_dev *dev) ++{ ++ uint16_t i; ++ struct nfp_net_hw *hw; ++ struct nfp_pf_dev *pf_dev; ++ struct nfp_flower_representor *repr; ++ struct nfp_app_fw_flower *app_fw_flower; ++ ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return 0; ++ ++ repr = dev->data->dev_private; ++ app_fw_flower = repr->app_fw_flower; ++ hw = app_fw_flower->pf_hw; ++ pf_dev = hw->pf_dev; ++ ++ if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC) ++ return -EINVAL; ++ ++ nfp_flower_repr_close_queue(repr); ++ ++ nfp_flower_repr_free(repr, repr->repr_type); ++ ++ for (i = 0; i < MAX_FLOWER_VFS; i++) { ++ if (app_fw_flower->vf_reprs[i] != NULL) ++ return 0; ++ } ++ ++ for (i = 0; i < NFP_MAX_PHYPORTS; i++) { ++ if (app_fw_flower->phy_reprs[i] != NULL) ++ return 0; ++ } ++ ++ if (app_fw_flower->pf_repr != NULL) ++ return 0; ++ ++ /* Now it is safe to free all PF resources */ ++ nfp_uninit_app_fw_flower(pf_dev); ++ nfp_pf_uninit(pf_dev); ++ ++ return 0; ++} ++ + static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { + .dev_infos_get = nfp_flower_repr_dev_infos_get, + + .dev_start = nfp_flower_pf_start, + .dev_configure = nfp_net_configure, + .dev_stop = nfp_net_stop, ++ .dev_close = nfp_flower_repr_dev_close, + + .rx_queue_setup = nfp_net_rx_queue_setup, + .tx_queue_setup = nfp_net_tx_queue_setup, +@@ -319,6 +463,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { + .dev_start = nfp_flower_repr_dev_start, + .dev_configure = nfp_net_configure, + .dev_stop = nfp_flower_repr_dev_stop, ++ .dev_close = nfp_flower_repr_dev_close, + + .rx_queue_setup = nfp_flower_repr_rx_queue_setup, + .tx_queue_setup = nfp_flower_repr_tx_queue_setup, +@@ -410,6 +555,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, + + repr->app_fw_flower->pf_repr = repr; + repr->app_fw_flower->pf_hw->eth_dev = eth_dev; ++ repr->eth_dev = eth_dev; + + return 0; + } +@@ -501,6 +647,8 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, + app_fw_flower->vf_reprs[index] = repr; } -- if (job) -+ if (job && !force_job) - flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0); + ++ repr->eth_dev = eth_dev; ++ + return 0; + + mac_cleanup: +@@ -511,6 +659,35 @@ ring_cleanup: return ret; } -@@ -10607,7 +10854,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue, - if (attr) { - job = flow_hw_action_job_init(priv, queue, handle, user_data, - data, MLX5_HW_Q_JOB_TYPE_QUERY, -- error); -+ MLX5_HW_INDIRECT_TYPE_LEGACY, error); - if (!job) - return -rte_errno; - } -@@ -10661,7 +10908,7 @@ flow_hw_async_action_handle_query_update - job = flow_hw_action_job_init(priv, queue, handle, user_data, - query, - MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, -- error); -+ MLX5_HW_INDIRECT_TYPE_LEGACY, error); - if (!job) - return -rte_errno; - } -@@ -10742,6 +10989,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id, - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, "empty context"); -+ if (!priv->hws_age_req) -+ return rte_flow_error_set(error, ENOENT, -+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, -+ NULL, "No aging initialized"); - if (priv->hws_strict_queue) { - if (queue_id >= age_info->hw_q_age->nb_rings) - return rte_flow_error_set(error, EINVAL, -@@ -11319,6 +11570,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, - } - }; -+ if (!mlx5_hw_ctx_validate(dev, error)) -+ return NULL; - if (!actions) { - rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, - NULL, "No action list"); -@@ 
-11337,7 +11590,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, - if (attr) { - job = flow_hw_action_job_init(priv, queue, NULL, user_data, - NULL, MLX5_HW_Q_JOB_TYPE_CREATE, -- error); -+ MLX5_HW_INDIRECT_TYPE_LIST, error); - if (!job) - return NULL; - } -@@ -11357,7 +11610,6 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue, - } - if (job) { - job->action = handle; -- job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST; - flow_hw_action_finalize(dev, queue, job, push, false, - handle != NULL); - } -@@ -11402,7 +11654,7 @@ flow_hw_async_action_list_handle_destroy - if (attr) { - job = flow_hw_action_job_init(priv, queue, NULL, user_data, - NULL, MLX5_HW_Q_JOB_TYPE_DESTROY, -- error); -+ MLX5_HW_INDIRECT_TYPE_LIST, error); - if (!job) - return rte_errno; - } -@@ -11881,8 +12133,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool - proxy_port_id, port_id); - return 0; - } -- if (!proxy_priv->hw_esw_sq_miss_root_tbl || -- !proxy_priv->hw_esw_sq_miss_tbl) { -+ if (!proxy_priv->hw_ctrl_fdb || -+ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || -+ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) { - DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " - "default flow tables were not created.", - proxy_port_id, port_id); -@@ -11914,7 +12167,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool - actions[2] = (struct rte_flow_action) { - .type = RTE_FLOW_ACTION_TYPE_END, - }; -- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, -+ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, -+ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, - items, 0, actions, 0, &flow_info, external); - if (ret) { - DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", -@@ -11945,7 +12199,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool - .type = RTE_FLOW_ACTION_TYPE_END, - }; - flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; -- ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, -+ ret = flow_hw_create_ctrl_flow(dev, proxy_dev, -+ proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl, - items, 0, actions, 0, &flow_info, external); - if (ret) { - DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d", -@@ -11989,10 +12244,13 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) - } - proxy_dev = &rte_eth_devices[proxy_port_id]; - proxy_priv = proxy_dev->data->dev_private; -+ /* FDB default flow rules must be enabled. */ -+ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); - if (!proxy_priv->dr_ctx) - return 0; -- if (!proxy_priv->hw_esw_sq_miss_root_tbl || -- !proxy_priv->hw_esw_sq_miss_tbl) -+ if (!proxy_priv->hw_ctrl_fdb || -+ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || -+ !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) - return 0; - cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows); - while (cf != NULL) { -@@ -12052,6 +12310,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) - } - proxy_dev = &rte_eth_devices[proxy_port_id]; - proxy_priv = proxy_dev->data->dev_private; -+ /* FDB default flow rules must be enabled. */ -+ MLX5_ASSERT(proxy_priv->sh->config.fdb_def_rule); - if (!proxy_priv->dr_ctx) { - DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured " - "for HWS to create default FDB jump rule. 
Default rule will " -@@ -12059,7 +12319,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) - proxy_port_id, port_id); - return 0; ++static void ++nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower) ++{ ++ uint32_t i; ++ struct nfp_flower_representor *repr; ++ ++ for (i = 0; i < MAX_FLOWER_VFS; i++) { ++ repr = app_fw_flower->vf_reprs[i]; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_VF); ++ app_fw_flower->vf_reprs[i] = NULL; ++ } ++ } ++ ++ for (i = 0; i < NFP_MAX_PHYPORTS; i++) { ++ repr = app_fw_flower->phy_reprs[i]; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PHYS_PORT); ++ app_fw_flower->phy_reprs[i] = NULL; ++ } ++ } ++ ++ repr = app_fw_flower->pf_repr; ++ if (repr != NULL) { ++ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PF); ++ app_fw_flower->pf_repr = NULL; ++ } ++} ++ + static int + nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + { +@@ -563,7 +740,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) + eth_port = &nfp_eth_table->ports[i]; + flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT; + flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index); +- flower_repr.nfp_idx = eth_port->eth_index; ++ flower_repr.nfp_idx = eth_port->index; + flower_repr.vf_id = i + 1; + + /* Copy the real mac of the interface to the representor struct */ +@@ -585,7 +762,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) } -- if (!proxy_priv->hw_esw_zero_tbl) { -+ if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) { - DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " - "default flow tables were not created.", - proxy_port_id, port_id); -@@ -12067,7 +12327,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) - return -rte_errno; + + if (i < app_fw_flower->num_phyport_reprs) +- return ret; ++ goto repr_free; + + /* + * Now allocate eth_dev's for VF representors. 
+@@ -614,9 +791,14 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) } - return flow_hw_create_ctrl_flow(dev, proxy_dev, -- proxy_priv->hw_esw_zero_tbl, -+ proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl, - items, 0, actions, 0, &flow_info, false); + + if (i < app_fw_flower->num_vf_reprs) +- return ret; ++ goto repr_free; + + return 0; ++ ++repr_free: ++ nfp_flower_repr_free_all(app_fw_flower); ++ ++ return ret; } -@@ -12119,10 +12379,12 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) - }; + int +@@ -634,10 +816,9 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) + pci_dev = pf_dev->pci_dev; - MLX5_ASSERT(priv->master); -- if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) -+ if (!priv->dr_ctx || -+ !priv->hw_ctrl_fdb || -+ !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl) - return 0; - return flow_hw_create_ctrl_flow(dev, dev, -- priv->hw_tx_meta_cpy_tbl, -+ priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl, - eth_all, 0, copy_reg_action, 0, &flow_info, false); + /* Allocate a switch domain for the flower app */ +- if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID && +- rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) { ++ ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id); ++ if (ret != 0) + PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device"); +- } + + /* Now parse PCI device args passed for representor info */ + if (pci_dev->device.devargs != NULL) { +@@ -677,8 +858,15 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) + ret = nfp_flower_repr_alloc(app_fw_flower); + if (ret != 0) { + PMD_INIT_LOG(ERR, "representors allocation failed"); +- return -EINVAL; ++ ret = -EINVAL; ++ goto domain_free; + } + + return 0; ++ ++domain_free: ++ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) ++ PMD_INIT_LOG(WARNING, "failed to free switch domain for device"); ++ ++ return ret; } +diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h +index bcb4c3cdb5..8053617562 100644 +--- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h ++++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h +@@ -20,6 +20,7 @@ struct nfp_flower_representor { + struct rte_ring *ring; + struct rte_eth_link link; + struct rte_eth_stats repr_stats; ++ struct rte_eth_dev *eth_dev; + }; -@@ -12214,11 +12476,11 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev) - .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, - }; + int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower); +diff --git a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build +index cf9c16266d..7bf94710f1 100644 +--- a/dpdk/drivers/net/nfp/meson.build ++++ b/dpdk/drivers/net/nfp/meson.build +@@ -4,6 +4,7 @@ + if not is_linux or not dpdk_conf.get('RTE_ARCH_64') + build = false + reason = 'only supported on 64-bit Linux' ++ subdir_done() + endif -- MLX5_ASSERT(priv->master); -- if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl) -+ if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl) - return 0; -- return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0, -- miss_action, 0, &flow_info, false); -+ return flow_hw_create_ctrl_flow(dev, dev, -+ priv->hw_ctrl_fdb->hw_lacp_rx_tbl, -+ eth_lacp, 0, miss_action, 0, &flow_info, false); + sources = files( +diff --git a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c +index ff9b10f046..b9da74bc99 100644 +--- 
a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c ++++ b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c +@@ -137,7 +137,7 @@ nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq, + } } - static uint32_t -diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c -index 7cbf772ea4..7bf5018c70 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c -+++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c -@@ -618,6 +618,7 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev, - meter_profile_id); - } +-static inline void ++static inline int + nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, + struct nfp_net_txq *txq, + struct rte_mbuf *pkt) +@@ -174,7 +174,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, + } -+#if defined(HAVE_MLX5_HWS_SUPPORT) - /** - * Callback to add MTR profile with HWS. - * -@@ -697,6 +698,7 @@ mlx5_flow_meter_profile_hws_delete(struct rte_eth_dev *dev, - memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile)); - return 0; - } -+#endif + if (meta_data->length == 0) +- return; ++ return 0; - /** - * Find policy by id. -@@ -839,6 +841,7 @@ mlx5_flow_meter_policy_validate(struct rte_eth_dev *dev, - return 0; - } + meta_info = meta_data->header; + meta_data->header = rte_cpu_to_be_32(meta_data->header); +@@ -188,15 +188,16 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, + case NFP_NET_META_VLAN: + if (vlan_layer > 0) { + PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported"); +- return; ++ return -EINVAL; + } ++ + nfp_net_set_meta_vlan(meta_data, pkt, layer); + vlan_layer++; + break; + case NFP_NET_META_IPSEC: + if (ipsec_layer > 2) { + PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); +- return; ++ return -EINVAL; + } -+#if defined(HAVE_MLX5_HWS_SUPPORT) - /** - * Callback to check MTR policy action validate for HWS - * -@@ -875,6 +878,7 @@ mlx5_flow_meter_policy_hws_validate(struct rte_eth_dev *dev, + nfp_net_set_meta_ipsec(meta_data, txq, pkt, layer, ipsec_layer); +@@ -204,11 +205,13 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, + break; + default: + PMD_DRV_LOG(ERR, "The metadata type not supported"); +- return; ++ return -ENOTSUP; + } + + memcpy(meta, &meta_data->data[layer], sizeof(meta_data->data[layer])); } - return 0; ++ ++ return 0; } -+#endif - static int - __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev, -@@ -1201,6 +1205,7 @@ mlx5_flow_meter_policy_get(struct rte_eth_dev *dev, - &policy_idx); + uint16_t +@@ -225,6 +228,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, + uint16_t nb_pkts, + bool repr_flag) + { ++ int ret; + uint16_t i; + uint8_t offset; + uint32_t pkt_size; +@@ -271,7 +275,10 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, + if (!repr_flag) { + struct nfp_net_meta_raw meta_data; + memset(&meta_data, 0, sizeof(meta_data)); +- nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt); ++ ret = nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt); ++ if (unlikely(ret != 0)) ++ goto xmit_end; ++ + offset = meta_data.length; + } else { + offset = FLOWER_PKT_DATA_OFFSET; +diff --git a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c +index 0141fbcc8f..772c847b9d 100644 +--- a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c ++++ b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c +@@ -167,7 +167,7 @@ close_block: + return nop_slots; } -+#if defined(HAVE_MLX5_HWS_SUPPORT) - /** - * Callback to delete MTR policy for HWS. 
- * -@@ -1523,7 +1528,7 @@ policy_add_err: - RTE_MTR_ERROR_TYPE_UNSPECIFIED, - NULL, "Failed to create meter policy."); - } -- -+#endif - /** - * Check meter validation. - * -@@ -1608,12 +1613,12 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv, - if (sh->meter_aso_en) { - fm->is_enable = !!is_enable; - aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); -- ret = mlx5_aso_meter_update_by_wqe(sh, MLX5_HW_INV_QUEUE, -+ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, - aso_mtr, &priv->mtr_bulk, - NULL, true); - if (ret) - return ret; -- ret = mlx5_aso_mtr_wait(sh, MLX5_HW_INV_QUEUE, aso_mtr); -+ ret = mlx5_aso_mtr_wait(priv, aso_mtr, false); - if (ret) - return ret; - } else { -@@ -1859,7 +1864,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, - /* If ASO meter supported, update ASO flow meter by wqe. */ - if (priv->sh->meter_aso_en) { - aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); -- ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, -+ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, - aso_mtr, &priv->mtr_bulk, NULL, true); - if (ret) - goto error; -@@ -1893,6 +1898,7 @@ error: - NULL, "Failed to create devx meter."); - } +-static void ++static int + nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, + struct nfp_net_txq *txq, + uint64_t *metadata) +@@ -178,7 +178,6 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, + uint32_t cap_extend; + struct nfp_net_hw *hw; + uint32_t header_offset; +- uint8_t vlan_layer = 0; + uint8_t ipsec_layer = 0; + struct nfp_net_meta_raw meta_data; -+#if defined(HAVE_MLX5_HWS_SUPPORT) - /** - * Create meter rules. - * -@@ -1920,6 +1926,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, - struct mlx5_flow_meter_info *fm; - struct mlx5_flow_meter_policy *policy = NULL; - struct mlx5_aso_mtr *aso_mtr; -+ struct mlx5_hw_q_job *job; - int ret; +@@ -206,8 +205,10 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, + meta_data.length += 3 * NFP_NET_META_FIELD_SIZE; + } - if (!priv->mtr_profile_arr || -@@ -1965,17 +1972,26 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, - fm->shared = !!shared; - fm->initialized = 1; - /* Update ASO flow meter by wqe. 
*/ -- ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr, -- &priv->mtr_bulk, NULL, true); -- if (ret) -+ job = mlx5_flow_action_job_init(priv, MLX5_HW_INV_QUEUE, NULL, NULL, -+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE, NULL); -+ if (!job) -+ return -rte_mtr_error_set(error, ENOMEM, -+ RTE_MTR_ERROR_TYPE_MTR_ID, -+ NULL, "No job context."); -+ ret = mlx5_aso_meter_update_by_wqe(priv, MLX5_HW_INV_QUEUE, aso_mtr, -+ &priv->mtr_bulk, job, true); -+ if (ret) { -+ flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv)); - return -rte_mtr_error_set(error, ENOTSUP, -- RTE_MTR_ERROR_TYPE_UNSPECIFIED, -- NULL, "Failed to create devx meter."); -+ RTE_MTR_ERROR_TYPE_UNSPECIFIED, -+ NULL, "Failed to create devx meter."); +- if (meta_data.length == 0) +- return; ++ if (meta_data.length == 0) { ++ *metadata = 0; ++ return 0; + } - fm->active_state = params->meter_enable; - __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED); - __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED); - return 0; + + meta_type = meta_data.header; + header_offset = meta_type << NFP_NET_META_NFDK_LENGTH; +@@ -221,17 +222,13 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, + meta += NFP_NET_META_FIELD_SIZE) { + switch (meta_type & NFP_NET_META_FIELD_MASK) { + case NFP_NET_META_VLAN: +- if (vlan_layer > 0) { +- PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported"); +- return; +- } ++ + nfp_net_set_meta_vlan(&meta_data, pkt, layer); +- vlan_layer++; + break; + case NFP_NET_META_IPSEC: + if (ipsec_layer > 2) { + PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); +- return; ++ return -EINVAL; + } + + nfp_net_set_meta_ipsec(&meta_data, txq, pkt, layer, ipsec_layer); +@@ -239,13 +236,15 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, + break; + default: + PMD_DRV_LOG(ERR, "The metadata type not supported"); +- return; ++ return -ENOTSUP; + } + + memcpy(meta, &meta_data.data[layer], sizeof(meta_data.data[layer])); + } + + *metadata = NFDK_DESC_TX_CHAIN_META; ++ ++ return 0; } -+#endif - static int - mlx5_flow_meter_params_flush(struct rte_eth_dev *dev, -@@ -2460,6 +2476,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = { - .stats_read = mlx5_flow_meter_stats_read, - }; + uint16_t +@@ -292,6 +291,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, -+#if defined(HAVE_MLX5_HWS_SUPPORT) - static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { - .capabilities_get = mlx5_flow_mtr_cap_get, - .meter_profile_add = mlx5_flow_meter_profile_hws_add, -@@ -2478,6 +2495,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { - .stats_update = NULL, - .stats_read = NULL, - }; -+#endif + /* Sending packets */ + while (npkts < nb_pkts && free_descs > 0) { ++ int ret; + int nop_descs; + uint32_t type; + uint32_t dma_len; +@@ -319,10 +319,13 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, - /** - * Get meter operations. 
-@@ -2493,12 +2511,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { - int - mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) + temp_pkt = pkt; + +- if (repr_flag) ++ if (repr_flag) { + metadata = NFDK_DESC_TX_CHAIN_META; +- else +- nfp_net_nfdk_set_meta_data(pkt, txq, &metadata); ++ } else { ++ ret = nfp_net_nfdk_set_meta_data(pkt, txq, &metadata); ++ if (unlikely(ret != 0)) ++ goto xmit_end; ++ } + + if (unlikely(pkt->nb_segs > 1 && + (hw->super.cap & NFP_NET_CFG_CTRL_GATHER) == 0)) { +diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c +index f02caf8056..e704c90dc5 100644 +--- a/dpdk/drivers/net/nfp/nfp_ethdev.c ++++ b/dpdk/drivers/net/nfp/nfp_ethdev.c +@@ -201,30 +201,40 @@ error: + static int + nfp_net_set_link_up(struct rte_eth_dev *dev) { -+#if defined(HAVE_MLX5_HWS_SUPPORT) - struct mlx5_priv *priv = dev->data->dev_private; ++ int ret; + struct nfp_net_hw *hw; - if (priv->sh->config.dv_flow_en == 2) - *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops; + hw = dev->data->dev_private; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ +- return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); ++ ret = nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); else - *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; -+#else -+ *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; -+#endif - return 0; +- return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); ++ ret = nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 1); ++ if (ret < 0) ++ return ret; ++ ++ return 0; } -@@ -2614,7 +2636,7 @@ mlx5_flow_meter_attach(struct mlx5_priv *priv, - struct mlx5_aso_mtr *aso_mtr; + /* Set the link down. */ + static int + nfp_net_set_link_down(struct rte_eth_dev *dev) + { ++ int ret; + struct nfp_net_hw *hw; - aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); -- if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) { -+ if (mlx5_aso_mtr_wait(priv, aso_mtr, false)) { - return rte_flow_error_set(error, ENOENT, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, -@@ -2877,7 +2899,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) - struct mlx5_flow_meter_profile *fmp; - struct mlx5_legacy_flow_meter *legacy_fm; - struct mlx5_flow_meter_info *fm; -- struct mlx5_flow_meter_policy *policy; - struct mlx5_flow_meter_sub_policy *sub_policy; - void *tmp; - uint32_t i, mtr_idx, policy_idx; -@@ -2945,15 +2966,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) - mlx5_l3t_destroy(priv->policy_idx_tbl); - priv->policy_idx_tbl = NULL; - } -+#if defined(HAVE_MLX5_HWS_SUPPORT) - if (priv->mtr_policy_arr) { -+ struct mlx5_flow_meter_policy *policy; + hw = dev->data->dev_private; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ +- return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); ++ ret = nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); + else +- return nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); ++ ret = nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0); ++ if (ret < 0) ++ return ret; + - for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) { - policy = mlx5_flow_meter_policy_find(dev, i, - &policy_idx); -- if (policy->initialized) -+ if (policy->initialized) { - mlx5_flow_meter_policy_hws_delete(dev, i, - error); -+ } - } - } -+#endif - if (priv->mtr_profile_tbl) { - MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) { - fmp = entry; -@@ 
-2967,14 +2993,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) - mlx5_l3t_destroy(priv->mtr_profile_tbl); - priv->mtr_profile_tbl = NULL; - } -+#if defined(HAVE_MLX5_HWS_SUPPORT) - if (priv->mtr_profile_arr) { - for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) { - fmp = mlx5_flow_meter_profile_find(priv, i); -- if (fmp->initialized) -+ if (fmp->initialized) { - mlx5_flow_meter_profile_hws_delete(dev, i, - error); -+ } - } - } -+#endif - /* Delete default policy table. */ - mlx5_flow_destroy_def_policy(dev); - if (priv->sh->refcnt == 1) -diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -index a3bea94811..41edd19bb8 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -+++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -@@ -340,6 +340,55 @@ mlx5_hws_cnt_pool_deinit(struct mlx5_hws_cnt_pool * const cntp) - mlx5_free(cntp); ++ return 0; } -+static bool -+mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg, -+ const struct mlx5_hws_cache_param *ccfg) + static uint8_t +@@ -310,6 +320,66 @@ nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) + rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); + } + ++static void ++nfp_net_uninit(struct rte_eth_dev *eth_dev) +{ -+ /* -+ * Enable cache if and only if there are enough counters requested -+ * to populate all of the caches. -+ */ -+ return pcfg->request_num >= ccfg->q_num * ccfg->size; ++ struct nfp_net_hw *net_hw; ++ ++ net_hw = eth_dev->data->dev_private; ++ rte_free(net_hw->eth_xstats_base); ++ nfp_ipsec_uninit(eth_dev); +} + -+static struct mlx5_hws_cnt_pool_caches * -+mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg, -+ const struct mlx5_hws_cache_param *ccfg) ++static void ++nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, ++ uint8_t id) +{ -+ struct mlx5_hws_cnt_pool_caches *cache; -+ char mz_name[RTE_MEMZONE_NAMESIZE]; -+ uint32_t qidx; ++ struct rte_eth_dev *eth_dev; ++ struct nfp_app_fw_nic *app_fw_nic; + -+ /* If counter pool is big enough, setup the counter pool cache. */ -+ cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, -+ sizeof(*cache) + -+ sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) -+ * ccfg->q_num, 0, SOCKET_ID_ANY); -+ if (cache == NULL) -+ return NULL; -+ /* Store the necessary cache parameters. 
*/ -+ cache->fetch_sz = ccfg->fetch_sz; -+ cache->preload_sz = ccfg->preload_sz; -+ cache->threshold = ccfg->threshold; -+ cache->q_num = ccfg->q_num; -+ for (qidx = 0; qidx < ccfg->q_num; qidx++) { -+ snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); -+ cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, -+ SOCKET_ID_ANY, -+ RING_F_SP_ENQ | RING_F_SC_DEQ | -+ RING_F_EXACT_SZ); -+ if (cache->qcache[qidx] == NULL) -+ goto error; ++ app_fw_nic = pf_dev->app_fw_priv; ++ if (app_fw_nic->ports[id] != NULL) { ++ eth_dev = app_fw_nic->ports[id]->eth_dev; ++ if (eth_dev != NULL) ++ nfp_net_uninit(eth_dev); ++ ++ app_fw_nic->ports[id] = NULL; ++ } ++} ++ ++static void ++nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) ++{ ++ nfp_cpp_area_release_free(pf_dev->ctrl_area); ++ rte_free(pf_dev->app_fw_priv); ++} ++ ++void ++nfp_pf_uninit(struct nfp_pf_dev *pf_dev) ++{ ++ nfp_cpp_area_release_free(pf_dev->mac_stats_area); ++ nfp_cpp_area_release_free(pf_dev->qc_area); ++ free(pf_dev->sym_tbl); ++ if (pf_dev->multi_pf.enabled) { ++ nfp_net_keepalive_stop(&pf_dev->multi_pf); ++ nfp_net_keepalive_uninit(&pf_dev->multi_pf); ++ } ++ free(pf_dev->nfp_eth_table); ++ free(pf_dev->hwinfo); ++ nfp_cpp_free(pf_dev->cpp); ++ rte_free(pf_dev); ++} ++ ++static int ++nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev) ++{ ++ free(pf_dev->sym_tbl); ++ nfp_cpp_free(pf_dev->cpp); ++ rte_free(pf_dev); ++ ++ return 0; ++} ++ + /* Reset and stop device. The device can not be restarted. */ + static int + nfp_net_close(struct rte_eth_dev *dev) +@@ -321,8 +391,19 @@ nfp_net_close(struct rte_eth_dev *dev) + struct rte_pci_device *pci_dev; + struct nfp_app_fw_nic *app_fw_nic; + +- if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ /* ++ * In secondary process, a released eth device can be found by its name ++ * in shared memory. ++ * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the ++ * eth device has been released. 
++ */ ++ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { ++ if (dev->state == RTE_ETH_DEV_UNUSED) ++ return 0; ++ ++ nfp_pf_secondary_uninit(dev->process_private); + return 0; + } -+ return cache; + + hw = dev->data->dev_private; + pf_dev = hw->pf_dev; +@@ -339,16 +420,17 @@ nfp_net_close(struct rte_eth_dev *dev) + nfp_net_close_tx_queue(dev); + nfp_net_close_rx_queue(dev); + +- /* Clear ipsec */ +- nfp_ipsec_uninit(dev); +- + /* Cancel possible impending LSC work here before releasing the port */ + rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); + + /* Only free PF resources after all physical ports have been closed */ + /* Mark this port as unused and free device priv resources */ + nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); +- app_fw_nic->ports[hw->idx] = NULL; + -+error: -+ while (qidx--) -+ rte_ring_free(cache->qcache[qidx]); -+ mlx5_free(cache); -+ return NULL; -+} ++ if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) ++ return -EINVAL; + - static struct mlx5_hws_cnt_pool * - mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, - const struct mlx5_hws_cnt_pool_cfg *pcfg, -@@ -348,7 +397,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, - char mz_name[RTE_MEMZONE_NAMESIZE]; - struct mlx5_hws_cnt_pool *cntp; - uint64_t cnt_num = 0; -- uint32_t qidx; ++ nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx); - MLX5_ASSERT(pcfg); - MLX5_ASSERT(ccfg); -@@ -360,17 +408,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, - cntp->cfg = *pcfg; - if (cntp->cfg.host_cpool) - return cntp; -- cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, -- sizeof(*cntp->cache) + -- sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) -- * ccfg->q_num, 0, SOCKET_ID_ANY); -- if (cntp->cache == NULL) -- goto error; -- /* store the necessary cache parameters. */ -- cntp->cache->fetch_sz = ccfg->fetch_sz; -- cntp->cache->preload_sz = ccfg->preload_sz; -- cntp->cache->threshold = ccfg->threshold; -- cntp->cache->q_num = ccfg->q_num; - if (pcfg->request_num > sh->hws_max_nb_counters) { - DRV_LOG(ERR, "Counter number %u " - "is greater than the maximum supported (%u).", -@@ -418,13 +455,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, - DRV_LOG(ERR, "failed to create reuse list ring"); - goto error; - } -- for (qidx = 0; qidx < ccfg->q_num; qidx++) { -- snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); -- cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, -- SOCKET_ID_ANY, -- RING_F_SP_ENQ | RING_F_SC_DEQ | -- RING_F_EXACT_SZ); -- if (cntp->cache->qcache[qidx] == NULL) -+ /* Allocate counter cache only if needed. */ -+ if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) { -+ cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg); -+ if (cntp->cache == NULL) - goto error; + for (i = 0; i < app_fw_nic->total_phyports; i++) { + id = nfp_function_id_get(pf_dev, i); +@@ -358,26 +440,16 @@ nfp_net_close(struct rte_eth_dev *dev) + return 0; } - /* Initialize the time for aging-out calculation. */ -@@ -685,7 +719,9 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, - * Maybe blocked for at most 200ms here. - */ - rte_spinlock_lock(&sh->cpool_lock); -- LIST_REMOVE(cpool, next); -+ /* Try to remove cpool before it was added to list caused segfault. 
*/ -+ if (!LIST_EMPTY(&sh->hws_cpool_list) && cpool->next.le_prev) -+ LIST_REMOVE(cpool, next); - rte_spinlock_unlock(&sh->cpool_lock); - if (cpool->cfg.host_cpool == NULL) { - if (--sh->cnt_svc->refcnt == 0) -diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h -index 585b5a83ad..e00596088f 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h -+++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.h -@@ -557,19 +557,32 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue, - } - /** -- * Check if counter pool allocated for HWS is shared between ports. -+ * Decide if the given queue can be used to perform counter allocation/deallcation -+ * based on counter configuration - * - * @param[in] priv - * Pointer to the port private data structure. -+ * @param[in] queue -+ * Pointer to the queue index. - * - * @return -- * True if counter pools is shared between ports. False otherwise. -+ * @p queue if cache related to the queue can be used. NULL otherwise. - */ --static __rte_always_inline bool --mlx5_hws_cnt_is_pool_shared(struct mlx5_priv *priv) -+static __rte_always_inline uint32_t * -+mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue) - { -- return priv && priv->hws_cpool && -- (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL); -+ if (priv && priv->hws_cpool) { -+ /* Do not use queue cache if counter pool is shared. */ -+ if (priv->shared_refcnt || priv->hws_cpool->cfg.host_cpool != NULL) -+ return NULL; -+ /* Do not use queue cache if counter cache is disabled. */ -+ if (priv->hws_cpool->cache == NULL) -+ return NULL; -+ return queue; -+ } -+ /* This case should not be reached if counter pool was successfully configured. */ -+ MLX5_ASSERT(false); -+ return NULL; +- /* Now it is safe to free all PF resources */ +- PMD_INIT_LOG(INFO, "Freeing PF resources"); +- if (pf_dev->multi_pf.enabled) { +- nfp_net_keepalive_stop(&pf_dev->multi_pf); +- nfp_net_keepalive_uninit(&pf_dev->multi_pf); +- } +- nfp_cpp_area_free(pf_dev->ctrl_area); +- nfp_cpp_area_free(pf_dev->qc_area); +- free(pf_dev->hwinfo); +- free(pf_dev->sym_tbl); +- nfp_cpp_free(pf_dev->cpp); +- rte_free(app_fw_nic); +- rte_free(pf_dev); +- ++ /* Enable in nfp_net_start() */ + rte_intr_disable(pci_dev->intr_handle); + +- /* Unregister callback func from eal lib */ ++ /* Register in nfp_net_init() */ + rte_intr_callback_unregister(pci_dev->intr_handle, + nfp_net_dev_interrupt_handler, (void *)dev); + ++ nfp_uninit_app_fw_nic(pf_dev); ++ nfp_pf_uninit(pf_dev); ++ + return 0; } - static __rte_always_inline unsigned int -diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c -index 5bf1a679b2..cc087348a4 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_rx.c -+++ b/dpdk/drivers/net/mlx5/mlx5_rx.c -@@ -613,7 +613,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec, - * @param mprq - * Indication if it is called from MPRQ. - * @return -- * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, -+ * 0 in case of empty CQE, -+ * MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, - * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, - * otherwise the packet size in regular RxQ, - * and striding byte count format in mprq case. 
-@@ -697,6 +698,11 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, - if (ret == MLX5_RECOVERY_ERROR_RET || - ret == MLX5_RECOVERY_COMPLETED_RET) - return MLX5_CRITICAL_ERROR_CQE_RET; -+ if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) { -+ *skip_cnt = 1; -+ ++rxq->cq_ci; -+ return MLX5_ERROR_CQE_MASK; -+ } - } else { - return 0; - } -@@ -971,19 +977,18 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) - cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; - len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask, &mcqe, &skip_cnt, false); - if (unlikely(len & MLX5_ERROR_CQE_MASK)) { -+ /* We drop packets with non-critical errors */ -+ rte_mbuf_raw_free(rep); - if (len == MLX5_CRITICAL_ERROR_CQE_RET) { -- rte_mbuf_raw_free(rep); - rq_ci = rxq->rq_ci << sges_n; - break; - } -+ /* Skip specified amount of error CQEs packets */ - rq_ci >>= sges_n; - rq_ci += skip_cnt; - rq_ci <<= sges_n; -- idx = rq_ci & wqe_mask; -- wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; -- seg = (*rxq->elts)[idx]; -- cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; -- len = len & ~MLX5_ERROR_CQE_MASK; -+ MLX5_ASSERT(!pkt); -+ continue; - } - if (len == 0) { - rte_mbuf_raw_free(rep); -diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h -index 2fce908499..d0ceae72ea 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_rx.h -+++ b/dpdk/drivers/net/mlx5/mlx5_rx.h -@@ -101,14 +101,14 @@ struct mlx5_rxq_data { - unsigned int shared:1; /* Shared RXQ. */ - unsigned int delay_drop:1; /* Enable delay drop. */ - unsigned int cqe_comp_layout:1; /* CQE Compression Layout*/ -- unsigned int cq_ci:24; -+ uint16_t port_id; - volatile uint32_t *rq_db; - volatile uint32_t *cq_db; -- uint16_t port_id; - uint32_t elts_ci; - uint32_t rq_ci; - uint16_t consumed_strd; /* Number of consumed strides in WQE. */ - uint32_t rq_pi; -+ uint32_t cq_ci:24; - uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. 
*/ - uint32_t byte_mask; - union { -diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c -index 615e1d073d..f4ac58e2f9 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_stats.c -+++ b/dpdk/drivers/net/mlx5/mlx5_stats.c -@@ -39,24 +39,36 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, - unsigned int n) - { - struct mlx5_priv *priv = dev->data->dev_private; -- unsigned int i; -- uint64_t counters[n]; -+ uint64_t counters[MLX5_MAX_XSTATS]; - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; -+ unsigned int i; -+ uint16_t stats_n = 0; -+ uint16_t stats_n_2nd = 0; - uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n; -+ bool bond_master = (priv->master && priv->pf_bond >= 0); +@@ -576,28 +648,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + + rte_eth_copy_pci_info(eth_dev, pci_dev); + +- if (port == 0 || pf_dev->multi_pf.enabled) { +- uint32_t min_size; +- ++ if (pf_dev->multi_pf.enabled) + hw->ctrl_bar = pf_dev->ctrl_bar; +- min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index; +- net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats", +- min_size, &net_hw->mac_stats_area); +- if (net_hw->mac_stats_bar == NULL) { +- PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); +- return -EIO; +- } +- +- net_hw->mac_stats = net_hw->mac_stats_bar; +- } else { +- if (pf_dev->ctrl_bar == NULL) +- return -ENODEV; +- +- /* Use port offset in pf ctrl_bar for this ports control bar */ ++ else + hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); +- net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + ++ ++ net_hw->mac_stats = pf_dev->mac_stats_bar + + (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); +- } - if (n >= mlx5_stats_n && stats) { -- int stats_n; - int ret; + PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); + PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); +@@ -625,7 +682,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + if (net_hw->eth_xstats_base == NULL) { + PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", + pci_dev->device.name); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto ipsec_exit; + } -- stats_n = mlx5_os_get_stats_n(dev); -- if (stats_n < 0) -- return stats_n; -- if (xstats_ctrl->stats_n != stats_n) -+ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); + /* Work out where in the BAR the queues start. 
*/ +@@ -655,7 +713,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to space for MAC address"); +- return -ENOMEM; ++ err = -ENOMEM; ++ goto xstats_free; + } + + nfp_net_pf_read_mac(app_fw_nic, port); +@@ -693,6 +752,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + nfp_net_stats_reset(eth_dev); + + return 0; ++ ++xstats_free: ++ rte_free(net_hw->eth_xstats_base); ++ipsec_exit: ++ nfp_ipsec_uninit(eth_dev); ++ ++ return err; + } + + #define DEFAULT_FW_PATH "/lib/firmware/netronome" +@@ -1120,26 +1186,46 @@ port_cleanup: + app_fw_nic->ports[id]->eth_dev != NULL) { + struct rte_eth_dev *tmp_dev; + tmp_dev = app_fw_nic->ports[id]->eth_dev; +- nfp_ipsec_uninit(tmp_dev); ++ nfp_net_uninit(tmp_dev); + rte_eth_dev_release_port(tmp_dev); +- app_fw_nic->ports[id] = NULL; + } + } +- nfp_cpp_area_free(pf_dev->ctrl_area); ++ nfp_cpp_area_release_free(pf_dev->ctrl_area); + app_cleanup: + rte_free(app_fw_nic); + + return ret; + } + ++/* Force the physical port down to clear the possible DMA error */ + static int +-nfp_pf_init(struct rte_pci_device *pci_dev) ++nfp_net_force_port_down(struct nfp_pf_dev *pf_dev, ++ struct nfp_eth_table *nfp_eth_table, ++ struct nfp_cpp *cpp) + { ++ int ret; + uint32_t i; + uint32_t id; ++ uint32_t index; ++ uint32_t count; ++ ++ count = nfp_net_get_port_num(pf_dev, nfp_eth_table); ++ for (i = 0; i < count; i++) { ++ id = nfp_function_id_get(pf_dev, i); ++ index = nfp_eth_table->ports[id].index; ++ ret = nfp_eth_set_configured(cpp, index, 0); + if (ret < 0) + return ret; -+ /* -+ * The number of statistics fetched via "ETH_SS_STATS" may vary because -+ * of the port configuration each time. This is also true between 2 -+ * ports. There might be a case that the numbers are the same even if -+ * configurations are different. -+ * It is not recommended to change the configuration without using -+ * RTE API. The port(traffic) restart may trigger another initialization -+ * to make sure the map are correct. 
-+ */ -+ if (xstats_ctrl->stats_n != stats_n || -+ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) - mlx5_os_stats_init(dev); -- ret = mlx5_os_read_dev_counters(dev, counters); -- if (ret) -+ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); -+ if (ret < 0) - return ret; -- for (i = 0; i != mlx5_stats_n; ++i) { -+ for (i = 0; i != mlx5_stats_n; i++) { - stats[i].id = i; - if (xstats_ctrl->info[i].dev) { - uint64_t wrap_n; -@@ -225,30 +237,32 @@ mlx5_xstats_reset(struct rte_eth_dev *dev) - { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; -- int stats_n; - unsigned int i; - uint64_t *counters; - int ret; -+ uint16_t stats_n = 0; -+ uint16_t stats_n_2nd = 0; -+ bool bond_master = (priv->master && priv->pf_bond >= 0); ++ } ++ ++ return 0; ++} ++ ++static int ++nfp_pf_init(struct rte_pci_device *pci_dev) ++{ + int ret = 0; + uint64_t addr; +- uint32_t index; + uint32_t cpp_id; + uint8_t function_id; + struct nfp_cpp *cpp; +@@ -1211,11 +1297,11 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); + pf_dev->multi_pf.function_id = function_id; -- stats_n = mlx5_os_get_stats_n(dev); -- if (stats_n < 0) { -+ ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); -+ if (ret < 0) { - DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, -- strerror(-stats_n)); -- return stats_n; -+ strerror(-ret)); -+ return ret; - } -- if (xstats_ctrl->stats_n != stats_n) -+ if (xstats_ctrl->stats_n != stats_n || -+ (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) - mlx5_os_stats_init(dev); -- counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * -- xstats_ctrl->mlx5_stats_n, 0, -- SOCKET_ID_ANY); -+ /* Considering to use stack directly. 
*/ -+ counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n, -+ 0, SOCKET_ID_ANY); - if (!counters) { -- DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " -- "counters", -+ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters", - dev->data->port_id); - rte_errno = ENOMEM; - return -rte_errno; - } -- ret = mlx5_os_read_dev_counters(dev, counters); -+ ret = mlx5_os_read_dev_counters(dev, bond_master, counters); - if (ret) { - DRV_LOG(ERR, "port %u cannot read device counters: %s", - dev->data->port_id, strerror(rte_errno)); -diff --git a/dpdk/drivers/net/mlx5/mlx5_trigger.c b/dpdk/drivers/net/mlx5/mlx5_trigger.c -index 5ac25d7e2d..fe2c512c5c 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_trigger.c -+++ b/dpdk/drivers/net/mlx5/mlx5_trigger.c -@@ -1498,7 +1498,9 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) - if (!txq) - continue; - queue = mlx5_txq_get_sqn(txq); -- if ((priv->representor || priv->master) && config->dv_esw_en) { -+ if ((priv->representor || priv->master) && -+ config->dv_esw_en && -+ config->fdb_def_rule) { - if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, queue, false)) { - mlx5_txq_release(dev, i); - goto error; -@@ -1524,7 +1526,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev) - } - if (priv->isolated) - return 0; -- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) -+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) - if (mlx5_flow_hw_lacp_rx_flow(dev)) - goto error; - if (dev->data->promiscuous) -@@ -1632,14 +1634,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev) - DRV_LOG(INFO, "port %u FDB default rule is disabled", - dev->data->port_id); +- /* Force the physical port down to clear the possible DMA error */ +- for (i = 0; i < nfp_eth_table->count; i++) { +- id = nfp_function_id_get(pf_dev, i); +- index = nfp_eth_table->ports[id].index; +- nfp_eth_set_configured(cpp, index, 0); ++ ret = nfp_net_force_port_down(pf_dev, nfp_eth_table, cpp); ++ if (ret != 0) { ++ PMD_INIT_LOG(ERR, "Failed to force port down"); ++ ret = -EIO; ++ goto eth_table_cleanup; } -- if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { -+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { - ret = mlx5_flow_lacp_miss(dev); - if (ret) - DRV_LOG(INFO, "port %u LACP rule cannot be created - " - "forward LACP to kernel.", dev->data->port_id); - else -- DRV_LOG(INFO, "LACP traffic will be missed in port %u." -- , dev->data->port_id); -+ DRV_LOG(INFO, "LACP traffic will be missed in port %u.", -+ dev->data->port_id); + + if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo, +@@ -1264,6 +1350,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + + PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); + ++ pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats", ++ NFP_MAC_STATS_SIZE * nfp_eth_table->max_index, ++ &pf_dev->mac_stats_area); ++ if (pf_dev->mac_stats_bar == NULL) { ++ PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats"); ++ goto hwqueues_cleanup; ++ } ++ + /* + * PF initialization has been done at this point. Call app specific + * init code now. 
+@@ -1273,14 +1367,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + if (pf_dev->multi_pf.enabled) { + ret = nfp_enable_multi_pf(pf_dev); + if (ret != 0) +- goto hwqueues_cleanup; ++ goto mac_stats_cleanup; + } + + PMD_INIT_LOG(INFO, "Initializing coreNIC"); + ret = nfp_init_app_fw_nic(pf_dev, dev_info); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); +- goto hwqueues_cleanup; ++ goto mac_stats_cleanup; + } + break; + case NFP_APP_FW_FLOWER_NIC: +@@ -1288,13 +1382,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + ret = nfp_init_app_fw_flower(pf_dev, dev_info); + if (ret != 0) { + PMD_INIT_LOG(ERR, "Could not initialize Flower!"); +- goto hwqueues_cleanup; ++ goto mac_stats_cleanup; + } + break; + default: + PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); + ret = -EINVAL; +- goto hwqueues_cleanup; ++ goto mac_stats_cleanup; } - if (priv->isolated) - return 0; -diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c -index 1ac43548b2..aac078a6ed 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_txq.c -+++ b/dpdk/drivers/net/mlx5/mlx5_txq.c -@@ -1311,11 +1311,18 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) + + /* Register the CPP bridge service here for primary use */ +@@ -1304,13 +1398,18 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + + return 0; + ++mac_stats_cleanup: ++ nfp_cpp_area_release_free(pf_dev->mac_stats_area); + hwqueues_cleanup: +- nfp_cpp_area_free(pf_dev->qc_area); ++ nfp_cpp_area_release_free(pf_dev->qc_area); + sym_tbl_cleanup: + free(sym_tbl); + fw_cleanup: + nfp_fw_unload(cpp); +- nfp_net_keepalive_stop(&pf_dev->multi_pf); ++ if (pf_dev->multi_pf.enabled) { ++ nfp_net_keepalive_stop(&pf_dev->multi_pf); ++ nfp_net_keepalive_uninit(&pf_dev->multi_pf); ++ } + eth_table_cleanup: + free(nfp_eth_table); + hwinfo_cleanup: +@@ -1437,7 +1536,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) + if (sym_tbl == NULL) { + PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); + ret = -EIO; +- goto pf_cleanup; ++ goto cpp_cleanup; } - #ifdef HAVE_MLX5_HWS_SUPPORT - if (priv->sh->config.dv_flow_en == 2) { -- if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) -- return -rte_errno; -+ bool sq_miss_created = false; -+ -+ if (priv->sh->config.fdb_def_rule) { -+ if (mlx5_flow_hw_esw_create_sq_miss_flow(dev, sq_num, true)) -+ return -rte_errno; -+ sq_miss_created = true; -+ } + + /* Read the app ID of the firmware loaded */ +@@ -1484,6 +1583,8 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) + + sym_tbl_cleanup: + free(sym_tbl); ++cpp_cleanup: ++ nfp_cpp_free(cpp); + pf_cleanup: + rte_free(pf_dev); + +diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +index 7927f53403..cfe7225ca5 100644 +--- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c ++++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c +@@ -160,13 +160,17 @@ nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused) + static int + nfp_netvf_close(struct rte_eth_dev *dev) + { ++ struct nfp_net_hw *net_hw; + struct rte_pci_device *pci_dev; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + ++ net_hw = dev->data->dev_private; + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + ++ rte_free(net_hw->eth_xstats_base); + - if (priv->sh->config.repr_matching && - mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) { -- mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); -+ if (sq_miss_created) -+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num); - return -rte_errno; - } + /* + * We assume that the DPDK 
application is stopping all the + * threads/queues before calling the device close function. +@@ -284,8 +288,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; -diff --git a/dpdk/drivers/net/mlx5/mlx5_utils.c b/dpdk/drivers/net/mlx5/mlx5_utils.c -index 4db738785f..b5b6c7c728 100644 ---- a/dpdk/drivers/net/mlx5/mlx5_utils.c -+++ b/dpdk/drivers/net/mlx5/mlx5_utils.c -@@ -379,7 +379,8 @@ _mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx) - idx -= 1; - trunk_idx = mlx5_trunk_idx_get(pool, idx); - trunk = lc->trunks[trunk_idx]; -- MLX5_ASSERT(trunk); -+ if (!trunk) -+ return NULL; - entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk_idx); - return &trunk->data[entry_idx * pool->cfg.size]; - } -diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c -index a31e1b5494..49f750be68 100644 ---- a/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c -+++ b/dpdk/drivers/net/mlx5/windows/mlx5_ethdev_os.c -@@ -178,20 +178,29 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) - return -ENOTSUP; - } --/** -+/* - * Query the number of statistics provided by ETHTOOL. - * - * @param dev - * Pointer to Ethernet device. -+ * @param bond_master -+ * Indicate if the device is a bond master. -+ * @param n_stats -+ * Pointer to number of stats to store. -+ * @param n_stats_sec -+ * Pointer to number of stats to store for the 2nd port of the bond. - * - * @return -- * Number of statistics on success, negative errno value otherwise and -- * rte_errno is set. -+ * 0 on success, negative errno value otherwise and rte_errno is set. - */ - int --mlx5_os_get_stats_n(struct rte_eth_dev *dev) -+mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, -+ uint16_t *n_stats, uint16_t *n_stats_sec) - { - RTE_SET_USED(dev); -+ RTE_SET_USED(bond_master); -+ RTE_SET_USED(n_stats); -+ RTE_SET_USED(n_stats_sec); - return -ENOTSUP; - } +- rte_eth_copy_pci_info(eth_dev, pci_dev); +- + net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", + sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); + if (net_hw->eth_xstats_base == NULL) { +@@ -323,7 +325,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to space for MAC address"); + err = -ENOMEM; +- goto dev_err_ctrl_map; ++ goto free_xstats; + } -@@ -221,6 +230,8 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) - * - * @param dev - * Pointer to Ethernet device. -+ * @param bond_master -+ * Indicate if the device is a bond master. - * @param[out] stats - * Counters table output buffer. - * -@@ -229,9 +240,10 @@ mlx5_os_stats_init(struct rte_eth_dev *dev) - * rte_errno is set. 
- */ - int --mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) -+mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) - { - RTE_SET_USED(dev); -+ RTE_SET_USED(bond_master); - RTE_SET_USED(stats); - return -ENOTSUP; + nfp_read_mac(hw); +@@ -360,8 +362,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) + + return 0; + +-dev_err_ctrl_map: +- nfp_cpp_area_free(net_hw->ctrl_area); ++free_xstats: ++ rte_free(net_hw->eth_xstats_base); + + return err; } -diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -index daa69e533a..212c300c14 100644 ---- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c -+++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -@@ -198,7 +198,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_L4_TCP, -- RTE_PTYPE_L4_UDP -+ RTE_PTYPE_L4_UDP, -+ RTE_PTYPE_UNKNOWN - }; +diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c +index f832b52d89..13f58b210e 100644 +--- a/dpdk/drivers/net/nfp/nfp_flow.c ++++ b/dpdk/drivers/net/nfp/nfp_flow.c +@@ -312,14 +312,14 @@ nfp_check_mask_add(struct nfp_flow_priv *priv, + ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id); + if (ret != 0) + return false; ++ ++ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; + } else { + /* Mask entry already exist */ + mask_entry->ref_cnt++; + *mask_id = mask_entry->mask_id; + } - return ptypes; -diff --git a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -index c12364941d..4cc64c7cad 100644 ---- a/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -+++ b/dpdk/drivers/net/mvpp2/mrvl_ethdev.c -@@ -1777,7 +1777,8 @@ mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) - RTE_PTYPE_L3_IPV6_EXT, - RTE_PTYPE_L2_ETHER_ARP, - RTE_PTYPE_L4_TCP, -- RTE_PTYPE_L4_UDP -+ RTE_PTYPE_L4_UDP, -+ RTE_PTYPE_UNKNOWN - }; +- *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; +- + return true; + } - return ptypes; -diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c -index b8a32832d7..f8cb05a118 100644 ---- a/dpdk/drivers/net/netvsc/hn_ethdev.c -+++ b/dpdk/drivers/net/netvsc/hn_ethdev.c -@@ -1127,8 +1127,10 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu) - int i, ret = 0; +@@ -3177,7 +3177,6 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, + goto free_entry; + } - /* Point primary queues at new primary channel */ -- rxqs[0]->chan = hv->channels[0]; -- txqs[0]->chan = hv->channels[0]; -+ if (rxqs[0]) { -+ rxqs[0]->chan = hv->channels[0]; -+ txqs[0]->chan = hv->channels[0]; -+ } +- rte_free(entry); + rte_free(find_entry); + priv->pre_tun_cnt--; - ret = hn_attach(hv, mtu); - if (ret) -@@ -1140,10 +1142,12 @@ hn_reinit(struct rte_eth_dev *dev, uint16_t mtu) - return ret; +@@ -3658,7 +3657,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + ttl_tos_flag = true; + } + } else { +- nfp_flow_action_set_hl(position, action, ttl_tos_flag); ++ nfp_flow_action_set_hl(position, action, tc_hl_flag); + if (!tc_hl_flag) { + position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); + tc_hl_flag = true; +@@ -3675,7 +3674,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + break; + case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: + PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP"); +- nfp_flow_action_set_tc(position, action, ttl_tos_flag); ++ nfp_flow_action_set_tc(position, action, tc_hl_flag); + if (!tc_hl_flag) { + position += 
sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); + tc_hl_flag = true; +@@ -3741,6 +3740,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, + total_actions++; + } - /* Point any additional queues at new subchannels */ -- for (i = 1; i < dev->data->nb_rx_queues; i++) -- rxqs[i]->chan = hv->channels[i]; -- for (i = 1; i < dev->data->nb_tx_queues; i++) -- txqs[i]->chan = hv->channels[i]; -+ if (rxqs[0]) { -+ for (i = 1; i < dev->data->nb_rx_queues; i++) -+ rxqs[i]->chan = hv->channels[i]; -+ for (i = 1; i < dev->data->nb_tx_queues; i++) -+ txqs[i]->chan = hv->channels[i]; ++ if (nfp_flow->install_flag && total_actions == 0) { ++ PMD_DRV_LOG(ERR, "The action list is empty"); ++ return -ENOTSUP; + } ++ + if (drop_flag) + nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP); + else if (total_actions > 1) +diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.c b/dpdk/drivers/net/nfp/nfp_ipsec.c +index 452947380e..56f3777226 100644 +--- a/dpdk/drivers/net/nfp/nfp_ipsec.c ++++ b/dpdk/drivers/net/nfp/nfp_ipsec.c +@@ -18,6 +18,7 @@ + #include "nfp_rxtx.h" - return ret; - } -diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c -index e4f5015aa3..9bf1ec5509 100644 ---- a/dpdk/drivers/net/netvsc/hn_rxtx.c -+++ b/dpdk/drivers/net/netvsc/hn_rxtx.c -@@ -612,7 +612,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, - RTE_PTYPE_L4_MASK); + #define NFP_UDP_ESP_PORT 4500 ++#define NFP_ESP_IV_LENGTH 8 - if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { -- m->vlan_tci = info->vlan_info; -+ m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info), -+ NDIS_VLAN_INFO_PRI(info->vlan_info), -+ NDIS_VLAN_INFO_CFI(info->vlan_info)); - m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; + static const struct rte_cryptodev_capabilities nfp_crypto_caps[] = { + { +@@ -521,10 +522,14 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, + char *save; + char *iv_b; + char *iv_str; +- uint8_t *cfg_iv; ++ const rte_be32_t *iv_value; ++ uint8_t cfg_iv[NFP_ESP_IV_LENGTH] = {}; - /* NDIS always strips tag, put it back if necessary */ -@@ -1332,7 +1334,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, - if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { - pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, - NDIS_PKTINFO_TYPE_VLAN); -- *pi_data = m->vlan_tci; -+ *pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci), -+ RTE_VLAN_TCI_PRI(m->vlan_tci), -+ RTE_VLAN_TCI_DEI(m->vlan_tci)); - } + iv_str = strdup(iv_string); +- cfg_iv = (uint8_t *)cfg->aesgcm_fields.iv; ++ if (iv_str == NULL) { ++ PMD_DRV_LOG(ERR, "Failed to strdup iv_string"); ++ return; ++ } - if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { -diff --git a/dpdk/drivers/net/netvsc/hn_vf.c b/dpdk/drivers/net/netvsc/hn_vf.c -index 90cb6f6923..a4e958419d 100644 ---- a/dpdk/drivers/net/netvsc/hn_vf.c -+++ b/dpdk/drivers/net/netvsc/hn_vf.c -@@ -264,7 +264,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv) - goto exit; - } + for (i = 0; i < iv_len; i++) { + iv_b = strtok_r(i ? 
NULL : iv_str, ",", &save); +@@ -534,8 +539,9 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, + cfg_iv[i] = strtoul(iv_b, NULL, 0); + } -- ret = hn_vf_mtu_set(dev, dev->data->mtu); -+ ret = rte_eth_dev_set_mtu(port, dev->data->mtu); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to set VF MTU"); - goto exit; -@@ -794,7 +794,7 @@ int hn_vf_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) - rte_rwlock_read_lock(&hv->vf_lock); - vf_dev = hn_get_vf_dev(hv); - if (hv->vf_ctx.vf_vsc_switched && vf_dev) -- ret = vf_dev->dev_ops->mtu_set(vf_dev, mtu); -+ ret = rte_eth_dev_set_mtu(vf_dev->data->port_id, mtu); - rte_rwlock_read_unlock(&hv->vf_lock); +- *(uint32_t *)cfg_iv = rte_be_to_cpu_32(*(uint32_t *)cfg_iv); +- *(uint32_t *)&cfg_iv[4] = rte_be_to_cpu_32(*(uint32_t *)&cfg_iv[4]); ++ iv_value = (const rte_be32_t *)(cfg_iv); ++ cfg->aesgcm_fields.iv[0] = rte_be_to_cpu_32(iv_value[0]); ++ cfg->aesgcm_fields.iv[1] = rte_be_to_cpu_32(iv_value[1]); - return ret; -diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c -index 6b523d98b0..9ecd5f49c7 100644 ---- a/dpdk/drivers/net/nfp/flower/nfp_flower.c -+++ b/dpdk/drivers/net/nfp/flower/nfp_flower.c -@@ -82,63 +82,6 @@ nfp_flower_pf_start(struct rte_eth_dev *dev) - return 0; + free(iv_str); } +@@ -576,7 +582,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, + uint32_t offset; + uint32_t device_id; + const char *iv_str; +- const uint32_t *key; ++ const rte_be32_t *key; + struct nfp_net_hw *net_hw; --/* Reset and stop device. The device can not be restarted. */ --static int --nfp_flower_pf_close(struct rte_eth_dev *dev) --{ -- uint16_t i; -- struct nfp_net_hw *hw; -- struct nfp_pf_dev *pf_dev; -- struct nfp_net_txq *this_tx_q; -- struct nfp_net_rxq *this_rx_q; -- struct nfp_flower_representor *repr; -- struct nfp_app_fw_flower *app_fw_flower; -- -- if (rte_eal_process_type() != RTE_PROC_PRIMARY) -- return 0; -- -- repr = dev->data->dev_private; -- hw = repr->app_fw_flower->pf_hw; -- pf_dev = hw->pf_dev; -- app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv); -- -- nfp_mtr_priv_uninit(pf_dev); -- -- /* -- * We assume that the DPDK application is stopping all the -- * threads/queues before calling the device close function. 
-- */ -- nfp_net_disable_queues(dev); -- -- /* Clear queues */ -- for (i = 0; i < dev->data->nb_tx_queues; i++) { -- this_tx_q = dev->data->tx_queues[i]; -- nfp_net_reset_tx_queue(this_tx_q); -- } -- -- for (i = 0; i < dev->data->nb_rx_queues; i++) { -- this_rx_q = dev->data->rx_queues[i]; -- nfp_net_reset_rx_queue(this_rx_q); -- } -- -- /* Cancel possible impending LSC work here before releasing the port */ -- rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); -- -- nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); -- -- /* Now it is safe to free all PF resources */ -- PMD_DRV_LOG(INFO, "Freeing PF resources"); -- nfp_cpp_area_free(pf_dev->ctrl_area); -- nfp_cpp_area_free(pf_dev->qc_area); -- free(pf_dev->hwinfo); -- free(pf_dev->sym_tbl); -- nfp_cpp_free(pf_dev->cpp); -- rte_free(app_fw_flower); -- rte_free(pf_dev); -- -- return 0; --} -- - static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { - .dev_infos_get = nfp_net_infos_get, - .link_update = nfp_net_link_update, -@@ -146,7 +89,6 @@ static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { - - .dev_start = nfp_flower_pf_start, - .dev_stop = nfp_net_stop, -- .dev_close = nfp_flower_pf_close, - }; - - static inline struct nfp_flower_representor * -@@ -191,7 +133,9 @@ nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw, - return false; + net_hw = eth_dev->data->dev_private; +@@ -626,7 +632,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, + return -EINVAL; } -- rte_ring_enqueue(repr->ring, (void *)mbuf); -+ if (rte_ring_enqueue(repr->ring, (void *)mbuf) != 0) -+ return false; -+ - return true; - } +- key = (const uint32_t *)(aead->key.data); ++ key = (const rte_be32_t *)(aead->key.data); -@@ -567,6 +511,8 @@ nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw) + /* + * The CHACHA20's key order needs to be adjusted based on hardware design. +@@ -638,16 +644,22 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, - pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1; + for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) { + index = (i + offset) % (key_length / sizeof(cfg->cipher_key[0])); +- cfg->cipher_key[index] = rte_cpu_to_be_32(*key++); ++ cfg->cipher_key[index] = rte_be_to_cpu_32(key[i]); + } -+ nfp_net_disable_queues(eth_dev); + /* +- * The iv of the FW is equal to ESN by default. Reading the +- * iv of the configuration information is not supported. ++ * The iv of the FW is equal to ESN by default. Only the ++ * aead algorithm can offload the iv of configuration and ++ * the length of iv cannot be greater than NFP_ESP_IV_LENGTH. 
+ */ + iv_str = getenv("ETH_SEC_IV_OVR"); + if (iv_str != NULL) { + iv_len = aead->iv.length; ++ if (iv_len > NFP_ESP_IV_LENGTH) { ++ PMD_DRV_LOG(ERR, "Unsupported length of iv data"); ++ return -EINVAL; ++ } + - snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name); - for (i = 0; i < hw->max_tx_queues; i++) { - txq = eth_dev->data->tx_queues[i]; -@@ -858,6 +804,23 @@ app_cleanup: - return ret; - } + nfp_aesgcm_iv_update(cfg, iv_len, iv_str); + } -+void -+nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev) -+{ -+ struct nfp_app_fw_flower *app_fw_flower; -+ -+ app_fw_flower = pf_dev->app_fw_priv; -+ nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw); -+ nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area); -+ nfp_cpp_area_free(pf_dev->ctrl_area); -+ rte_free(app_fw_flower->pf_hw); -+ nfp_mtr_priv_uninit(pf_dev); -+ nfp_flow_priv_uninit(pf_dev); -+ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) -+ PMD_DRV_LOG(WARNING, "Failed to free switch domain for device"); -+ rte_free(app_fw_flower); -+} -+ - int - nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev) - { -diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.h b/dpdk/drivers/net/nfp/flower/nfp_flower.h -index 6f27c06acc..8393de66c5 100644 ---- a/dpdk/drivers/net/nfp/flower/nfp_flower.h -+++ b/dpdk/drivers/net/nfp/flower/nfp_flower.h -@@ -106,6 +106,7 @@ nfp_flower_support_decap_v2(const struct nfp_app_fw_flower *app_fw_flower) +@@ -664,7 +676,7 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev, + int ret; + uint32_t i; + uint32_t device_id; +- const uint32_t *key; ++ const rte_be32_t *key; + struct nfp_net_hw *net_hw; + + net_hw = eth_dev->data->dev_private; +@@ -698,14 +710,14 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev, + return -EINVAL; + } - int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev, - const struct nfp_dev_info *dev_info); -+void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev); - int nfp_secondary_init_app_fw_flower(struct nfp_pf_dev *pf_dev); - bool nfp_flower_pf_dispatch_pkts(struct nfp_net_hw *hw, - struct rte_mbuf *mbuf, -diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c -index c25487c277..102daa3d70 100644 ---- a/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c -+++ b/dpdk/drivers/net/nfp/flower/nfp_flower_ctrl.c -@@ -441,6 +441,11 @@ nfp_flower_cmsg_port_mod_rx(struct nfp_app_fw_flower *app_fw_flower, +- key = (const uint32_t *)(cipher->key.data); ++ key = (const rte_be32_t *)(cipher->key.data); + if (key_length > sizeof(cfg->cipher_key)) { + PMD_DRV_LOG(ERR, "Insufficient space for offloaded key"); return -EINVAL; } -+ if (repr == NULL) { -+ PMD_DRV_LOG(ERR, "Can not get 'repr' for port %#x", port); -+ return -EINVAL; -+ } -+ - repr->link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; - if ((msg->info & NFP_FLOWER_CMSG_PORT_MOD_INFO_LINK) != 0) - repr->link.link_status = RTE_ETH_LINK_UP; -diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -index 0f0e63aae0..88fb6975af 100644 ---- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -+++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.c -@@ -291,12 +291,156 @@ nfp_flower_repr_tx_burst(void *tx_queue, - return sent; + for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) +- cfg->cipher_key[i] = rte_cpu_to_be_32(*key++); ++ cfg->cipher_key[i] = rte_be_to_cpu_32(key[i]); + + return 0; } +@@ -800,7 +812,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, + uint32_t i; + uint8_t key_length; 
+ uint32_t device_id; +- const uint32_t *key; ++ const rte_be32_t *key; + struct nfp_net_hw *net_hw; -+static void -+nfp_flower_repr_free_queue(struct nfp_flower_representor *repr) -+{ -+ uint16_t i; -+ struct rte_eth_dev *eth_dev = repr->eth_dev; -+ -+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) -+ rte_free(eth_dev->data->tx_queues[i]); -+ -+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) -+ rte_free(eth_dev->data->rx_queues[i]); -+} -+ -+static void -+nfp_flower_pf_repr_close_queue(struct nfp_flower_representor *repr) -+{ -+ struct rte_eth_dev *eth_dev = repr->eth_dev; -+ -+ /* -+ * We assume that the DPDK application is stopping all the -+ * threads/queues before calling the device close function. -+ */ -+ nfp_net_disable_queues(eth_dev); -+ -+ /* Clear queues */ -+ nfp_net_close_tx_queue(eth_dev); -+ nfp_net_close_rx_queue(eth_dev); -+} -+ -+static void -+nfp_flower_repr_close_queue(struct nfp_flower_representor *repr) -+{ -+ switch (repr->repr_type) { -+ case NFP_REPR_TYPE_PHYS_PORT: -+ nfp_flower_repr_free_queue(repr); -+ break; -+ case NFP_REPR_TYPE_PF: -+ nfp_flower_pf_repr_close_queue(repr); -+ break; -+ case NFP_REPR_TYPE_VF: -+ nfp_flower_repr_free_queue(repr); -+ break; -+ default: -+ PMD_DRV_LOG(ERR, "Unsupported repr port type."); -+ break; -+ } -+} -+ -+static int -+nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) -+{ -+ uint16_t index; -+ struct nfp_flower_representor *repr; -+ -+ repr = eth_dev->data->dev_private; -+ rte_ring_free(repr->ring); -+ -+ if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { -+ index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); -+ repr->app_fw_flower->phy_reprs[index] = NULL; -+ } else { -+ index = repr->vf_id; -+ repr->app_fw_flower->vf_reprs[index] = NULL; -+ } -+ -+ return 0; -+} -+ -+static int -+nfp_flower_pf_repr_uninit(struct rte_eth_dev *eth_dev) -+{ -+ struct nfp_flower_representor *repr = eth_dev->data->dev_private; -+ -+ repr->app_fw_flower->pf_repr = NULL; -+ -+ return 0; -+} -+ -+static void -+nfp_flower_repr_free(struct nfp_flower_representor *repr, -+ enum nfp_repr_type repr_type) -+{ -+ switch (repr_type) { -+ case NFP_REPR_TYPE_PHYS_PORT: -+ nfp_flower_repr_uninit(repr->eth_dev); -+ break; -+ case NFP_REPR_TYPE_PF: -+ nfp_flower_pf_repr_uninit(repr->eth_dev); -+ break; -+ case NFP_REPR_TYPE_VF: -+ nfp_flower_repr_uninit(repr->eth_dev); -+ break; -+ default: -+ PMD_DRV_LOG(ERR, "Unsupported repr port type."); -+ break; -+ } -+} -+ -+/* Reset and stop device. The device can not be restarted. 
*/ -+static int -+nfp_flower_repr_dev_close(struct rte_eth_dev *dev) -+{ -+ uint16_t i; -+ struct nfp_net_hw *hw; -+ struct nfp_pf_dev *pf_dev; -+ struct nfp_flower_representor *repr; -+ struct nfp_app_fw_flower *app_fw_flower; -+ -+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) -+ return 0; -+ -+ repr = dev->data->dev_private; -+ app_fw_flower = repr->app_fw_flower; -+ hw = app_fw_flower->pf_hw; -+ pf_dev = hw->pf_dev; -+ -+ if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC) -+ return -EINVAL; -+ -+ nfp_flower_repr_close_queue(repr); -+ -+ nfp_flower_repr_free(repr, repr->repr_type); -+ -+ for (i = 0; i < MAX_FLOWER_VFS; i++) { -+ if (app_fw_flower->vf_reprs[i] != NULL) -+ return 0; -+ } -+ -+ for (i = 0; i < NFP_MAX_PHYPORTS; i++) { -+ if (app_fw_flower->phy_reprs[i] != NULL) -+ return 0; -+ } -+ -+ if (app_fw_flower->pf_repr != NULL) -+ return 0; -+ -+ /* Now it is safe to free all PF resources */ -+ nfp_uninit_app_fw_flower(pf_dev); -+ nfp_pf_uninit(pf_dev); -+ -+ return 0; -+} -+ - static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { - .dev_infos_get = nfp_flower_repr_dev_infos_get, + if (digest_length == 0) { +@@ -847,7 +859,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, + return -EINVAL; + } - .dev_start = nfp_flower_pf_start, - .dev_configure = nfp_net_configure, - .dev_stop = nfp_net_stop, -+ .dev_close = nfp_flower_repr_dev_close, +- key = (const uint32_t *)(auth->key.data); ++ key = (const rte_be32_t *)(auth->key.data); + key_length = auth->key.length; + if (key_length > sizeof(cfg->auth_key)) { + PMD_DRV_LOG(ERR, "Insufficient space for offloaded auth key!"); +@@ -855,7 +867,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, + } - .rx_queue_setup = nfp_net_rx_queue_setup, - .tx_queue_setup = nfp_net_tx_queue_setup, -@@ -319,6 +463,7 @@ static const struct eth_dev_ops nfp_flower_repr_dev_ops = { - .dev_start = nfp_flower_repr_dev_start, - .dev_configure = nfp_net_configure, - .dev_stop = nfp_flower_repr_dev_stop, -+ .dev_close = nfp_flower_repr_dev_close, + for (i = 0; i < key_length / sizeof(cfg->auth_key[0]); i++) +- cfg->auth_key[i] = rte_cpu_to_be_32(*key++); ++ cfg->auth_key[i] = rte_be_to_cpu_32(key[i]); + + return 0; + } +@@ -895,7 +907,7 @@ nfp_crypto_msg_build(struct rte_eth_dev *eth_dev, + return ret; + } + +- cfg->aesgcm_fields.salt = rte_cpu_to_be_32(conf->ipsec.salt); ++ cfg->aesgcm_fields.salt = conf->ipsec.salt; + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + /* Only support Auth + Cipher for inbound */ +@@ -960,7 +972,10 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, + struct rte_security_session_conf *conf, + struct nfp_ipsec_msg *msg) + { ++ int i; + int ret; ++ rte_be32_t *src_ip; ++ rte_be32_t *dst_ip; + struct ipsec_add_sa *cfg; + enum rte_security_ipsec_tunnel_type type; + +@@ -1018,12 +1033,18 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, + type = conf->ipsec.tunnel.type; + cfg->ctrl_word.mode = NFP_IPSEC_MODE_TUNNEL; + if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { +- cfg->src_ip.v4 = conf->ipsec.tunnel.ipv4.src_ip; +- cfg->dst_ip.v4 = conf->ipsec.tunnel.ipv4.dst_ip; ++ src_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.src_ip.s_addr; ++ dst_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.dst_ip.s_addr; ++ cfg->src_ip[0] = rte_be_to_cpu_32(src_ip[0]); ++ cfg->dst_ip[0] = rte_be_to_cpu_32(dst_ip[0]); + cfg->ipv6 = 0; + } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { +- cfg->src_ip.v6 = conf->ipsec.tunnel.ipv6.src_addr; +- cfg->dst_ip.v6 = conf->ipsec.tunnel.ipv6.dst_addr; ++ src_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.src_addr.s6_addr; ++ 
dst_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr; ++ for (i = 0; i < 4; i++) { ++ cfg->src_ip[i] = rte_be_to_cpu_32(src_ip[i]); ++ cfg->dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]); ++ } + cfg->ipv6 = 1; + } else { + PMD_DRV_LOG(ERR, "Unsupported address family!"); +@@ -1032,18 +1053,9 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, - .rx_queue_setup = nfp_flower_repr_rx_queue_setup, - .tx_queue_setup = nfp_flower_repr_tx_queue_setup, -@@ -410,6 +555,7 @@ nfp_flower_pf_repr_init(struct rte_eth_dev *eth_dev, + break; + case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT: +- type = conf->ipsec.tunnel.type; + cfg->ctrl_word.mode = NFP_IPSEC_MODE_TRANSPORT; +- if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { +- memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); +- cfg->ipv6 = 0; +- } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { +- memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); +- cfg->ipv6 = 1; +- } else { +- PMD_DRV_LOG(ERR, "Unsupported address family!"); +- return -EINVAL; +- } ++ memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); ++ memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip)); - repr->app_fw_flower->pf_repr = repr; - repr->app_fw_flower->pf_hw->eth_dev = eth_dev; -+ repr->eth_dev = eth_dev; + break; + default: +@@ -1172,18 +1184,18 @@ nfp_security_set_pkt_metadata(void *device, + desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *); - return 0; - } -@@ -501,6 +647,8 @@ nfp_flower_repr_init(struct rte_eth_dev *eth_dev, - app_fw_flower->vf_reprs[index] = repr; + if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) { +- desc_md->esn.low = rte_cpu_to_be_32(*sqn); +- desc_md->esn.hi = rte_cpu_to_be_32(*sqn >> 32); ++ desc_md->esn.low = (uint32_t)*sqn; ++ desc_md->esn.hi = (uint32_t)(*sqn >> 32); + } else if (priv_session->msg.ctrl_word.ext_seq != 0) { +- desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.low); +- desc_md->esn.hi = rte_cpu_to_be_32(priv_session->ipsec.esn.hi); ++ desc_md->esn.low = priv_session->ipsec.esn.low; ++ desc_md->esn.hi = priv_session->ipsec.esn.hi; + } else { +- desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.value); ++ desc_md->esn.low = priv_session->ipsec.esn.low; + desc_md->esn.hi = 0; + } + + desc_md->enc = 1; +- desc_md->sa_idx = rte_cpu_to_be_32(priv_session->sa_index); ++ desc_md->sa_idx = priv_session->sa_index; } -+ repr->eth_dev = eth_dev; -+ return 0; +diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.h b/dpdk/drivers/net/nfp/nfp_ipsec.h +index d7a729398a..f7c4f3f225 100644 +--- a/dpdk/drivers/net/nfp/nfp_ipsec.h ++++ b/dpdk/drivers/net/nfp/nfp_ipsec.h +@@ -36,11 +36,6 @@ struct sa_ctrl_word { + uint32_t spare2 :1; /**< Must be set to 0 */ + }; - mac_cleanup: -@@ -511,6 +659,35 @@ ring_cleanup: - return ret; +-union nfp_ip_addr { +- struct in6_addr v6; +- struct in_addr v4; +-}; +- + struct ipsec_add_sa { + uint32_t cipher_key[8]; /**< Cipher Key */ + union { +@@ -60,8 +55,8 @@ struct ipsec_add_sa { + uint8_t spare1; + uint32_t soft_byte_cnt; /**< Soft lifetime byte count */ + uint32_t hard_byte_cnt; /**< Hard lifetime byte count */ +- union nfp_ip_addr src_ip; /**< Src IP addr */ +- union nfp_ip_addr dst_ip; /**< Dst IP addr */ ++ uint32_t src_ip[4]; /**< Src IP addr */ ++ uint32_t dst_ip[4]; /**< Dst IP addr */ + uint16_t natt_dst_port; /**< NAT-T UDP Header dst port */ + uint16_t natt_src_port; /**< NAT-T UDP Header src port */ + uint32_t soft_lifetime_limit; /**< Soft lifetime time limit */ +diff --git a/dpdk/drivers/net/nfp/nfp_net_common.c b/dpdk/drivers/net/nfp/nfp_net_common.c +index e969b840d6..bf44373b26 
100644 +--- a/dpdk/drivers/net/nfp/nfp_net_common.c ++++ b/dpdk/drivers/net/nfp/nfp_net_common.c +@@ -153,10 +153,10 @@ static const uint32_t nfp_net_link_speed_nfp2rte[] = { + [NFP_NET_CFG_STS_LINK_RATE_100G] = RTE_ETH_SPEED_NUM_100G, + }; + +-static uint16_t +-nfp_net_link_speed_rte2nfp(uint16_t speed) ++static size_t ++nfp_net_link_speed_rte2nfp(uint32_t speed) + { +- uint16_t i; ++ size_t i; + + for (i = 0; i < RTE_DIM(nfp_net_link_speed_nfp2rte); i++) { + if (speed == nfp_net_link_speed_nfp2rte[i]) +@@ -166,7 +166,7 @@ nfp_net_link_speed_rte2nfp(uint16_t speed) + return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN; } -+static void -+nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower) -+{ -+ uint32_t i; -+ struct nfp_flower_representor *repr; -+ -+ for (i = 0; i < MAX_FLOWER_VFS; i++) { -+ repr = app_fw_flower->vf_reprs[i]; -+ if (repr != NULL) { -+ nfp_flower_repr_free(repr, NFP_REPR_TYPE_VF); -+ app_fw_flower->vf_reprs[i] = NULL; -+ } -+ } -+ -+ for (i = 0; i < NFP_MAX_PHYPORTS; i++) { -+ repr = app_fw_flower->phy_reprs[i]; -+ if (repr != NULL) { -+ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PHYS_PORT); -+ app_fw_flower->phy_reprs[i] = NULL; -+ } -+ } -+ -+ repr = app_fw_flower->pf_repr; -+ if (repr != NULL) { -+ nfp_flower_repr_free(repr, NFP_REPR_TYPE_PF); -+ app_fw_flower->pf_repr = NULL; -+ } -+} -+ - static int - nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) +-static void ++void + nfp_net_notify_port_speed(struct nfp_net_hw *hw, + struct rte_eth_link *link) { -@@ -563,7 +740,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) - eth_port = &nfp_eth_table->ports[i]; - flower_repr.repr_type = NFP_REPR_TYPE_PHYS_PORT; - flower_repr.port_id = nfp_flower_get_phys_port_id(eth_port->index); -- flower_repr.nfp_idx = eth_port->eth_index; -+ flower_repr.nfp_idx = eth_port->index; - flower_repr.vf_id = i + 1; +@@ -189,9 +189,6 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw, + nfp_net_link_speed_rte2nfp(link->link_speed)); + } - /* Copy the real mac of the interface to the representor struct */ -@@ -585,7 +762,7 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) - } +-/* The length of firmware version string */ +-#define FW_VER_LEN 32 +- + /** + * Reconfigure the firmware via the mailbox + * +@@ -1299,6 +1296,7 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) + RTE_PTYPE_INNER_L4_NONFRAG, + RTE_PTYPE_INNER_L4_ICMP, + RTE_PTYPE_INNER_L4_SCTP, ++ RTE_PTYPE_UNKNOWN + }; - if (i < app_fw_flower->num_phyport_reprs) -- return ret; -+ goto repr_free; + if (dev->rx_pkt_burst != nfp_net_recv_pkts) +@@ -2062,17 +2060,22 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev, + size_t fw_size) + { + struct nfp_net_hw *hw; +- char mip_name[FW_VER_LEN]; +- char app_name[FW_VER_LEN]; +- char nsp_version[FW_VER_LEN]; +- char vnic_version[FW_VER_LEN]; ++ char app_name[FW_VER_LEN] = {0}; ++ char mip_name[FW_VER_LEN] = {0}; ++ char nsp_version[FW_VER_LEN] = {0}; ++ char vnic_version[FW_VER_LEN] = {0}; - /* - * Now allocate eth_dev's for VF representors. 
-@@ -614,9 +791,14 @@ nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) - } + if (fw_size < FW_VER_LEN) + return FW_VER_LEN; - if (i < app_fw_flower->num_vf_reprs) -- return ret; -+ goto repr_free; + hw = nfp_net_get_hw(dev); - return 0; -+ -+repr_free: -+ nfp_flower_repr_free_all(app_fw_flower); +- if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) { ++ if (hw->fw_version[0] != 0) { ++ snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version); ++ return 0; ++ } + -+ return ret; - } ++ if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) { + snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d", + hw->ver.extend, hw->ver.class, + hw->ver.major, hw->ver.minor); +@@ -2084,8 +2087,16 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev, + nfp_net_get_mip_name(hw, mip_name); + nfp_net_get_app_name(hw, app_name); - int -@@ -634,10 +816,9 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) - pci_dev = pf_dev->pci_dev; +- snprintf(fw_version, FW_VER_LEN, "%s %s %s %s", ++ if (nsp_version[0] == 0 || mip_name[0] == 0) { ++ snprintf(fw_version, FW_VER_LEN, "%s %s %s %s", + vnic_version, nsp_version, mip_name, app_name); ++ return 0; ++ } ++ ++ snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s", ++ vnic_version, nsp_version, mip_name, app_name); ++ ++ snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version); - /* Allocate a switch domain for the flower app */ -- if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID && -- rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) { -+ ret = rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id); -+ if (ret != 0) - PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device"); -- } + return 0; + } +@@ -2207,7 +2218,7 @@ nfp_net_pause_frame_set(struct nfp_net_hw *net_hw, + } - /* Now parse PCI device args passed for representor info */ - if (pci_dev->device.devargs != NULL) { -@@ -677,8 +858,15 @@ nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower) - ret = nfp_flower_repr_alloc(app_fw_flower); - if (ret != 0) { - PMD_INIT_LOG(ERR, "representors allocation failed"); -- return -EINVAL; -+ ret = -EINVAL; -+ goto domain_free; + err = nfp_eth_config_commit_end(nsp); +- if (err != 0) { ++ if (err < 0) { + PMD_DRV_LOG(ERR, "Failed to configure pause frame."); + return err; } +@@ -2249,3 +2260,13 @@ nfp_net_flow_ctrl_set(struct rte_eth_dev *dev, return 0; -+ -+domain_free: -+ if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) -+ PMD_INIT_LOG(WARNING, "failed to free switch domain for device"); -+ -+ return ret; } -diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h -index bcb4c3cdb5..8053617562 100644 ---- a/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h -+++ b/dpdk/drivers/net/nfp/flower/nfp_flower_representor.h -@@ -20,6 +20,7 @@ struct nfp_flower_representor { - struct rte_ring *ring; - struct rte_eth_link link; - struct rte_eth_stats repr_stats; -+ struct rte_eth_dev *eth_dev; - }; - - int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower); -diff --git a/dpdk/drivers/net/nfp/meson.build b/dpdk/drivers/net/nfp/meson.build -index cf9c16266d..7bf94710f1 100644 ---- a/dpdk/drivers/net/nfp/meson.build -+++ b/dpdk/drivers/net/nfp/meson.build -@@ -4,6 +4,7 @@ - if not is_linux or not dpdk_conf.get('RTE_ARCH_64') - build = false - reason = 'only supported on 64-bit Linux' -+ subdir_done() - endif ++ ++uint32_t ++nfp_net_get_port_num(struct nfp_pf_dev *pf_dev, ++ struct 
nfp_eth_table *nfp_eth_table) ++{ ++ if (pf_dev->multi_pf.enabled) ++ return 1; ++ else ++ return nfp_eth_table->count; ++} +diff --git a/dpdk/drivers/net/nfp/nfp_net_common.h b/dpdk/drivers/net/nfp/nfp_net_common.h +index 30fea7ae02..72286ab5c9 100644 +--- a/dpdk/drivers/net/nfp/nfp_net_common.h ++++ b/dpdk/drivers/net/nfp/nfp_net_common.h +@@ -38,6 +38,9 @@ - sources = files( -diff --git a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c -index ff9b10f046..b9da74bc99 100644 ---- a/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c -+++ b/dpdk/drivers/net/nfp/nfd3/nfp_nfd3_dp.c -@@ -137,7 +137,7 @@ nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq, - } - } + #define NFP_BEAT_LENGTH 8 --static inline void -+static inline int - nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, - struct nfp_net_txq *txq, - struct rte_mbuf *pkt) -@@ -174,7 +174,7 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, - } ++/* The length of firmware version string */ ++#define FW_VER_LEN 32 ++ + /* + * Each PF has corresponding word to beat: + * Offset | Usage +@@ -98,6 +101,9 @@ struct nfp_pf_dev { - if (meta_data->length == 0) -- return; -+ return 0; + uint8_t *qc_bar; - meta_info = meta_data->header; - meta_data->header = rte_cpu_to_be_32(meta_data->header); -@@ -188,15 +188,16 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, - case NFP_NET_META_VLAN: - if (vlan_layer > 0) { - PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported"); -- return; -+ return -EINVAL; - } ++ struct nfp_cpp_area *mac_stats_area; ++ uint8_t *mac_stats_bar; + - nfp_net_set_meta_vlan(meta_data, pkt, layer); - vlan_layer++; - break; - case NFP_NET_META_IPSEC: - if (ipsec_layer > 2) { - PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); -- return; -+ return -EINVAL; - } + struct nfp_hwinfo *hwinfo; + struct nfp_rtsym_table *sym_tbl; - nfp_net_set_meta_ipsec(meta_data, txq, pkt, layer, ipsec_layer); -@@ -204,11 +205,13 @@ nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data, - break; - default: - PMD_DRV_LOG(ERR, "The metadata type not supported"); -- return; -+ return -ENOTSUP; - } +@@ -165,8 +171,6 @@ struct nfp_net_hw { - memcpy(meta, &meta_data->data[layer], sizeof(meta_data->data[layer])); - } + struct nfp_cpp *cpp; + struct nfp_cpp_area *ctrl_area; +- struct nfp_cpp_area *mac_stats_area; +- uint8_t *mac_stats_bar; + uint8_t *mac_stats; + + /** Sequential physical port number, only valid for CoreNIC firmware */ +@@ -177,6 +181,9 @@ struct nfp_net_hw { + struct nfp_net_tlv_caps tlv_caps; + + struct nfp_net_ipsec_data *ipsec_data; + -+ return 0; - } ++ /** Used for firmware version */ ++ char fw_version[FW_VER_LEN]; + }; - uint16_t -@@ -225,6 +228,7 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, - uint16_t nb_pkts, - bool repr_flag) - { -+ int ret; - uint16_t i; - uint8_t offset; - uint32_t pkt_size; -@@ -271,7 +275,10 @@ nfp_net_nfd3_xmit_pkts_common(void *tx_queue, - if (!repr_flag) { - struct nfp_net_meta_raw meta_data; - memset(&meta_data, 0, sizeof(meta_data)); -- nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt); -+ ret = nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt); -+ if (unlikely(ret != 0)) -+ goto xmit_end; + static inline uint32_t +@@ -272,6 +279,11 @@ int nfp_net_flow_ctrl_get(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); + int nfp_net_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); ++void nfp_pf_uninit(struct nfp_pf_dev *pf_dev); ++uint32_t nfp_net_get_port_num(struct nfp_pf_dev *pf_dev, 
++ struct nfp_eth_table *nfp_eth_table); ++void nfp_net_notify_port_speed(struct nfp_net_hw *hw, ++ struct rte_eth_link *link); + + #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\ + ((struct nfp_app_fw_nic *)app_fw_priv) +diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c +index f21e120a43..8ca651ba55 100644 +--- a/dpdk/drivers/net/nfp/nfp_rxtx.c ++++ b/dpdk/drivers/net/nfp/nfp_rxtx.c +@@ -747,15 +747,6 @@ nfp_net_recv_pkts(void *rx_queue, + /* Checking the checksum flag */ + nfp_net_rx_cksum(rxq, rxds, mb); + +- if (meta.port_id == 0) { +- rx_pkts[avail++] = mb; +- } else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) { +- avail_multiplexed++; +- } else { +- rte_pktmbuf_free(mb); +- break; +- } +- + /* Now resetting and updating the descriptor */ + rxds->vals[0] = 0; + rxds->vals[1] = 0; +@@ -768,6 +759,15 @@ nfp_net_recv_pkts(void *rx_queue, + rxq->rd_p++; + if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */ + rxq->rd_p = 0; + - offset = meta_data.length; - } else { - offset = FLOWER_PKT_DATA_OFFSET; -diff --git a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c -index 0141fbcc8f..772c847b9d 100644 ---- a/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c -+++ b/dpdk/drivers/net/nfp/nfdk/nfp_nfdk_dp.c -@@ -167,7 +167,7 @@ close_block: - return nop_slots; ++ if (meta.port_id == 0) { ++ rx_pkts[avail++] = mb; ++ } else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) { ++ avail_multiplexed++; ++ } else { ++ rte_pktmbuf_free(mb); ++ break; ++ } + } + + if (nb_hold == 0) +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c +index a6fd89b6c8..ef1ffd6d01 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c +@@ -263,19 +263,6 @@ nfp_bitsize_calc(uint64_t mask) + return bit_size; } --static void -+static int - nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, - struct nfp_net_txq *txq, - uint64_t *metadata) -@@ -178,7 +178,6 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, - uint32_t cap_extend; - struct nfp_net_hw *hw; - uint32_t header_offset; -- uint8_t vlan_layer = 0; - uint8_t ipsec_layer = 0; - struct nfp_net_meta_raw meta_data; +-static int +-nfp_cmp_bars(const void *ptr_a, +- const void *ptr_b) +-{ +- const struct nfp_bar *a = ptr_a; +- const struct nfp_bar *b = ptr_b; +- +- if (a->bitsize == b->bitsize) +- return a->index - b->index; +- else +- return a->bitsize - b->bitsize; +-} +- + static bool + nfp_bars_for_secondary(uint32_t index) + { +@@ -383,9 +370,6 @@ nfp_enable_bars(struct nfp_pcie_user *nfp) + if (nfp_bar_write(nfp, bar, barcfg_msix_general) < 0) + return -EIO; -@@ -206,8 +205,10 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, - meta_data.length += 3 * NFP_NET_META_FIELD_SIZE; - } +- /* Sort bars by bit size - use the smallest possible first. 
*/ +- qsort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), nfp_cmp_bars); +- + return 0; + } -- if (meta_data.length == 0) -- return; -+ if (meta_data.length == 0) { -+ *metadata = 0; -+ return 0; -+ } +@@ -466,16 +450,18 @@ find_matching_bar(struct nfp_pcie_user *nfp, + int width) + { + uint32_t n; ++ uint32_t index; - meta_type = meta_data.header; - header_offset = meta_type << NFP_NET_META_NFDK_LENGTH; -@@ -221,17 +222,13 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, - meta += NFP_NET_META_FIELD_SIZE) { - switch (meta_type & NFP_NET_META_FIELD_MASK) { - case NFP_NET_META_VLAN: -- if (vlan_layer > 0) { -- PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported"); -- return; -- } -+ - nfp_net_set_meta_vlan(&meta_data, pkt, layer); -- vlan_layer++; - break; - case NFP_NET_META_IPSEC: - if (ipsec_layer > 2) { - PMD_DRV_LOG(ERR, "At most 3 layers of ipsec is supported for now."); -- return; -+ return -EINVAL; - } +- for (n = 0; n < nfp->bars; n++) { +- struct nfp_bar *bar = &nfp->bar[n]; ++ for (n = RTE_DIM(nfp->bar) ; n > 0; n--) { ++ index = n - 1; ++ struct nfp_bar *bar = &nfp->bar[index]; - nfp_net_set_meta_ipsec(&meta_data, txq, pkt, layer, ipsec_layer); -@@ -239,13 +236,15 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt, - break; - default: - PMD_DRV_LOG(ERR, "The metadata type not supported"); -- return; -+ return -ENOTSUP; - } + if (bar->lock) + continue; - memcpy(meta, &meta_data.data[layer], sizeof(meta_data.data[layer])); + if (matching_bar_exist(bar, target, action, token, + offset, size, width)) +- return n; ++ return index; } - *metadata = NFDK_DESC_TX_CHAIN_META; -+ -+ return 0; - } - - uint16_t -@@ -292,6 +291,7 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, + return -1; +@@ -493,10 +479,12 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp, + { + int ret; + uint32_t n; ++ uint32_t index; + const struct nfp_bar *bar; - /* Sending packets */ - while (npkts < nb_pkts && free_descs > 0) { -+ int ret; - int nop_descs; - uint32_t type; - uint32_t dma_len; -@@ -319,10 +319,13 @@ nfp_net_nfdk_xmit_pkts_common(void *tx_queue, +- for (n = 0; n < nfp->bars; n++) { +- bar = &nfp->bar[n]; ++ for (n = RTE_DIM(nfp->bar); n > 0; n--) { ++ index = n - 1; ++ bar = &nfp->bar[index]; - temp_pkt = pkt; + if (bar->bitsize == 0) + continue; +@@ -508,7 +496,7 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp, + continue; -- if (repr_flag) -+ if (repr_flag) { - metadata = NFDK_DESC_TX_CHAIN_META; -- else -- nfp_net_nfdk_set_meta_data(pkt, txq, &metadata); -+ } else { -+ ret = nfp_net_nfdk_set_meta_data(pkt, txq, &metadata); -+ if (unlikely(ret != 0)) -+ goto xmit_end; -+ } + if (!bar->lock) +- return n; ++ return index; + } - if (unlikely(pkt->nb_segs > 1 && - (hw->super.cap & NFP_NET_CFG_CTRL_GATHER) == 0)) { -diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index f02caf8056..7495b01f16 100644 ---- a/dpdk/drivers/net/nfp/nfp_ethdev.c -+++ b/dpdk/drivers/net/nfp/nfp_ethdev.c -@@ -310,6 +310,66 @@ nfp_net_keepalive_stop(struct nfp_multi_pf *multi_pf) - rte_eal_alarm_cancel(nfp_net_beat_timer, (void *)multi_pf); - } + return -EAGAIN; +@@ -561,7 +549,7 @@ nfp_disable_bars(struct nfp_pcie_user *nfp) + uint32_t i; + struct nfp_bar *bar; -+static void -+nfp_net_uninit(struct rte_eth_dev *eth_dev) -+{ -+ struct nfp_net_hw *net_hw; -+ -+ net_hw = eth_dev->data->dev_private; -+ rte_free(net_hw->eth_xstats_base); -+ nfp_ipsec_uninit(eth_dev); -+} -+ -+static void -+nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, -+ uint8_t id) -+{ -+ struct rte_eth_dev 
*eth_dev; -+ struct nfp_app_fw_nic *app_fw_nic; -+ -+ app_fw_nic = pf_dev->app_fw_priv; -+ if (app_fw_nic->ports[id] != NULL) { -+ eth_dev = app_fw_nic->ports[id]->eth_dev; -+ if (eth_dev != NULL) -+ nfp_net_uninit(eth_dev); -+ -+ app_fw_nic->ports[id] = NULL; -+ } -+} -+ -+static void -+nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) -+{ -+ nfp_cpp_area_release_free(pf_dev->ctrl_area); -+ rte_free(pf_dev->app_fw_priv); -+} -+ -+void -+nfp_pf_uninit(struct nfp_pf_dev *pf_dev) -+{ -+ nfp_cpp_area_release_free(pf_dev->mac_stats_area); -+ nfp_cpp_area_release_free(pf_dev->qc_area); -+ free(pf_dev->sym_tbl); -+ if (pf_dev->multi_pf.enabled) { -+ nfp_net_keepalive_stop(&pf_dev->multi_pf); -+ nfp_net_keepalive_uninit(&pf_dev->multi_pf); -+ } -+ free(pf_dev->nfp_eth_table); -+ free(pf_dev->hwinfo); -+ nfp_cpp_free(pf_dev->cpp); -+ rte_free(pf_dev); -+} -+ -+static int -+nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev) -+{ -+ free(pf_dev->sym_tbl); -+ nfp_cpp_free(pf_dev->cpp); -+ rte_free(pf_dev); -+ -+ return 0; -+} -+ - /* Reset and stop device. The device can not be restarted. */ - static int - nfp_net_close(struct rte_eth_dev *dev) -@@ -321,8 +381,19 @@ nfp_net_close(struct rte_eth_dev *dev) - struct rte_pci_device *pci_dev; - struct nfp_app_fw_nic *app_fw_nic; +- for (i = 0; i < nfp->bars; i++) { ++ for (i = 0; i < RTE_DIM(nfp->bar); i++) { + bar = &nfp->bar[i]; + if (bar->iomem != NULL) { + bar->iomem = NULL; +diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +index 3c10c7a090..edb78dfdc9 100644 +--- a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c ++++ b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c +@@ -168,7 +168,7 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, + if (tmp != key) + return NULL; -- if (rte_eal_process_type() != RTE_PROC_PRIMARY) -+ /* -+ * In secondary process, a released eth device can be found by its name -+ * in shared memory. -+ * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the -+ * eth device has been released. 
-+ */ -+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) { -+ if (dev->state == RTE_ETH_DEV_UNUSED) -+ return 0; -+ -+ nfp_pf_secondary_uninit(dev->process_private); - return 0; -+ } +- mutex = calloc(sizeof(*mutex), 1); ++ mutex = calloc(1, sizeof(*mutex)); + if (mutex == NULL) + return NULL; - hw = dev->data->dev_private; - pf_dev = hw->pf_dev; -@@ -339,16 +410,17 @@ nfp_net_close(struct rte_eth_dev *dev) - nfp_net_close_tx_queue(dev); - nfp_net_close_rx_queue(dev); +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_devids.h b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +index 83eedf423e..e1efa62015 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_devids.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_devids.h +@@ -83,6 +83,7 @@ + #define NGBE_YT8521S_SFP_GPIO 0x0062 + #define NGBE_INTERNAL_YT8521S_SFP_GPIO 0x0064 + #define NGBE_LY_YT8521S_SFP 0x0070 ++#define NGBE_RGMII_FPGA 0x0080 + #define NGBE_WOL_SUP 0x4000 + #define NGBE_NCSI_SUP 0x8000 -- /* Clear ipsec */ -- nfp_ipsec_uninit(dev); -- - /* Cancel possible impending LSC work here before releasing the port */ - rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c +index 22ccdb0b7d..68b0a8b8ab 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c ++++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c +@@ -173,6 +173,9 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw) + ngbe_reset_misc_em(hw); + hw->mac.clear_hw_cntrs(hw); - /* Only free PF resources after all physical ports have been closed */ - /* Mark this port as unused and free device priv resources */ - nn_cfg_writeb(&hw->super, NFP_NET_CFG_LSC, 0xff); -- app_fw_nic->ports[hw->idx] = NULL; -+ -+ if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) -+ return -EINVAL; ++ if (!((hw->sub_device_id & NGBE_OEM_MASK) == NGBE_RGMII_FPGA)) ++ hw->phy.set_phy_power(hw, false); + -+ nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx); + msec_delay(50); - for (i = 0; i < app_fw_nic->total_phyports; i++) { - id = nfp_function_id_get(pf_dev, i); -@@ -358,26 +430,16 @@ nfp_net_close(struct rte_eth_dev *dev) - return 0; - } + /* Store the permanent mac address */ +@@ -1064,7 +1067,7 @@ s32 ngbe_set_pcie_master(struct ngbe_hw *hw, bool enable) + u32 i; -- /* Now it is safe to free all PF resources */ -- PMD_INIT_LOG(INFO, "Freeing PF resources"); -- if (pf_dev->multi_pf.enabled) { -- nfp_net_keepalive_stop(&pf_dev->multi_pf); -- nfp_net_keepalive_uninit(&pf_dev->multi_pf); -- } -- nfp_cpp_area_free(pf_dev->ctrl_area); -- nfp_cpp_area_free(pf_dev->qc_area); -- free(pf_dev->hwinfo); -- free(pf_dev->sym_tbl); -- nfp_cpp_free(pf_dev->cpp); -- rte_free(app_fw_nic); -- rte_free(pf_dev); -- -+ /* Enable in nfp_net_start() */ - rte_intr_disable(pci_dev->intr_handle); + if (rte_pci_set_bus_master(pci_dev, enable) < 0) { +- DEBUGOUT("Cannot configure PCI bus master\n"); ++ DEBUGOUT("Cannot configure PCI bus master"); + return -1; + } -- /* Unregister callback func from eal lib */ -+ /* Register in nfp_net_init() */ - rte_intr_callback_unregister(pci_dev->intr_handle, - nfp_net_dev_interrupt_handler, (void *)dev); +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c +index ea313cd9a5..a374b015fd 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c +@@ -320,6 +320,10 @@ skip_an_fiber: + value |= value_r4; + ngbe_write_phy_reg_mdi(hw, YT_ANA, 0, value); -+ nfp_uninit_app_fw_nic(pf_dev); -+ nfp_pf_uninit(pf_dev); ++ /* config for yt8531sh-ca */ ++ 
ngbe_write_phy_reg_ext_yt(hw, YT_SPEC_CONF, 0, ++ YT_SPEC_CONF_8531SH_CA); + - return 0; - } - -@@ -576,28 +638,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev) + /* software reset to make the above configuration + * take effect + */ +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h +index ddf992e79a..c45bec7ce7 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h +@@ -32,6 +32,8 @@ + #define YT_MISC 0xA006 + #define YT_MISC_FIBER_PRIO MS16(8, 0x1) /* 0 for UTP */ + #define YT_MISC_RESV MS16(0, 0x1) ++#define YT_SPEC_CONF 0xA023 ++#define YT_SPEC_CONF_8531SH_CA 0x4031 - rte_eth_copy_pci_info(eth_dev, pci_dev); + /* SDS EXT */ + #define YT_AUTO 0xA5 +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_regs.h b/dpdk/drivers/net/ngbe/base/ngbe_regs.h +index c0e79a2ba7..0d820f4079 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_regs.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_regs.h +@@ -712,6 +712,8 @@ enum ngbe_5tuple_protocol { + #define NGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3) + #define NGBE_MACRXFLT_RXALL MS(31, 0x1) -- if (port == 0 || pf_dev->multi_pf.enabled) { -- uint32_t min_size; -- -+ if (pf_dev->multi_pf.enabled) - hw->ctrl_bar = pf_dev->ctrl_bar; -- min_size = NFP_MAC_STATS_SIZE * net_hw->pf_dev->nfp_eth_table->max_index; -- net_hw->mac_stats_bar = nfp_rtsym_map(net_hw->pf_dev->sym_tbl, "_mac_stats", -- min_size, &net_hw->mac_stats_area); -- if (net_hw->mac_stats_bar == NULL) { -- PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats_bar"); -- return -EIO; -- } ++#define NGBE_MAC_WDG_TIMEOUT 0x01100C ++ + /****************************************************************************** + * Statistic Registers + ******************************************************************************/ +diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c +index 478da014b2..aca6c2aaa1 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c ++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c +@@ -263,6 +263,8 @@ ngbe_pf_reset_hw(struct ngbe_hw *hw) + status = hw->mac.reset_hw(hw); + + ctrl_ext = rd32(hw, NGBE_PORTCTL); ++ /* let hardware know driver is loaded */ ++ ctrl_ext |= NGBE_PORTCTL_DRVLOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= NGBE_PORTCTL_RSTDONE; + wr32(hw, NGBE_PORTCTL, ctrl_ext); +@@ -381,7 +383,7 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + ssid = ngbe_flash_read_dword(hw, 0xFFFDC); + if (ssid == 0x1) { + PMD_INIT_LOG(ERR, +- "Read of internal subsystem device id failed\n"); ++ "Read of internal subsystem device id failed"); + return -ENODEV; + } + hw->sub_system_id = (u16)ssid >> 8 | (u16)ssid << 8; +@@ -546,7 +548,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) + if (ethdev == NULL) + return 0; + +- return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit); ++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit); + } + + static struct rte_pci_driver rte_ngbe_pmd = { +@@ -582,41 +584,25 @@ ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + } + + static void +-ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++ngbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on) + { +- struct ngbe_hw *hw = ngbe_dev_hw(dev); +- struct ngbe_rx_queue *rxq; +- bool restart; +- uint32_t rxcfg, rxbal, rxbah; - -- net_hw->mac_stats = net_hw->mac_stats_bar; + if (on) + ngbe_vlan_hw_strip_enable(dev, queue); + else + 
ngbe_vlan_hw_strip_disable(dev, queue); ++} + +- rxq = dev->data->rx_queues[queue]; +- rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx)); +- rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx)); +- rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx)); +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { +- restart = (rxcfg & NGBE_RXCFG_ENA) && +- !(rxcfg & NGBE_RXCFG_VLAN); +- rxcfg |= NGBE_RXCFG_VLAN; - } else { -- if (pf_dev->ctrl_bar == NULL) -- return -ENODEV; -- -- /* Use port offset in pf ctrl_bar for this ports control bar */ -+ else - hw->ctrl_bar = pf_dev->ctrl_bar + (port * NFP_NET_CFG_BAR_SZ); -- net_hw->mac_stats = app_fw_nic->ports[0]->mac_stats_bar + -+ -+ net_hw->mac_stats = pf_dev->mac_stats_bar + - (net_hw->nfp_idx * NFP_MAC_STATS_SIZE); +- restart = (rxcfg & NGBE_RXCFG_ENA) && +- (rxcfg & NGBE_RXCFG_VLAN); +- rxcfg &= ~NGBE_RXCFG_VLAN; - } +- rxcfg &= ~NGBE_RXCFG_ENA; ++static void ++ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); - PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar); - PMD_INIT_LOG(DEBUG, "MAC stats: %p", net_hw->mac_stats); -@@ -625,7 +672,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) - if (net_hw->eth_xstats_base == NULL) { - PMD_INIT_LOG(ERR, "no memory for xstats base values on device %s!", - pci_dev->device.name); -- return -ENOMEM; -+ err = -ENOMEM; -+ goto ipsec_exit; - } - - /* Work out where in the BAR the queues start. */ -@@ -655,7 +703,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) - eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0); - if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to space for MAC address"); -- return -ENOMEM; -+ err = -ENOMEM; -+ goto xstats_free; +- if (restart) { +- /* set vlan strip for ring */ +- ngbe_dev_rx_queue_stop(dev, queue); +- wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal); +- wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah); +- wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg); +- ngbe_dev_rx_queue_start(dev, queue); ++ if (!hw->adapter_stopped) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return; } - - nfp_net_pf_read_mac(app_fw_nic, port); -@@ -693,6 +742,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev) - nfp_net_stats_reset(eth_dev); - - return 0; -+ -+xstats_free: -+ rte_free(net_hw->eth_xstats_base); -+ipsec_exit: -+ nfp_ipsec_uninit(eth_dev); + -+ return err; ++ ngbe_vlan_strip_q_set(dev, queue, on); } - #define DEFAULT_FW_PATH "/lib/firmware/netronome" -@@ -1120,26 +1176,46 @@ port_cleanup: - app_fw_nic->ports[id]->eth_dev != NULL) { - struct rte_eth_dev *tmp_dev; - tmp_dev = app_fw_nic->ports[id]->eth_dev; -- nfp_ipsec_uninit(tmp_dev); -+ nfp_net_uninit(tmp_dev); - rte_eth_dev_release_port(tmp_dev); -- app_fw_nic->ports[id] = NULL; - } - } -- nfp_cpp_area_free(pf_dev->ctrl_area); -+ nfp_cpp_area_release_free(pf_dev->ctrl_area); - app_cleanup: - rte_free(app_fw_nic); + static int +@@ -842,9 +828,9 @@ ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev) + rxq = dev->data->rx_queues[i]; - return ret; + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- ngbe_vlan_hw_strip_enable(dev, i); ++ ngbe_vlan_strip_q_set(dev, i, 1); + else +- ngbe_vlan_hw_strip_disable(dev, i); ++ ngbe_vlan_strip_q_set(dev, i, 0); + } } -+/* Force the physical port down to clear the possible DMA error */ +@@ -906,6 +892,13 @@ ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) static int --nfp_pf_init(struct rte_pci_device *pci_dev) -+nfp_net_force_port_down(struct nfp_pf_dev *pf_dev, -+ struct nfp_eth_table *nfp_eth_table, -+ struct nfp_cpp 
*cpp) + ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { -+ int ret; - uint32_t i; - uint32_t id; -+ uint32_t index; -+ uint32_t count; ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); + -+ count = nfp_net_get_port_num(pf_dev, nfp_eth_table); -+ for (i = 0; i < count; i++) { -+ id = nfp_function_id_get(pf_dev, i); -+ index = nfp_eth_table->ports[id].index; -+ ret = nfp_eth_set_configured(cpp, index, 0); -+ if (ret < 0) -+ return ret; ++ if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return -EPERM; + } + -+ return 0; -+} + ngbe_config_vlan_strip_on_all_queues(dev, mask); + + ngbe_vlan_offload_config(dev, mask); +@@ -1269,6 +1262,9 @@ ngbe_dev_close(struct rte_eth_dev *dev) + + ngbe_dev_stop(dev); + ++ /* Let firmware take over control of hardware */ ++ wr32m(hw, NGBE_PORTCTL, NGBE_PORTCTL_DRVLOAD, 0); + -+static int -+nfp_pf_init(struct rte_pci_device *pci_dev) -+{ - int ret = 0; - uint64_t addr; -- uint32_t index; - uint32_t cpp_id; - uint8_t function_id; - struct nfp_cpp *cpp; -@@ -1211,11 +1287,11 @@ nfp_pf_init(struct rte_pci_device *pci_dev) - pf_dev->multi_pf.enabled = nfp_check_multi_pf_from_nsp(pci_dev, cpp); - pf_dev->multi_pf.function_id = function_id; + ngbe_dev_free_queues(dev); -- /* Force the physical port down to clear the possible DMA error */ -- for (i = 0; i < nfp_eth_table->count; i++) { -- id = nfp_function_id_get(pf_dev, i); -- index = nfp_eth_table->ports[id].index; -- nfp_eth_set_configured(cpp, index, 0); -+ ret = nfp_net_force_port_down(pf_dev, nfp_eth_table, cpp); -+ if (ret != 0) { -+ PMD_INIT_LOG(ERR, "Failed to force port down"); -+ ret = -EIO; -+ goto eth_table_cleanup; - } + ngbe_set_pcie_master(hw, false); +@@ -1811,7 +1807,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; + dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; +- dev_info->max_rx_pktlen = 15872; ++ dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD; ++ dev_info->min_mtu = RTE_ETHER_MIN_MTU; ++ dev_info->max_mtu = NGBE_MAX_MTU; + dev_info->max_mac_addrs = hw->mac.num_rar_entries; + dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC; + dev_info->max_vfs = pci_dev->max_vfs; +@@ -1909,6 +1907,7 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + bool link_up; + int err; + int wait = 1; ++ u32 reg; - if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo, -@@ -1264,6 +1340,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev) + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; +@@ -1966,8 +1965,13 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, + wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, + NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); + } ++ /* Re configure MAC RX */ ++ reg = rd32(hw, NGBE_MACRXCFG); ++ wr32(hw, NGBE_MACRXCFG, reg); + wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, + NGBE_MACRXFLT_PROMISC); ++ reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); ++ wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); + } - PMD_INIT_LOG(DEBUG, "qc_bar address: %p", pf_dev->qc_bar); + return rte_eth_linkstatus_set(dev, &link); +@@ -2155,6 +2159,19 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_interrupt *intr = ngbe_dev_intr(dev); -+ pf_dev->mac_stats_bar = nfp_rtsym_map(sym_tbl, "_mac_stats", -+ NFP_MAC_STATS_SIZE * nfp_eth_table->max_index, -+ &pf_dev->mac_stats_area); -+ if (pf_dev->mac_stats_bar == NULL) { -+ 
PMD_INIT_LOG(ERR, "nfp_rtsym_map fails for _mac_stats"); -+ goto hwqueues_cleanup; ++ eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0]; ++ if (!eicr) { ++ /* ++ * shared interrupt alert! ++ * make sure interrupts are enabled because the read will ++ * have disabled interrupts. ++ */ ++ if (!hw->adapter_stopped) ++ ngbe_enable_intr(dev); ++ return 0; + } ++ ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0] = 0; + - /* - * PF initialization has been done at this point. Call app specific - * init code now. -@@ -1273,14 +1357,14 @@ nfp_pf_init(struct rte_pci_device *pci_dev) - if (pf_dev->multi_pf.enabled) { - ret = nfp_enable_multi_pf(pf_dev); - if (ret != 0) -- goto hwqueues_cleanup; -+ goto mac_stats_cleanup; + /* read-on-clear nic registers here */ + eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); +diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h +index 3cde7c8750..9b43d5f20e 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h ++++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h +@@ -32,6 +32,7 @@ + + #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ + ++#define NGBE_MAX_MTU 9414 + /* The overhead from MTU to max frame size. */ + #define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + +diff --git a/dpdk/drivers/net/ngbe/ngbe_pf.c b/dpdk/drivers/net/ngbe/ngbe_pf.c +index 947ae7fe94..bb62e2fbb7 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_pf.c ++++ b/dpdk/drivers/net/ngbe/ngbe_pf.c +@@ -71,7 +71,7 @@ int ngbe_pf_host_init(struct rte_eth_dev *eth_dev) + sizeof(struct ngbe_vf_info) * vf_num, 0); + if (*vfinfo == NULL) { + PMD_INIT_LOG(ERR, +- "Cannot allocate memory for private VF data\n"); ++ "Cannot allocate memory for private VF data"); + return -ENOMEM; + } + +@@ -320,7 +320,7 @@ ngbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf) + + vmolr = rd32(hw, NGBE_POOLETHCTL(vf)); + +- PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf); ++ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous", vf); + + vmolr &= ~NGBE_POOLETHCTL_MCP; + +@@ -482,7 +482,7 @@ ngbe_negotiate_vf_api(struct rte_eth_dev *eth_dev, + break; + } + +- PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n", ++ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d", + api_version, vf); + + return -1; +@@ -564,7 +564,7 @@ ngbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev, + if (!(fctrl & NGBE_PSRCTL_UCP)) { + /* VF promisc requires PF in promisc */ + PMD_DRV_LOG(ERR, +- "Enabling VF promisc requires PF in promisc\n"); ++ "Enabling VF promisc requires PF in promisc"); + return -1; } - PMD_INIT_LOG(INFO, "Initializing coreNIC"); - ret = nfp_init_app_fw_nic(pf_dev, dev_info); - if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not initialize coreNIC!"); -- goto hwqueues_cleanup; -+ goto mac_stats_cleanup; +@@ -601,7 +601,7 @@ ngbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + + if (index) { + if (!rte_is_valid_assigned_ether_addr(ea)) { +- PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf); ++ PMD_DRV_LOG(ERR, "set invalid mac vf:%d", vf); + return -1; } - break; - case NFP_APP_FW_FLOWER_NIC: -@@ -1288,13 +1372,13 @@ nfp_pf_init(struct rte_pci_device *pci_dev) - ret = nfp_init_app_fw_flower(pf_dev, dev_info); - if (ret != 0) { - PMD_INIT_LOG(ERR, "Could not initialize Flower!"); -- goto hwqueues_cleanup; -+ goto mac_stats_cleanup; + +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +index 8a873b858e..4680ff91f1 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c ++++ 
b/dpdk/drivers/net/ngbe/ngbe_rxtx.c +@@ -1791,6 +1791,7 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq) + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->free_swring(txq); ++ rte_memzone_free(txq->mz); } - break; - default: - PMD_INIT_LOG(ERR, "Unsupported Firmware loaded"); - ret = -EINVAL; -- goto hwqueues_cleanup; -+ goto mac_stats_cleanup; + rte_free(txq); + } +@@ -1995,6 +1996,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; } - /* Register the CPP bridge service here for primary use */ -@@ -1304,13 +1388,18 @@ nfp_pf_init(struct rte_pci_device *pci_dev) ++ txq->mz = tz; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; +@@ -2097,6 +2099,7 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq) + ngbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq->sw_sc_ring); ++ rte_memzone_free(rxq->mz); + rte_free(rxq); + } + } +@@ -2187,6 +2190,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; ++ rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; + } +@@ -2277,6 +2281,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + return -ENOMEM; + } + ++ rxq->mz = rz; + /* + * Zero init all the descriptors in the ring. + */ +diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.h b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +index 9130f9d0df..2914b9a756 100644 +--- a/dpdk/drivers/net/ngbe/ngbe_rxtx.h ++++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.h +@@ -276,6 +276,7 @@ struct ngbe_rx_queue { + struct rte_mbuf fake_mbuf; + /** hold packets to return to application */ + struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2]; ++ const struct rte_memzone *mz; + }; - return 0; + /** +@@ -353,6 +354,7 @@ struct ngbe_tx_queue { + uint8_t tx_deferred_start; /**< not in global dev start */ -+mac_stats_cleanup: -+ nfp_cpp_area_release_free(pf_dev->mac_stats_area); - hwqueues_cleanup: -- nfp_cpp_area_free(pf_dev->qc_area); -+ nfp_cpp_area_release_free(pf_dev->qc_area); - sym_tbl_cleanup: - free(sym_tbl); - fw_cleanup: - nfp_fw_unload(cpp); -- nfp_net_keepalive_stop(&pf_dev->multi_pf); -+ if (pf_dev->multi_pf.enabled) { -+ nfp_net_keepalive_stop(&pf_dev->multi_pf); -+ nfp_net_keepalive_uninit(&pf_dev->multi_pf); -+ } - eth_table_cleanup: - free(nfp_eth_table); - hwinfo_cleanup: -@@ -1437,7 +1526,7 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) - if (sym_tbl == NULL) { - PMD_INIT_LOG(ERR, "Something is wrong with the firmware symbol table"); - ret = -EIO; -- goto pf_cleanup; -+ goto cpp_cleanup; - } + const struct ngbe_txq_ops *ops; /**< txq ops */ ++ const struct rte_memzone *mz; + }; - /* Read the app ID of the firmware loaded */ -@@ -1484,6 +1573,8 @@ nfp_pf_secondary_init(struct rte_pci_device *pci_dev) + struct ngbe_txq_ops { +diff --git a/dpdk/drivers/net/octeon_ep/cnxk_ep_tx.c b/dpdk/drivers/net/octeon_ep/cnxk_ep_tx.c +index 9f11a2f317..8628edf8a7 100644 +--- a/dpdk/drivers/net/octeon_ep/cnxk_ep_tx.c ++++ b/dpdk/drivers/net/octeon_ep/cnxk_ep_tx.c +@@ -139,7 +139,7 @@ cnxk_ep_xmit_pkts_scalar_mseg(struct rte_mbuf **tx_pkts, struct otx_ep_instr_que + num_sg = (frags + mask) / OTX_EP_NUM_SG_PTRS; + + if (unlikely(pkt_len > OTX_EP_MAX_PKT_SZ && num_sg > OTX_EP_MAX_SG_LISTS)) { +- otx_ep_err("Failed to xmit the pkt, pkt_len is higher or pkt has more segments\n"); ++ otx_ep_err("Failed to xmit the pkt, pkt_len is higher or 
pkt has more segments"); + goto exit; + } - sym_tbl_cleanup: - free(sym_tbl); -+cpp_cleanup: -+ nfp_cpp_free(cpp); - pf_cleanup: - rte_free(pf_dev); +diff --git a/dpdk/drivers/net/octeon_ep/cnxk_ep_vf.c b/dpdk/drivers/net/octeon_ep/cnxk_ep_vf.c +index ef275703c3..74b63a161f 100644 +--- a/dpdk/drivers/net/octeon_ep/cnxk_ep_vf.c ++++ b/dpdk/drivers/net/octeon_ep/cnxk_ep_vf.c +@@ -102,7 +102,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no) + } -diff --git a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -index 7927f53403..cfe7225ca5 100644 ---- a/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -+++ b/dpdk/drivers/net/nfp/nfp_ethdev_vf.c -@@ -160,13 +160,17 @@ nfp_netvf_set_link_down(struct rte_eth_dev *dev __rte_unused) - static int - nfp_netvf_close(struct rte_eth_dev *dev) - { -+ struct nfp_net_hw *net_hw; - struct rte_pci_device *pci_dev; + if (loop < 0) { +- otx_ep_err("IDLE bit is not set\n"); ++ otx_ep_err("IDLE bit is not set"); + return -EIO; + } - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; +@@ -134,7 +134,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no) + } while (reg_val != 0 && loop--); -+ net_hw = dev->data->dev_private; - pci_dev = RTE_ETH_DEV_TO_PCI(dev); + if (loop < 0) { +- otx_ep_err("INST CNT REGISTER is not zero\n"); ++ otx_ep_err("INST CNT REGISTER is not zero"); + return -EIO; + } -+ rte_free(net_hw->eth_xstats_base); -+ - /* - * We assume that the DPDK application is stopping all the - * threads/queues before calling the device close function. -@@ -284,8 +288,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; +@@ -181,7 +181,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) + } -- rte_eth_copy_pci_info(eth_dev, pci_dev); -- - net_hw->eth_xstats_base = rte_malloc("rte_eth_xstat", - sizeof(struct rte_eth_xstat) * nfp_net_xstats_size(eth_dev), 0); - if (net_hw->eth_xstats_base == NULL) { -@@ -323,7 +325,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) - if (eth_dev->data->mac_addrs == NULL) { - PMD_INIT_LOG(ERR, "Failed to space for MAC address"); - err = -ENOMEM; -- goto dev_err_ctrl_map; -+ goto free_xstats; + if (loop < 0) { +- otx_ep_err("OUT CNT REGISTER value is zero\n"); ++ otx_ep_err("OUT CNT REGISTER value is zero"); + return -EIO; } - nfp_read_mac(hw); -@@ -360,8 +362,8 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev) +@@ -217,7 +217,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) + } - return 0; + if (loop < 0) { +- otx_ep_err("Packets credit register value is not cleared\n"); ++ otx_ep_err("Packets credit register value is not cleared"); + return -EIO; + } --dev_err_ctrl_map: -- nfp_cpp_area_free(net_hw->ctrl_area); -+free_xstats: -+ rte_free(net_hw->eth_xstats_base); +@@ -250,7 +250,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) + } - return err; - } -diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c -index f832b52d89..91ebee5db4 100644 ---- a/dpdk/drivers/net/nfp/nfp_flow.c -+++ b/dpdk/drivers/net/nfp/nfp_flow.c -@@ -312,14 +312,14 @@ nfp_check_mask_add(struct nfp_flow_priv *priv, - ret = nfp_mask_table_add(priv, mask_data, mask_len, mask_id); - if (ret != 0) - return false; -+ -+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; - } else { - /* Mask entry already exist */ - mask_entry->ref_cnt++; - *mask_id = mask_entry->mask_id; + if (loop < 0) { +- otx_ep_err("Packets sent register value is not cleared\n"); 
++ otx_ep_err("Packets sent register value is not cleared"); + return -EIO; } -- *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK; -- - return true; - } +@@ -280,7 +280,7 @@ cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no) + } -@@ -3658,7 +3658,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, - ttl_tos_flag = true; - } - } else { -- nfp_flow_action_set_hl(position, action, ttl_tos_flag); -+ nfp_flow_action_set_hl(position, action, tc_hl_flag); - if (!tc_hl_flag) { - position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); - tc_hl_flag = true; -@@ -3675,7 +3675,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, - break; - case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: - PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP"); -- nfp_flow_action_set_tc(position, action, ttl_tos_flag); -+ nfp_flow_action_set_tc(position, action, tc_hl_flag); - if (!tc_hl_flag) { - position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); - tc_hl_flag = true; -@@ -3741,6 +3741,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, - total_actions++; + if (loop < 0) { +- otx_ep_err("INSTR DBELL not coming back to 0\n"); ++ otx_ep_err("INSTR DBELL not coming back to 0"); + return -EIO; } -+ if (nfp_flow->install_flag && total_actions == 0) { -+ PMD_DRV_LOG(ERR, "The action list is empty"); -+ return -ENOTSUP; -+ } -+ - if (drop_flag) - nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP); - else if (total_actions > 1) -diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.c b/dpdk/drivers/net/nfp/nfp_ipsec.c -index 452947380e..b10cda570b 100644 ---- a/dpdk/drivers/net/nfp/nfp_ipsec.c -+++ b/dpdk/drivers/net/nfp/nfp_ipsec.c -@@ -18,6 +18,7 @@ - #include "nfp_rxtx.h" +diff --git a/dpdk/drivers/net/octeon_ep/otx2_ep_vf.c b/dpdk/drivers/net/octeon_ep/otx2_ep_vf.c +index 7f4edf8dcf..fdab542246 100644 +--- a/dpdk/drivers/net/octeon_ep/otx2_ep_vf.c ++++ b/dpdk/drivers/net/octeon_ep/otx2_ep_vf.c +@@ -37,7 +37,7 @@ otx2_vf_reset_iq(struct otx_ep_device *otx_ep, int q_no) + SDP_VF_R_IN_INSTR_DBELL(q_no)); + } + if (loop < 0) { +- otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__); ++ otx_ep_err("%s: doorbell init retry limit exceeded.", __func__); + return -EIO; + } - #define NFP_UDP_ESP_PORT 4500 -+#define NFP_ESP_IV_LENGTH 8 +@@ -48,7 +48,7 @@ otx2_vf_reset_iq(struct otx_ep_device *otx_ep, int q_no) + rte_delay_ms(1); + } while ((d64 & ~SDP_VF_R_IN_CNTS_OUT_INT) != 0 && loop--); + if (loop < 0) { +- otx_ep_err("%s: in_cnts init retry limit exceeded.\n", __func__); ++ otx_ep_err("%s: in_cnts init retry limit exceeded.", __func__); + return -EIO; + } - static const struct rte_cryptodev_capabilities nfp_crypto_caps[] = { - { -@@ -521,10 +522,14 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, - char *save; - char *iv_b; - char *iv_str; -- uint8_t *cfg_iv; -+ const rte_be32_t *iv_value; -+ uint8_t cfg_iv[NFP_ESP_IV_LENGTH] = {}; +@@ -81,7 +81,7 @@ otx2_vf_reset_oq(struct otx_ep_device *otx_ep, int q_no) + SDP_VF_R_OUT_SLIST_DBELL(q_no)); + } + if (loop < 0) { +- otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__); ++ otx_ep_err("%s: doorbell init retry limit exceeded.", __func__); + return -EIO; + } - iv_str = strdup(iv_string); -- cfg_iv = (uint8_t *)cfg->aesgcm_fields.iv; -+ if (iv_str == NULL) { -+ PMD_DRV_LOG(ERR, "Failed to strdup iv_string"); -+ return; -+ } +@@ -109,7 +109,7 @@ otx2_vf_reset_oq(struct otx_ep_device *otx_ep, int q_no) + rte_delay_ms(1); + } while ((d64 & ~SDP_VF_R_OUT_CNTS_IN_INT) != 0 && 
loop--); + if (loop < 0) { +- otx_ep_err("%s: out_cnts init retry limit exceeded.\n", __func__); ++ otx_ep_err("%s: out_cnts init retry limit exceeded.", __func__); + return -EIO; + } - for (i = 0; i < iv_len; i++) { - iv_b = strtok_r(i ? NULL : iv_str, ",", &save); -@@ -534,8 +539,9 @@ nfp_aesgcm_iv_update(struct ipsec_add_sa *cfg, - cfg_iv[i] = strtoul(iv_b, NULL, 0); +@@ -252,7 +252,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no) } -- *(uint32_t *)cfg_iv = rte_be_to_cpu_32(*(uint32_t *)cfg_iv); -- *(uint32_t *)&cfg_iv[4] = rte_be_to_cpu_32(*(uint32_t *)&cfg_iv[4]); -+ iv_value = (const rte_be32_t *)(cfg_iv); -+ cfg->aesgcm_fields.iv[0] = rte_be_to_cpu_32(iv_value[0]); -+ cfg->aesgcm_fields.iv[1] = rte_be_to_cpu_32(iv_value[1]); + if (loop < 0) { +- otx_ep_err("IDLE bit is not set\n"); ++ otx_ep_err("IDLE bit is not set"); + return -EIO; + } - free(iv_str); - } -@@ -576,7 +582,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, - uint32_t offset; - uint32_t device_id; - const char *iv_str; -- const uint32_t *key; -+ const rte_be32_t *key; - struct nfp_net_hw *net_hw; +@@ -283,7 +283,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no) + } while (reg_val != 0 && loop--); - net_hw = eth_dev->data->dev_private; -@@ -626,7 +632,7 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, - return -EINVAL; + if (loop < 0) { +- otx_ep_err("INST CNT REGISTER is not zero\n"); ++ otx_ep_err("INST CNT REGISTER is not zero"); + return -EIO; } -- key = (const uint32_t *)(aead->key.data); -+ key = (const rte_be32_t *)(aead->key.data); +@@ -332,7 +332,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) + } - /* - * The CHACHA20's key order needs to be adjusted based on hardware design. -@@ -638,16 +644,22 @@ nfp_aead_map(struct rte_eth_dev *eth_dev, + if (loop < 0) { +- otx_ep_err("OUT CNT REGISTER value is zero\n"); ++ otx_ep_err("OUT CNT REGISTER value is zero"); + return -EIO; + } - for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) { - index = (i + offset) % (key_length / sizeof(cfg->cipher_key[0])); -- cfg->cipher_key[index] = rte_cpu_to_be_32(*key++); -+ cfg->cipher_key[index] = rte_be_to_cpu_32(key[i]); +@@ -368,7 +368,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) } - /* -- * The iv of the FW is equal to ESN by default. Reading the -- * iv of the configuration information is not supported. -+ * The iv of the FW is equal to ESN by default. Only the -+ * aead algorithm can offload the iv of configuration and -+ * the length of iv cannot be greater than NFP_ESP_IV_LENGTH. 
- */ - iv_str = getenv("ETH_SEC_IV_OVR"); - if (iv_str != NULL) { - iv_len = aead->iv.length; -+ if (iv_len > NFP_ESP_IV_LENGTH) { -+ PMD_DRV_LOG(ERR, "Unsupported length of iv data"); -+ return -EINVAL; -+ } -+ - nfp_aesgcm_iv_update(cfg, iv_len, iv_str); + if (loop < 0) { +- otx_ep_err("Packets credit register value is not cleared\n"); ++ otx_ep_err("Packets credit register value is not cleared"); + return -EIO; + } + otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg)); +@@ -425,7 +425,7 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no) } -@@ -664,7 +676,7 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev, - int ret; - uint32_t i; - uint32_t device_id; -- const uint32_t *key; -+ const rte_be32_t *key; - struct nfp_net_hw *net_hw; + if (loop < 0) { +- otx_ep_err("INSTR DBELL not coming back to 0\n"); ++ otx_ep_err("INSTR DBELL not coming back to 0"); + return -EIO; + } - net_hw = eth_dev->data->dev_private; -@@ -698,14 +710,14 @@ nfp_cipher_map(struct rte_eth_dev *eth_dev, +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_common.h b/dpdk/drivers/net/octeon_ep/otx_ep_common.h +index 82e57520d3..938c51b35d 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_common.h ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_common.h +@@ -119,7 +119,7 @@ union otx_ep_instr_irh { + {\ + typeof(value) val = (value); \ + typeof(reg_off) off = (reg_off); \ +- otx_ep_dbg("octeon_write_csr64: reg: 0x%08lx val: 0x%016llx\n", \ ++ otx_ep_dbg("octeon_write_csr64: reg: 0x%08lx val: 0x%016llx", \ + (unsigned long)off, (unsigned long long)val); \ + rte_write64(val, ((base_addr) + off)); \ + } +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c b/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c +index 615cbbb648..c0298a56ac 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_ethdev.c +@@ -118,7 +118,7 @@ otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete) + ret = otx_ep_mbox_get_link_info(eth_dev, &link); + if (ret) + return -EINVAL; +- otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n", ++ otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d", + link.link_status, link.link_duplex, link.link_autoneg, link.link_speed); + return rte_eth_linkstatus_set(eth_dev, &link); + } +@@ -163,7 +163,7 @@ otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev, + ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr); + if (ret) return -EINVAL; +- otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "", + RTE_ETHER_ADDR_BYTES(mac_addr)); + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs); + return 0; +@@ -180,7 +180,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) + /* Enable IQ/OQ for this device */ + ret = otx_epvf->fn_list.enable_io_queues(otx_epvf); + if (ret) { +- otx_ep_err("IOQ enable failed\n"); ++ otx_ep_err("IOQ enable failed"); + return ret; } -- key = (const uint32_t *)(cipher->key.data); -+ key = (const rte_be32_t *)(cipher->key.data); - if (key_length > sizeof(cfg->cipher_key)) { - PMD_DRV_LOG(ERR, "Insufficient space for offloaded key"); - return -EINVAL; +@@ -189,7 +189,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) + otx_epvf->droq[q]->pkts_credit_reg); + + rte_wmb(); +- otx_ep_info("OQ[%d] dbells [%d]\n", q, ++ otx_ep_info("OQ[%d] dbells [%d]", q, + rte_read32(otx_epvf->droq[q]->pkts_credit_reg)); + } + +@@ -198,7 +198,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev) + otx_ep_set_tx_func(eth_dev); + 
otx_ep_set_rx_func(eth_dev); + +- otx_ep_info("dev started\n"); ++ otx_ep_info("dev started"); + + for (q = 0; q < eth_dev->data->nb_rx_queues; q++) + eth_dev->data->rx_queue_state[q] = RTE_ETH_QUEUE_STATE_STARTED; +@@ -241,7 +241,7 @@ otx_ep_ism_setup(struct otx_ep_device *otx_epvf) + /* Same DMA buffer is shared by OQ and IQ, clear it at start */ + memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE); + if (otx_epvf->ism_buffer_mz == NULL) { +- otx_ep_err("Failed to allocate ISM buffer\n"); ++ otx_ep_err("Failed to allocate ISM buffer"); + return(-1); + } + otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64, +@@ -285,12 +285,12 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf) + ret = -EINVAL; + break; + default: +- otx_ep_err("Unsupported device\n"); ++ otx_ep_err("Unsupported device"); + ret = -EINVAL; } - for (i = 0; i < key_length / sizeof(cfg->cipher_key[0]); i++) -- cfg->cipher_key[i] = rte_cpu_to_be_32(*key++); -+ cfg->cipher_key[i] = rte_be_to_cpu_32(key[i]); + if (!ret) +- otx_ep_info("OTX_EP dev_id[%d]\n", dev_id); ++ otx_ep_info("OTX_EP dev_id[%d]", dev_id); - return 0; + return ret; } -@@ -800,7 +812,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, - uint32_t i; - uint8_t key_length; - uint32_t device_id; -- const uint32_t *key; -+ const rte_be32_t *key; - struct nfp_net_hw *net_hw; +@@ -304,7 +304,7 @@ otx_epdev_init(struct otx_ep_device *otx_epvf) - if (digest_length == 0) { -@@ -847,7 +859,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, - return -EINVAL; + ret = otx_ep_chip_specific_setup(otx_epvf); + if (ret) { +- otx_ep_err("Chip specific setup failed\n"); ++ otx_ep_err("Chip specific setup failed"); + goto setup_fail; } -- key = (const uint32_t *)(auth->key.data); -+ key = (const rte_be32_t *)(auth->key.data); - key_length = auth->key.length; - if (key_length > sizeof(cfg->auth_key)) { - PMD_DRV_LOG(ERR, "Insufficient space for offloaded auth key!"); -@@ -855,7 +867,7 @@ nfp_auth_map(struct rte_eth_dev *eth_dev, +@@ -328,7 +328,7 @@ otx_epdev_init(struct otx_ep_device *otx_epvf) + otx_epvf->eth_dev->rx_pkt_burst = &cnxk_ep_recv_pkts; + otx_epvf->chip_gen = OTX_EP_CN10XX; + } else { +- otx_ep_err("Invalid chip_id\n"); ++ otx_ep_err("Invalid chip_id"); + ret = -EINVAL; + goto setup_fail; } +@@ -336,7 +336,7 @@ otx_epdev_init(struct otx_ep_device *otx_epvf) + otx_epvf->max_rx_queues = ethdev_queues; + otx_epvf->max_tx_queues = ethdev_queues; - for (i = 0; i < key_length / sizeof(cfg->auth_key[0]); i++) -- cfg->auth_key[i] = rte_cpu_to_be_32(*key++); -+ cfg->auth_key[i] = rte_be_to_cpu_32(key[i]); - - return 0; - } -@@ -895,7 +907,7 @@ nfp_crypto_msg_build(struct rte_eth_dev *eth_dev, - return ret; - } +- otx_ep_info("OTX_EP Device is Ready\n"); ++ otx_ep_info("OTX_EP Device is Ready"); -- cfg->aesgcm_fields.salt = rte_cpu_to_be_32(conf->ipsec.salt); -+ cfg->aesgcm_fields.salt = conf->ipsec.salt; - break; - case RTE_CRYPTO_SYM_XFORM_AUTH: - /* Only support Auth + Cipher for inbound */ -@@ -960,7 +972,10 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, - struct rte_security_session_conf *conf, - struct nfp_ipsec_msg *msg) - { -+ int i; - int ret; -+ rte_be32_t *src_ip; -+ rte_be32_t *dst_ip; - struct ipsec_add_sa *cfg; - enum rte_security_ipsec_tunnel_type type; + setup_fail: + return ret; +@@ -356,10 +356,10 @@ otx_ep_dev_configure(struct rte_eth_dev *eth_dev) + txmode = &conf->txmode; + if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues || + eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) { +- otx_ep_err("invalid num queues\n"); ++ 
otx_ep_err("invalid num queues"); + return -EINVAL; + } +- otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n", ++ otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d", + eth_dev->data->nb_rx_queues, eth_dev->data->nb_tx_queues); -@@ -1018,12 +1033,18 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, - type = conf->ipsec.tunnel.type; - cfg->ctrl_word.mode = NFP_IPSEC_MODE_TUNNEL; - if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { -- cfg->src_ip.v4 = conf->ipsec.tunnel.ipv4.src_ip; -- cfg->dst_ip.v4 = conf->ipsec.tunnel.ipv4.dst_ip; -+ src_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.src_ip.s_addr; -+ dst_ip = (rte_be32_t *)&conf->ipsec.tunnel.ipv4.dst_ip.s_addr; -+ cfg->src_ip[0] = rte_be_to_cpu_32(src_ip[0]); -+ cfg->dst_ip[0] = rte_be_to_cpu_32(dst_ip[0]); - cfg->ipv6 = 0; - } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { -- cfg->src_ip.v6 = conf->ipsec.tunnel.ipv6.src_addr; -- cfg->dst_ip.v6 = conf->ipsec.tunnel.ipv6.dst_addr; -+ src_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.src_addr.s6_addr; -+ dst_ip = (rte_be32_t *)conf->ipsec.tunnel.ipv6.dst_addr.s6_addr; -+ for (i = 0; i < 4; i++) { -+ cfg->src_ip[i] = rte_be_to_cpu_32(src_ip[i]); -+ cfg->dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]); -+ } - cfg->ipv6 = 1; - } else { - PMD_DRV_LOG(ERR, "Unsupported address family!"); -@@ -1036,9 +1057,11 @@ nfp_ipsec_msg_build(struct rte_eth_dev *eth_dev, - cfg->ctrl_word.mode = NFP_IPSEC_MODE_TRANSPORT; - if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) { - memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); -+ memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip)); - cfg->ipv6 = 0; - } else if (type == RTE_SECURITY_IPSEC_TUNNEL_IPV6) { - memset(&cfg->src_ip, 0, sizeof(cfg->src_ip)); -+ memset(&cfg->dst_ip, 0, sizeof(cfg->dst_ip)); - cfg->ipv6 = 1; - } else { - PMD_DRV_LOG(ERR, "Unsupported address family!"); -@@ -1172,18 +1195,18 @@ nfp_security_set_pkt_metadata(void *device, - desc_md = RTE_MBUF_DYNFIELD(m, offset, struct nfp_tx_ipsec_desc_msg *); + otx_epvf->rx_offloads = rxmode->offloads; +@@ -403,29 +403,29 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, + uint16_t buf_size; - if (priv_session->msg.ctrl_word.ext_seq != 0 && sqn != NULL) { -- desc_md->esn.low = rte_cpu_to_be_32(*sqn); -- desc_md->esn.hi = rte_cpu_to_be_32(*sqn >> 32); -+ desc_md->esn.low = (uint32_t)*sqn; -+ desc_md->esn.hi = (uint32_t)(*sqn >> 32); - } else if (priv_session->msg.ctrl_word.ext_seq != 0) { -- desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.low); -- desc_md->esn.hi = rte_cpu_to_be_32(priv_session->ipsec.esn.hi); -+ desc_md->esn.low = priv_session->ipsec.esn.low; -+ desc_md->esn.hi = priv_session->ipsec.esn.hi; - } else { -- desc_md->esn.low = rte_cpu_to_be_32(priv_session->ipsec.esn.value); -+ desc_md->esn.low = priv_session->ipsec.esn.low; - desc_md->esn.hi = 0; - } + if (q_no >= otx_epvf->max_rx_queues) { +- otx_ep_err("Invalid rx queue number %u\n", q_no); ++ otx_ep_err("Invalid rx queue number %u", q_no); + return -EINVAL; + } - desc_md->enc = 1; -- desc_md->sa_idx = rte_cpu_to_be_32(priv_session->sa_index); -+ desc_md->sa_idx = priv_session->sa_index; + if (num_rx_descs & (num_rx_descs - 1)) { +- otx_ep_err("Invalid rx desc number should be pow 2 %u\n", ++ otx_ep_err("Invalid rx desc number should be pow 2 %u", + num_rx_descs); + return -EINVAL; + } + if (num_rx_descs < (SDP_GBL_WMARK * 8)) { +- otx_ep_err("Invalid rx desc number(%u) should at least be greater than 8xwmark %u\n", ++ otx_ep_err("Invalid rx desc number(%u) should at least be greater than 8xwmark 
%u", + num_rx_descs, (SDP_GBL_WMARK * 8)); + return -EINVAL; } - return 0; -diff --git a/dpdk/drivers/net/nfp/nfp_ipsec.h b/dpdk/drivers/net/nfp/nfp_ipsec.h -index d7a729398a..f7c4f3f225 100644 ---- a/dpdk/drivers/net/nfp/nfp_ipsec.h -+++ b/dpdk/drivers/net/nfp/nfp_ipsec.h -@@ -36,11 +36,6 @@ struct sa_ctrl_word { - uint32_t spare2 :1; /**< Must be set to 0 */ - }; +- otx_ep_dbg("setting up rx queue %u\n", q_no); ++ otx_ep_dbg("setting up rx queue %u", q_no); --union nfp_ip_addr { -- struct in6_addr v6; -- struct in_addr v4; --}; -- - struct ipsec_add_sa { - uint32_t cipher_key[8]; /**< Cipher Key */ - union { -@@ -60,8 +55,8 @@ struct ipsec_add_sa { - uint8_t spare1; - uint32_t soft_byte_cnt; /**< Soft lifetime byte count */ - uint32_t hard_byte_cnt; /**< Hard lifetime byte count */ -- union nfp_ip_addr src_ip; /**< Src IP addr */ -- union nfp_ip_addr dst_ip; /**< Dst IP addr */ -+ uint32_t src_ip[4]; /**< Src IP addr */ -+ uint32_t dst_ip[4]; /**< Dst IP addr */ - uint16_t natt_dst_port; /**< NAT-T UDP Header dst port */ - uint16_t natt_src_port; /**< NAT-T UDP Header src port */ - uint32_t soft_lifetime_limit; /**< Soft lifetime time limit */ -diff --git a/dpdk/drivers/net/nfp/nfp_net_common.c b/dpdk/drivers/net/nfp/nfp_net_common.c -index e969b840d6..0491912bd3 100644 ---- a/dpdk/drivers/net/nfp/nfp_net_common.c -+++ b/dpdk/drivers/net/nfp/nfp_net_common.c -@@ -189,9 +189,6 @@ nfp_net_notify_port_speed(struct nfp_net_hw *hw, - nfp_net_link_speed_rte2nfp(link->link_speed)); - } + mbp_priv = rte_mempool_get_priv(mp); + buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; --/* The length of firmware version string */ --#define FW_VER_LEN 32 -- - /** - * Reconfigure the firmware via the mailbox - * -@@ -1299,6 +1296,7 @@ nfp_net_supported_ptypes_get(struct rte_eth_dev *dev) - RTE_PTYPE_INNER_L4_NONFRAG, - RTE_PTYPE_INNER_L4_ICMP, - RTE_PTYPE_INNER_L4_SCTP, -+ RTE_PTYPE_UNKNOWN - }; + if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp, + socket_id)) { +- otx_ep_err("droq allocation failed\n"); ++ otx_ep_err("droq allocation failed"); + return -1; + } - if (dev->rx_pkt_burst != nfp_net_recv_pkts) -@@ -2062,17 +2060,22 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev, - size_t fw_size) - { - struct nfp_net_hw *hw; -- char mip_name[FW_VER_LEN]; -- char app_name[FW_VER_LEN]; -- char nsp_version[FW_VER_LEN]; -- char vnic_version[FW_VER_LEN]; -+ char app_name[FW_VER_LEN] = {0}; -+ char mip_name[FW_VER_LEN] = {0}; -+ char nsp_version[FW_VER_LEN] = {0}; -+ char vnic_version[FW_VER_LEN] = {0}; +@@ -454,7 +454,7 @@ otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no) + int q_id = rq->q_no; - if (fw_size < FW_VER_LEN) - return FW_VER_LEN; + if (otx_ep_delete_oqs(otx_epvf, q_id)) +- otx_ep_err("Failed to delete OQ:%d\n", q_id); ++ otx_ep_err("Failed to delete OQ:%d", q_id); + } - hw = nfp_net_get_hw(dev); + /** +@@ -488,16 +488,16 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, + int retval; -- if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) != 0) { -+ if (hw->fw_version[0] != 0) { -+ snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version); -+ return 0; -+ } -+ -+ if ((dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) == 0) { - snprintf(vnic_version, FW_VER_LEN, "%d.%d.%d.%d", - hw->ver.extend, hw->ver.class, - hw->ver.major, hw->ver.minor); -@@ -2084,8 +2087,16 @@ nfp_net_firmware_version_get(struct rte_eth_dev *dev, - nfp_net_get_mip_name(hw, mip_name); - nfp_net_get_app_name(hw, app_name); + if (q_no >= otx_epvf->max_tx_queues) { +- 
otx_ep_err("Invalid tx queue number %u\n", q_no); ++ otx_ep_err("Invalid tx queue number %u", q_no); + return -EINVAL; + } + if (num_tx_descs & (num_tx_descs - 1)) { +- otx_ep_err("Invalid tx desc number should be pow 2 %u\n", ++ otx_ep_err("Invalid tx desc number should be pow 2 %u", + num_tx_descs); + return -EINVAL; + } + if (num_tx_descs < (SDP_GBL_WMARK * 8)) { +- otx_ep_err("Invalid tx desc number(%u) should at least be greater than 8*wmark(%u)\n", ++ otx_ep_err("Invalid tx desc number(%u) should at least be greater than 8*wmark(%u)", + num_tx_descs, (SDP_GBL_WMARK * 8)); + return -EINVAL; + } +@@ -505,12 +505,12 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no, + retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id); -- snprintf(fw_version, FW_VER_LEN, "%s %s %s %s", -+ if (nsp_version[0] == 0 || mip_name[0] == 0) { -+ snprintf(fw_version, FW_VER_LEN, "%s %s %s %s", - vnic_version, nsp_version, mip_name, app_name); -+ return 0; -+ } -+ -+ snprintf(hw->fw_version, FW_VER_LEN, "%s %s %s %s", -+ vnic_version, nsp_version, mip_name, app_name); -+ -+ snprintf(fw_version, FW_VER_LEN, "%s", hw->fw_version); + if (retval) { +- otx_ep_err("IQ(TxQ) creation failed.\n"); ++ otx_ep_err("IQ(TxQ) creation failed."); + return retval; + } + eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no]; +- otx_ep_dbg("tx queue[%d] setup\n", q_no); ++ otx_ep_dbg("tx queue[%d] setup", q_no); return 0; } -@@ -2249,3 +2260,13 @@ nfp_net_flow_ctrl_set(struct rte_eth_dev *dev, - return 0; - } -+ -+uint32_t -+nfp_net_get_port_num(struct nfp_pf_dev *pf_dev, -+ struct nfp_eth_table *nfp_eth_table) -+{ -+ if (pf_dev->multi_pf.enabled) -+ return 1; -+ else -+ return nfp_eth_table->count; -+} -diff --git a/dpdk/drivers/net/nfp/nfp_net_common.h b/dpdk/drivers/net/nfp/nfp_net_common.h -index 30fea7ae02..41d59bfa99 100644 ---- a/dpdk/drivers/net/nfp/nfp_net_common.h -+++ b/dpdk/drivers/net/nfp/nfp_net_common.h -@@ -38,6 +38,9 @@ +@@ -603,23 +603,23 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev) + num_queues = otx_epvf->nb_rx_queues; + for (q_no = 0; q_no < num_queues; q_no++) { + if (otx_ep_delete_oqs(otx_epvf, q_no)) { +- otx_ep_err("Failed to delete OQ:%d\n", q_no); ++ otx_ep_err("Failed to delete OQ:%d", q_no); + return -EINVAL; + } + } +- otx_ep_dbg("Num OQs:%d freed\n", otx_epvf->nb_rx_queues); ++ otx_ep_dbg("Num OQs:%d freed", otx_epvf->nb_rx_queues); - #define NFP_BEAT_LENGTH 8 + num_queues = otx_epvf->nb_tx_queues; + for (q_no = 0; q_no < num_queues; q_no++) { + if (otx_ep_delete_iqs(otx_epvf, q_no)) { +- otx_ep_err("Failed to delete IQ:%d\n", q_no); ++ otx_ep_err("Failed to delete IQ:%d", q_no); + return -EINVAL; + } + } +- otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues); ++ otx_ep_dbg("Num IQs:%d freed", otx_epvf->nb_tx_queues); -+/* The length of firmware version string */ -+#define FW_VER_LEN 32 -+ - /* - * Each PF has corresponding word to beat: - * Offset | Usage -@@ -98,6 +101,9 @@ struct nfp_pf_dev { + if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) { +- otx_ep_err("Failed to delete ISM buffer\n"); ++ otx_ep_err("Failed to delete ISM buffer"); + return -EINVAL; + } - uint8_t *qc_bar; +@@ -635,7 +635,7 @@ otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev, + ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr); + if (ret) + return -EINVAL; +- otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT, + RTE_ETHER_ADDR_BYTES(mac_addr)); + return 0; + } +@@ -684,22 +684,22 @@ static int 
otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev, + ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr); + if (!ret_val) { + if (!rte_is_valid_assigned_ether_addr(mac_addr)) { +- otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT, + RTE_ETHER_ADDR_BYTES(mac_addr)); + rte_eth_random_addr(mac_addr->addr_bytes); +- otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT, + RTE_ETHER_ADDR_BYTES(mac_addr)); + ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr); + if (ret_val) { +- otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n", ++ otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails", + RTE_ETHER_ADDR_BYTES(mac_addr)); + return ret_val; + } + } +- otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT, + RTE_ETHER_ADDR_BYTES(mac_addr)); + } else { +- otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n", ++ otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d", + ret_val); + return ret_val; + } +@@ -734,7 +734,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev) + otx_epvf->mbox_neg_ver = OTX_EP_MBOX_VERSION_V1; + eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { +- otx_ep_err("MAC addresses memory allocation failed\n"); ++ otx_ep_err("MAC addresses memory allocation failed"); + eth_dev->dev_ops = NULL; + return -ENOMEM; + } +@@ -754,12 +754,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev) + otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF || + otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) { + otx_epvf->pkind = SDP_OTX2_PKIND_FS0; +- otx_ep_info("using pkind %d\n", otx_epvf->pkind); ++ otx_ep_info("using pkind %d", otx_epvf->pkind); + } else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) { + otx_epvf->pkind = SDP_PKIND; +- otx_ep_info("Using pkind %d.\n", otx_epvf->pkind); ++ otx_ep_info("Using pkind %d.", otx_epvf->pkind); + } else { +- otx_ep_err("Invalid chip id\n"); ++ otx_ep_err("Invalid chip id"); + return -EINVAL; + } -+ struct nfp_cpp_area *mac_stats_area; -+ uint8_t *mac_stats_bar; -+ - struct nfp_hwinfo *hwinfo; - struct nfp_rtsym_table *sym_tbl; +@@ -768,7 +768,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev) -@@ -165,8 +171,6 @@ struct nfp_net_hw { + if (otx_ep_eth_dev_query_set_vf_mac(eth_dev, + (struct rte_ether_addr *)&vf_mac_addr)) { +- otx_ep_err("set mac addr failed\n"); ++ otx_ep_err("set mac addr failed"); + return -ENODEV; + } + rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs); +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_mbox.c b/dpdk/drivers/net/octeon_ep/otx_ep_mbox.c +index 4118645dc7..c92adeaf9a 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_mbox.c ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_mbox.c +@@ -44,11 +44,11 @@ __otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep, + } + } + if (count == OTX_EP_MBOX_TIMEOUT_MS) { +- otx_ep_err("mbox send Timeout count:%d\n", count); ++ otx_ep_err("mbox send Timeout count:%d", count); + return OTX_EP_MBOX_TIMEOUT_MS; + } + if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) { +- otx_ep_err("mbox received NACK from PF\n"); ++ otx_ep_err("mbox received NACK from PF"); + return OTX_EP_MBOX_CMD_STATUS_NACK; + } + +@@ -65,7 +65,7 @@ otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep, + + 
rte_spinlock_lock(&otx_ep->mbox_lock); + if (otx_ep_cmd_versions[cmd.s.opcode] > otx_ep->mbox_neg_ver) { +- otx_ep_dbg("CMD:%d not supported in Version:%d\n", cmd.s.opcode, ++ otx_ep_dbg("CMD:%d not supported in Version:%d", cmd.s.opcode, + otx_ep->mbox_neg_ver); + rte_spinlock_unlock(&otx_ep->mbox_lock); + return -EOPNOTSUPP; +@@ -92,7 +92,7 @@ otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep, + /* Send cmd to read data from PF */ + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("mbox bulk read data request failed\n"); ++ otx_ep_err("mbox bulk read data request failed"); + rte_spinlock_unlock(&otx_ep->mbox_lock); + return ret; + } +@@ -108,7 +108,7 @@ otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep, + while (data_len) { + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("mbox bulk read data request failed\n"); ++ otx_ep_err("mbox bulk read data request failed"); + otx_ep->mbox_data_index = 0; + memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE); + rte_spinlock_unlock(&otx_ep->mbox_lock); +@@ -154,10 +154,10 @@ otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) + + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("set MTU failed\n"); ++ otx_ep_err("set MTU failed"); + return -EINVAL; + } +- otx_ep_dbg("mtu set success mtu %u\n", mtu); ++ otx_ep_dbg("mtu set success mtu %u", mtu); - struct nfp_cpp *cpp; - struct nfp_cpp_area *ctrl_area; -- struct nfp_cpp_area *mac_stats_area; -- uint8_t *mac_stats_bar; - uint8_t *mac_stats; + return 0; + } +@@ -178,10 +178,10 @@ otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev, + cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i]; + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("set MAC address failed\n"); ++ otx_ep_err("set MAC address failed"); + return -EINVAL; + } +- otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT, + __func__, RTE_ETHER_ADDR_BYTES(mac_addr)); + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs); + return 0; +@@ -201,12 +201,12 @@ otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev, + cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR; + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("get MAC address failed\n"); ++ otx_ep_err("get MAC address failed"); + return -EINVAL; + } + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i]; +- otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n", ++ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT, + __func__, RTE_ETHER_ADDR_BYTES(mac_addr)); + return 0; + } +@@ -224,7 +224,7 @@ int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev, + cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS; + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp); + if (ret) { +- otx_ep_err("Get link status failed\n"); ++ otx_ep_err("Get link status failed"); + return -EINVAL; + } + *oper_up = rsp.s_link_status.status; +@@ -242,7 +242,7 @@ int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, + ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO, + (uint8_t *)&link_info, (int32_t *)&size); + if (ret) { +- otx_ep_err("Get link info failed\n"); ++ otx_ep_err("Get link info failed"); + return ret; + } + link->link_status = RTE_ETH_LINK_UP; +@@ -310,12 +310,12 @@ int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev) + * during initialization of PMD driver. 
+ */ + if (ret == OTX_EP_MBOX_CMD_STATUS_NACK || rsp.s_version.version == 0) { +- otx_ep_dbg("VF Mbox version fallback to base version from:%u\n", ++ otx_ep_dbg("VF Mbox version fallback to base version from:%u", + (uint32_t)cmd.s_version.version); + return 0; + } + otx_ep->mbox_neg_ver = (uint32_t)rsp.s_version.version; +- otx_ep_dbg("VF Mbox version:%u Negotiated VF version with PF:%u\n", ++ otx_ep_dbg("VF Mbox version:%u Negotiated VF version with PF:%u", + (uint32_t)cmd.s_version.version, + (uint32_t)rsp.s_version.version); + return 0; +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_rxtx.c b/dpdk/drivers/net/octeon_ep/otx_ep_rxtx.c +index c421ef0a1c..65a1f304e8 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_rxtx.c ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_rxtx.c +@@ -22,19 +22,19 @@ otx_ep_dmazone_free(const struct rte_memzone *mz) + int ret = 0; - /** Sequential physical port number, only valid for CoreNIC firmware */ -@@ -177,6 +181,9 @@ struct nfp_net_hw { - struct nfp_net_tlv_caps tlv_caps; + if (mz == NULL) { +- otx_ep_err("Memzone: NULL\n"); ++ otx_ep_err("Memzone: NULL"); + return; + } - struct nfp_net_ipsec_data *ipsec_data; -+ -+ /** Used for firmware version */ -+ char fw_version[FW_VER_LEN]; - }; + mz_tmp = rte_memzone_lookup(mz->name); + if (mz_tmp == NULL) { +- otx_ep_err("Memzone %s Not Found\n", mz->name); ++ otx_ep_err("Memzone %s Not Found", mz->name); + return; + } - static inline uint32_t -@@ -272,6 +279,9 @@ int nfp_net_flow_ctrl_get(struct rte_eth_dev *dev, - struct rte_eth_fc_conf *fc_conf); - int nfp_net_flow_ctrl_set(struct rte_eth_dev *dev, - struct rte_eth_fc_conf *fc_conf); -+void nfp_pf_uninit(struct nfp_pf_dev *pf_dev); -+uint32_t nfp_net_get_port_num(struct nfp_pf_dev *pf_dev, -+ struct nfp_eth_table *nfp_eth_table); + ret = rte_memzone_free(mz); + if (ret) +- otx_ep_err("Memzone free failed : ret = %d\n", ret); ++ otx_ep_err("Memzone free failed : ret = %d", ret); + } - #define NFP_PRIV_TO_APP_FW_NIC(app_fw_priv)\ - ((struct nfp_app_fw_nic *)app_fw_priv) -diff --git a/dpdk/drivers/net/nfp/nfp_rxtx.c b/dpdk/drivers/net/nfp/nfp_rxtx.c -index f21e120a43..8ca651ba55 100644 ---- a/dpdk/drivers/net/nfp/nfp_rxtx.c -+++ b/dpdk/drivers/net/nfp/nfp_rxtx.c -@@ -747,15 +747,6 @@ nfp_net_recv_pkts(void *rx_queue, - /* Checking the checksum flag */ - nfp_net_rx_cksum(rxq, rxds, mb); + /* Free IQ resources */ +@@ -46,7 +46,7 @@ otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no) -- if (meta.port_id == 0) { -- rx_pkts[avail++] = mb; -- } else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) { -- avail_multiplexed++; -- } else { -- rte_pktmbuf_free(mb); -- break; -- } -- - /* Now resetting and updating the descriptor */ - rxds->vals[0] = 0; - rxds->vals[1] = 0; -@@ -768,6 +759,15 @@ nfp_net_recv_pkts(void *rx_queue, - rxq->rd_p++; - if (unlikely(rxq->rd_p == rxq->rx_count)) /* Wrapping */ - rxq->rd_p = 0; -+ -+ if (meta.port_id == 0) { -+ rx_pkts[avail++] = mb; -+ } else if (nfp_flower_pf_dispatch_pkts(hw, mb, meta.port_id)) { -+ avail_multiplexed++; -+ } else { -+ rte_pktmbuf_free(mb); -+ break; -+ } + iq = otx_ep->instr_queue[iq_no]; + if (iq == NULL) { +- otx_ep_err("Invalid IQ[%d]\n", iq_no); ++ otx_ep_err("Invalid IQ[%d]", iq_no); + return -EINVAL; } - if (nb_hold == 0) -diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c -index a6fd89b6c8..ef1ffd6d01 100644 ---- a/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c -+++ b/dpdk/drivers/net/nfp/nfpcore/nfp6000_pcie.c -@@ -263,19 +263,6 @@ 
nfp_bitsize_calc(uint64_t mask) - return bit_size; - } +@@ -68,7 +68,7 @@ otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no) --static int --nfp_cmp_bars(const void *ptr_a, -- const void *ptr_b) --{ -- const struct nfp_bar *a = ptr_a; -- const struct nfp_bar *b = ptr_b; -- -- if (a->bitsize == b->bitsize) -- return a->index - b->index; -- else -- return a->bitsize - b->bitsize; --} -- - static bool - nfp_bars_for_secondary(uint32_t index) - { -@@ -383,9 +370,6 @@ nfp_enable_bars(struct nfp_pcie_user *nfp) - if (nfp_bar_write(nfp, bar, barcfg_msix_general) < 0) - return -EIO; + otx_ep->nb_tx_queues--; + +- otx_ep_info("IQ[%d] is deleted\n", iq_no); ++ otx_ep_info("IQ[%d] is deleted", iq_no); -- /* Sort bars by bit size - use the smallest possible first. */ -- qsort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), nfp_cmp_bars); -- return 0; } +@@ -94,7 +94,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs, + OTX_EP_PCI_RING_ALIGN, + socket_id); + if (iq->iq_mz == NULL) { +- otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no); ++ otx_ep_err("IQ[%d] memzone alloc failed", iq_no); + goto iq_init_fail; + } -@@ -466,16 +450,18 @@ find_matching_bar(struct nfp_pcie_user *nfp, - int width) - { - uint32_t n; -+ uint32_t index; +@@ -102,7 +102,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs, + iq->base_addr = (uint8_t *)iq->iq_mz->addr; -- for (n = 0; n < nfp->bars; n++) { -- struct nfp_bar *bar = &nfp->bar[n]; -+ for (n = RTE_DIM(nfp->bar) ; n > 0; n--) { -+ index = n - 1; -+ struct nfp_bar *bar = &nfp->bar[index]; + if (num_descs & (num_descs - 1)) { +- otx_ep_err("IQ[%d] descs not in power of 2\n", iq_no); ++ otx_ep_err("IQ[%d] descs not in power of 2", iq_no); + goto iq_init_fail; + } - if (bar->lock) - continue; +@@ -117,7 +117,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs, + RTE_CACHE_LINE_SIZE, + rte_socket_id()); + if (iq->req_list == NULL) { +- otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no); ++ otx_ep_err("IQ[%d] req_list alloc failed", iq_no); + goto iq_init_fail; + } - if (matching_bar_exist(bar, target, action, token, - offset, size, width)) -- return n; -+ return index; +@@ -125,7 +125,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs, + sg = rte_zmalloc_socket("sg_entry", (OTX_EP_MAX_SG_LISTS * OTX_EP_SG_ENTRY_SIZE), + OTX_EP_SG_ALIGN, rte_socket_id()); + if (sg == NULL) { +- otx_ep_err("IQ[%d] sg_entries alloc failed\n", iq_no); ++ otx_ep_err("IQ[%d] sg_entries alloc failed", iq_no); + goto iq_init_fail; + } + +@@ -133,14 +133,14 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs, + iq->req_list[i].finfo.g.sg = sg; } - return -1; -@@ -493,10 +479,12 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp, - { - int ret; - uint32_t n; -+ uint32_t index; - const struct nfp_bar *bar; +- otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n", ++ otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d", + iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma, + iq->nb_desc); -- for (n = 0; n < nfp->bars; n++) { -- bar = &nfp->bar[n]; -+ for (n = RTE_DIM(nfp->bar); n > 0; n--) { -+ index = n - 1; -+ bar = &nfp->bar[index]; + iq->mbuf_list = rte_zmalloc_socket("mbuf_list", (iq->nb_desc * sizeof(struct rte_mbuf *)), + RTE_CACHE_LINE_SIZE, rte_socket_id()); + if (!iq->mbuf_list) { +- otx_ep_err("IQ[%d] mbuf_list alloc failed\n", iq_no); ++ otx_ep_err("IQ[%d] mbuf_list alloc failed", iq_no); + goto iq_init_fail; + } 
- if (bar->bitsize == 0) - continue; -@@ -508,7 +496,7 @@ find_unused_bar_noblock(struct nfp_pcie_user *nfp, - continue; +@@ -185,12 +185,12 @@ otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs, + otx_ep->instr_queue[iq_no] = iq; - if (!bar->lock) -- return n; -+ return index; + if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) { +- otx_ep_err("IQ init is failed\n"); ++ otx_ep_err("IQ init is failed"); + goto delete_IQ; } + otx_ep->nb_tx_queues++; - return -EAGAIN; -@@ -561,7 +549,7 @@ nfp_disable_bars(struct nfp_pcie_user *nfp) - uint32_t i; - struct nfp_bar *bar; +- otx_ep_info("IQ[%d] is created.\n", iq_no); ++ otx_ep_info("IQ[%d] is created.", iq_no); -- for (i = 0; i < nfp->bars; i++) { -+ for (i = 0; i < RTE_DIM(nfp->bar); i++) { - bar = &nfp->bar[i]; - if (bar->iomem != NULL) { - bar->iomem = NULL; -diff --git a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c -index 3c10c7a090..edb78dfdc9 100644 ---- a/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c -+++ b/dpdk/drivers/net/nfp/nfpcore/nfp_mutex.c -@@ -168,7 +168,7 @@ nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, - if (tmp != key) - return NULL; + return 0; -- mutex = calloc(sizeof(*mutex), 1); -+ mutex = calloc(1, sizeof(*mutex)); - if (mutex == NULL) - return NULL; +@@ -233,7 +233,7 @@ otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no) -diff --git a/dpdk/drivers/net/ngbe/base/ngbe_devids.h b/dpdk/drivers/net/ngbe/base/ngbe_devids.h -index 83eedf423e..e1efa62015 100644 ---- a/dpdk/drivers/net/ngbe/base/ngbe_devids.h -+++ b/dpdk/drivers/net/ngbe/base/ngbe_devids.h -@@ -83,6 +83,7 @@ - #define NGBE_YT8521S_SFP_GPIO 0x0062 - #define NGBE_INTERNAL_YT8521S_SFP_GPIO 0x0064 - #define NGBE_LY_YT8521S_SFP 0x0070 -+#define NGBE_RGMII_FPGA 0x0080 - #define NGBE_WOL_SUP 0x4000 - #define NGBE_NCSI_SUP 0x8000 + droq = otx_ep->droq[oq_no]; + if (droq == NULL) { +- otx_ep_err("Invalid droq[%d]\n", oq_no); ++ otx_ep_err("Invalid droq[%d]", oq_no); + return -EINVAL; + } -diff --git a/dpdk/drivers/net/ngbe/base/ngbe_hw.c b/dpdk/drivers/net/ngbe/base/ngbe_hw.c -index 22ccdb0b7d..4dced0d328 100644 ---- a/dpdk/drivers/net/ngbe/base/ngbe_hw.c -+++ b/dpdk/drivers/net/ngbe/base/ngbe_hw.c -@@ -173,6 +173,9 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw) - ngbe_reset_misc_em(hw); - hw->mac.clear_hw_cntrs(hw); +@@ -253,7 +253,7 @@ otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no) -+ if (!((hw->sub_device_id & NGBE_OEM_MASK) == NGBE_RGMII_FPGA)) -+ hw->phy.set_phy_power(hw, false); -+ - msec_delay(50); + otx_ep->nb_rx_queues--; - /* Store the permanent mac address */ -diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c -index ea313cd9a5..a374b015fd 100644 ---- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c -+++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.c -@@ -320,6 +320,10 @@ skip_an_fiber: - value |= value_r4; - ngbe_write_phy_reg_mdi(hw, YT_ANA, 0, value); +- otx_ep_info("OQ[%d] is deleted\n", oq_no); ++ otx_ep_info("OQ[%d] is deleted", oq_no); + return 0; + } -+ /* config for yt8531sh-ca */ -+ ngbe_write_phy_reg_ext_yt(hw, YT_SPEC_CONF, 0, -+ YT_SPEC_CONF_8531SH_CA); -+ - /* software reset to make the above configuration - * take effect - */ -diff --git a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h -index ddf992e79a..c45bec7ce7 100644 ---- a/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h -+++ b/dpdk/drivers/net/ngbe/base/ngbe_phy_yt.h -@@ -32,6 +32,8 @@ - #define YT_MISC 0xA006 - #define 
YT_MISC_FIBER_PRIO MS16(8, 0x1) /* 0 for UTP */ - #define YT_MISC_RESV MS16(0, 0x1) -+#define YT_SPEC_CONF 0xA023 -+#define YT_SPEC_CONF_8531SH_CA 0x4031 +@@ -268,7 +268,7 @@ otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq) + for (idx = 0; idx < droq->nb_desc; idx++) { + buf = rte_pktmbuf_alloc(droq->mpool); + if (buf == NULL) { +- otx_ep_err("OQ buffer alloc failed\n"); ++ otx_ep_err("OQ buffer alloc failed"); + droq->stats.rx_alloc_failure++; + return -ENOMEM; + } +@@ -296,7 +296,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no, + uint32_t desc_ring_size; + int ret; - /* SDS EXT */ - #define YT_AUTO 0xA5 -diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index 478da014b2..fb86e7b10d 100644 ---- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c -+++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -@@ -546,7 +546,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) - if (ethdev == NULL) - return 0; +- otx_ep_info("OQ[%d] Init start\n", q_no); ++ otx_ep_info("OQ[%d] Init start", q_no); + + droq = otx_ep->droq[q_no]; + droq->otx_ep_dev = otx_ep; +@@ -316,23 +316,23 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no, + socket_id); + + if (droq->desc_ring_mz == NULL) { +- otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no); ++ otx_ep_err("OQ:%d desc_ring allocation failed", q_no); + goto init_droq_fail; + } + + droq->desc_ring_dma = droq->desc_ring_mz->iova; + droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr; + +- otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n", ++ otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx", + q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma); +- otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc); ++ otx_ep_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc); + + /* OQ buf_list set up */ + droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list", + (droq->nb_desc * sizeof(struct rte_mbuf *)), + RTE_CACHE_LINE_SIZE, socket_id); + if (droq->recv_buf_list == NULL) { +- otx_ep_err("OQ recv_buf_list alloc failed\n"); ++ otx_ep_err("OQ recv_buf_list alloc failed"); + goto init_droq_fail; + } + +@@ -366,17 +366,17 @@ otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs, + droq = (struct otx_ep_droq *)rte_zmalloc("otx_ep_OQ", + sizeof(*droq), RTE_CACHE_LINE_SIZE); + if (droq == NULL) { +- otx_ep_err("Droq[%d] Creation Failed\n", oq_no); ++ otx_ep_err("Droq[%d] Creation Failed", oq_no); + return -ENOMEM; + } + otx_ep->droq[oq_no] = droq; -- return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit); -+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ngbe_dev_uninit); - } + if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool, + socket_id)) { +- otx_ep_err("Droq[%d] Initialization failed\n", oq_no); ++ otx_ep_err("Droq[%d] Initialization failed", oq_no); + goto delete_OQ; + } +- otx_ep_info("OQ[%d] is created.\n", oq_no); ++ otx_ep_info("OQ[%d] is created.", oq_no); - static struct rte_pci_driver rte_ngbe_pmd = { -@@ -1811,7 +1811,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) - dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; - dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; - dev_info->min_rx_bufsize = 1024; -- dev_info->max_rx_pktlen = 15872; -+ dev_info->max_rx_pktlen = NGBE_MAX_MTU + NGBE_ETH_OVERHEAD; -+ dev_info->min_mtu = RTE_ETHER_MIN_MTU; -+ dev_info->max_mtu = NGBE_MAX_MTU; - dev_info->max_mac_addrs = hw->mac.num_rar_entries; - dev_info->max_hash_mac_addrs = 
NGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = pci_dev->max_vfs; -diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.h b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -index 3cde7c8750..9b43d5f20e 100644 ---- a/dpdk/drivers/net/ngbe/ngbe_ethdev.h -+++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.h -@@ -32,6 +32,7 @@ + otx_ep->nb_rx_queues++; - #define NGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */ +@@ -401,12 +401,12 @@ otx_ep_iqreq_delete(struct otx_ep_instr_queue *iq, uint32_t idx) + case OTX_EP_REQTYPE_NORESP_GATHER: + /* This will take care of multiple segments also */ + rte_pktmbuf_free(mbuf); +- otx_ep_dbg("IQ buffer freed at idx[%d]\n", idx); ++ otx_ep_dbg("IQ buffer freed at idx[%d]", idx); + break; -+#define NGBE_MAX_MTU 9414 - /* The overhead from MTU to max frame size. */ - #define NGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN) + case OTX_EP_REQTYPE_NONE: + default: +- otx_ep_info("This iqreq mode is not supported:%d\n", reqtype); ++ otx_ep_info("This iqreq mode is not supported:%d", reqtype); + } -diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.c b/dpdk/drivers/net/ngbe/ngbe_rxtx.c -index 8a873b858e..4680ff91f1 100644 ---- a/dpdk/drivers/net/ngbe/ngbe_rxtx.c -+++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.c -@@ -1791,6 +1791,7 @@ ngbe_tx_queue_release(struct ngbe_tx_queue *txq) - if (txq->ops != NULL) { - txq->ops->release_mbufs(txq); - txq->ops->free_swring(txq); -+ rte_memzone_free(txq->mz); + /* Reset the request list at this index */ +@@ -568,7 +568,7 @@ prepare_xmit_gather_list(struct otx_ep_instr_queue *iq, struct rte_mbuf *m, uint + num_sg = (frags + mask) / OTX_EP_NUM_SG_PTRS; + + if (unlikely(pkt_len > OTX_EP_MAX_PKT_SZ && num_sg > OTX_EP_MAX_SG_LISTS)) { +- otx_ep_err("Failed to xmit the pkt, pkt_len is higher or pkt has more segments\n"); ++ otx_ep_err("Failed to xmit the pkt, pkt_len is higher or pkt has more segments"); + goto exit; + } + +@@ -644,16 +644,16 @@ otx_ep_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts) + iqcmd.irh.u64 = rte_bswap64(iqcmd.irh.u64); + + #ifdef OTX_EP_IO_DEBUG +- otx_ep_dbg("After swapping\n"); +- otx_ep_dbg("Word0 [dptr]: 0x%016lx\n", ++ otx_ep_dbg("After swapping"); ++ otx_ep_dbg("Word0 [dptr]: 0x%016lx", + (unsigned long)iqcmd.dptr); +- otx_ep_dbg("Word1 [ihtx]: 0x%016lx\n", (unsigned long)iqcmd.ih); +- otx_ep_dbg("Word2 [pki_ih3]: 0x%016lx\n", ++ otx_ep_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd.ih); ++ otx_ep_dbg("Word2 [pki_ih3]: 0x%016lx", + (unsigned long)iqcmd.pki_ih3); +- otx_ep_dbg("Word3 [rptr]: 0x%016lx\n", ++ otx_ep_dbg("Word3 [rptr]: 0x%016lx", + (unsigned long)iqcmd.rptr); +- otx_ep_dbg("Word4 [irh]: 0x%016lx\n", (unsigned long)iqcmd.irh); +- otx_ep_dbg("Word5 [exhdr[0]]: 0x%016lx\n", ++ otx_ep_dbg("Word4 [irh]: 0x%016lx", (unsigned long)iqcmd.irh); ++ otx_ep_dbg("Word5 [exhdr[0]]: 0x%016lx", + (unsigned long)iqcmd.exhdr[0]); + rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m)); + #endif +@@ -726,7 +726,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep, struct otx_ep_droq *droq, + if (unlikely(!info->length)) { + int retry = OTX_EP_MAX_DELAYED_PKT_RETRIES; + /* otx_ep_dbg("OCTEON DROQ[%d]: read_idx: %d; Data not ready " +- * "yet, Retry; pending=%lu\n", droq->q_no, droq->read_idx, ++ * "yet, Retry; pending=%lu", droq->q_no, droq->read_idx, + * droq->pkts_pending); + */ + droq->stats.pkts_delayed_data++; +@@ -735,7 +735,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep, struct otx_ep_droq *droq, + rte_delay_us_block(50); } - rte_free(txq); + if (!retry && !info->length) { +- otx_ep_err("OCTEON DROQ[%d]: 
read_idx: %d; Retry failed !!\n", ++ otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!", + droq->q_no, droq->read_idx); + /* May be zero length packet; drop it */ + assert(0); +@@ -803,7 +803,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep, struct otx_ep_droq *droq, + + last_buf = mbuf; + } else { +- otx_ep_err("no buf\n"); ++ otx_ep_err("no buf"); + assert(0); + } + +diff --git a/dpdk/drivers/net/octeon_ep/otx_ep_vf.c b/dpdk/drivers/net/octeon_ep/otx_ep_vf.c +index 236b7a874c..7defb0f13d 100644 +--- a/dpdk/drivers/net/octeon_ep/otx_ep_vf.c ++++ b/dpdk/drivers/net/octeon_ep/otx_ep_vf.c +@@ -142,7 +142,7 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no) + iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr + + OTX_EP_R_IN_CNTS(iq_no); + +- otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n", ++ otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + loop = OTX_EP_BUSY_LOOP_COUNT; +@@ -220,14 +220,14 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no) } -@@ -1995,6 +1996,7 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, - return -ENOMEM; + if (loop < 0) + return -EIO; +- otx_ep_dbg("OTX_EP_R[%d]_credit:%x\n", oq_no, ++ otx_ep_dbg("OTX_EP_R[%d]_credit:%x", oq_no, + rte_read32(droq->pkts_credit_reg)); + + /* Clear the OQ_OUT_CNTS doorbell */ + reg_val = rte_read32(droq->pkts_sent_reg); + rte_write32((uint32_t)reg_val, droq->pkts_sent_reg); + +- otx_ep_dbg("OTX_EP_R[%d]_sent: %x\n", oq_no, ++ otx_ep_dbg("OTX_EP_R[%d]_sent: %x", oq_no, + rte_read32(droq->pkts_sent_reg)); + + loop = OTX_EP_BUSY_LOOP_COUNT; +@@ -259,7 +259,7 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no) } -+ txq->mz = tz; - txq->nb_tx_desc = nb_desc; - txq->tx_free_thresh = tx_free_thresh; - txq->pthresh = tx_conf->tx_thresh.pthresh; -@@ -2097,6 +2099,7 @@ ngbe_rx_queue_release(struct ngbe_rx_queue *rxq) - ngbe_rx_queue_release_mbufs(rxq); - rte_free(rxq->sw_ring); - rte_free(rxq->sw_sc_ring); -+ rte_memzone_free(rxq->mz); - rte_free(rxq); + if (loop < 0) { +- otx_ep_err("dbell reset failed\n"); ++ otx_ep_err("dbell reset failed"); + return -EIO; } + +@@ -269,7 +269,7 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no) + + otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_IN_ENABLE(q_no)); + +- otx_ep_info("IQ[%d] enable done\n", q_no); ++ otx_ep_info("IQ[%d] enable done", q_no); + + return 0; } -@@ -2187,6 +2190,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq) - rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); - rxq->rx_tail = 0; - rxq->nb_rx_hold = 0; -+ rte_pktmbuf_free(rxq->pkt_first_seg); - rxq->pkt_first_seg = NULL; - rxq->pkt_last_seg = NULL; - } -@@ -2277,6 +2281,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, - return -ENOMEM; +@@ -290,7 +290,7 @@ otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no) + rte_delay_ms(1); + } + if (loop < 0) { +- otx_ep_err("dbell reset failed\n"); ++ otx_ep_err("dbell reset failed"); + return -EIO; } -+ rxq->mz = rz; - /* - * Zero init all the descriptors in the ring. 
+@@ -299,7 +299,7 @@ otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no) + reg_val |= 0x1ull; + otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_ENABLE(q_no)); + +- otx_ep_info("OQ[%d] enable done\n", q_no); ++ otx_ep_info("OQ[%d] enable done", q_no); + + return 0; + } +@@ -402,10 +402,10 @@ otx_ep_vf_setup_device(struct otx_ep_device *otx_ep) + if (otx_ep->conf == NULL) { + otx_ep->conf = otx_ep_get_defconf(otx_ep); + if (otx_ep->conf == NULL) { +- otx_ep_err("OTX_EP VF default config not found\n"); ++ otx_ep_err("OTX_EP VF default config not found"); + return -ENOENT; + } +- otx_ep_info("Default config is used\n"); ++ otx_ep_info("Default config is used"); + } + + /* Get IOQs (RPVF] count */ +@@ -414,7 +414,7 @@ otx_ep_vf_setup_device(struct otx_ep_device *otx_ep) + otx_ep->sriov_info.rings_per_vf = ((reg_val >> OTX_EP_R_IN_CTL_RPVF_POS) + & OTX_EP_R_IN_CTL_RPVF_MASK); + +- otx_ep_info("OTX_EP RPVF: %d\n", otx_ep->sriov_info.rings_per_vf); ++ otx_ep_info("OTX_EP RPVF: %d", otx_ep->sriov_info.rings_per_vf); + + otx_ep->fn_list.setup_iq_regs = otx_ep_setup_iq_regs; + otx_ep->fn_list.setup_oq_regs = otx_ep_setup_oq_regs; +diff --git a/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c b/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c +index 5d445dfb49..7aec84a813 100644 +--- a/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c ++++ b/dpdk/drivers/net/octeontx/base/octeontx_pkovf.c +@@ -364,7 +364,7 @@ octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid) + + res = octeontx_pko_dq_close(dq); + if (res < 0) +- octeontx_log_err("closing DQ%d failed\n", dq); ++ octeontx_log_err("closing DQ%d failed", dq); + + dq_cnt++; + dq++; +diff --git a/dpdk/drivers/net/octeontx/octeontx_ethdev.c b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +index 2a8378a33e..5f0cd1bb7f 100644 +--- a/dpdk/drivers/net/octeontx/octeontx_ethdev.c ++++ b/dpdk/drivers/net/octeontx/octeontx_ethdev.c +@@ -1223,7 +1223,7 @@ octeontx_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + if (dev->data->tx_queues[qid]) { + res = octeontx_dev_tx_queue_stop(dev, qid); + if (res < 0) +- octeontx_log_err("failed stop tx_queue(%d)\n", qid); ++ octeontx_log_err("failed stop tx_queue(%d)", qid); + + rte_free(dev->data->tx_queues[qid]); + } +@@ -1342,7 +1342,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, + + /* Verify queue index */ + if (qidx >= dev->data->nb_rx_queues) { +- octeontx_log_err("QID %d not supported (0 - %d available)\n", ++ octeontx_log_err("QID %d not supported (0 - %d available)", + qidx, (dev->data->nb_rx_queues - 1)); + return -ENOTSUP; + } +diff --git a/dpdk/drivers/net/pcap/pcap_ethdev.c b/dpdk/drivers/net/pcap/pcap_ethdev.c +index bfec085045..728ef85d53 100644 +--- a/dpdk/drivers/net/pcap/pcap_ethdev.c ++++ b/dpdk/drivers/net/pcap/pcap_ethdev.c +@@ -274,7 +274,7 @@ static uint16_t + eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + { + unsigned int i; +- struct pcap_pkthdr header; ++ struct pcap_pkthdr *header; + struct pmd_process_private *pp; + const u_char *packet; + struct rte_mbuf *mbuf; +@@ -294,9 +294,13 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) */ -diff --git a/dpdk/drivers/net/ngbe/ngbe_rxtx.h b/dpdk/drivers/net/ngbe/ngbe_rxtx.h -index 9130f9d0df..2914b9a756 100644 ---- a/dpdk/drivers/net/ngbe/ngbe_rxtx.h -+++ b/dpdk/drivers/net/ngbe/ngbe_rxtx.h -@@ -276,6 +276,7 @@ struct ngbe_rx_queue { - struct rte_mbuf fake_mbuf; - /** hold packets to return to application */ - struct rte_mbuf 
*rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2]; -+ const struct rte_memzone *mz; - }; + for (i = 0; i < nb_pkts; i++) { + /* Get the next PCAP packet */ +- packet = pcap_next(pcap, &header); +- if (unlikely(packet == NULL)) ++ int ret = pcap_next_ex(pcap, &header, &packet); ++ if (ret != 1) { ++ if (ret == PCAP_ERROR) ++ pcap_q->rx_stat.err_pkts++; ++ + break; ++ } - /** -@@ -353,6 +354,7 @@ struct ngbe_tx_queue { - uint8_t tx_deferred_start; /**< not in global dev start */ + mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool); + if (unlikely(mbuf == NULL)) { +@@ -304,33 +308,30 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + break; + } - const struct ngbe_txq_ops *ops; /**< txq ops */ -+ const struct rte_memzone *mz; - }; +- if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) { ++ uint32_t len = header->caplen; ++ if (len <= rte_pktmbuf_tailroom(mbuf)) { + /* pcap packet will fit in the mbuf, can copy it */ +- rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, +- header.caplen); +- mbuf->data_len = (uint16_t)header.caplen; ++ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, len); ++ mbuf->data_len = len; + } else { + /* Try read jumbo frame into multi mbufs. */ + if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool, +- mbuf, +- packet, +- header.caplen) == -1)) { ++ mbuf, packet, len) == -1)) { + pcap_q->rx_stat.err_pkts++; + rte_pktmbuf_free(mbuf); + break; + } + } - struct ngbe_txq_ops { +- mbuf->pkt_len = (uint16_t)header.caplen; +- *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset, +- rte_mbuf_timestamp_t *) = +- (uint64_t)header.ts.tv_sec * 1000000 + +- header.ts.tv_usec; ++ mbuf->pkt_len = len; ++ uint64_t us = (uint64_t)header->ts.tv_sec * US_PER_S + header->ts.tv_usec; ++ ++ *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = us; + mbuf->ol_flags |= timestamp_rx_dynflag; + mbuf->port = pcap_q->port_id; + bufs[num_rx] = mbuf; + num_rx++; +- rx_bytes += header.caplen; ++ rx_bytes += len; + } + pcap_q->rx_stat.pkts += num_rx; + pcap_q->rx_stat.bytes += rx_bytes; +@@ -522,6 +523,12 @@ open_iface_live(const char *iface, pcap_t **pcap) { + return -1; + } + ++ if (pcap_setnonblock(*pcap, 1, errbuf)) { ++ PMD_LOG(ERR, "Couldn't set non-blocking on %s: %s", iface, errbuf); ++ pcap_close(*pcap); ++ return -1; ++ } ++ + return 0; + } + +@@ -1093,11 +1100,11 @@ set_iface_direction(const char *iface, pcap_t *pcap, + { + const char *direction_str = (direction == PCAP_D_IN) ? 
"IN" : "OUT"; + if (pcap_setdirection(pcap, direction) < 0) { +- PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n", ++ PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s", + iface, direction_str, pcap_geterr(pcap)); + return -1; + } +- PMD_LOG(INFO, "Setting %s pcap direction %s\n", ++ PMD_LOG(INFO, "Setting %s pcap direction %s", + iface, direction_str); + return 0; + } diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c -index 551f3cf193..0073dd7405 100644 +index 551f3cf193..dc04a52639 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c +++ b/dpdk/drivers/net/pfe/pfe_ethdev.c +@@ -161,7 +161,7 @@ pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE); + ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks); + if (ret < 0 && errno != EINTR) +- PFE_PMD_ERR("epoll_wait fails with %d\n", errno); ++ PFE_PMD_ERR("epoll_wait fails with %d", errno); + } + + return work_done; +@@ -338,9 +338,9 @@ pfe_eth_open_cdev(struct pfe_eth_priv_s *priv) + + pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY); + if (pfe_cdev_fd < 0) { +- PFE_PMD_WARN("Unable to open PFE device file (%s).\n", ++ PFE_PMD_WARN("Unable to open PFE device file (%s).", + PFE_CDEV_PATH); +- PFE_PMD_WARN("Link status update will not be available.\n"); ++ PFE_PMD_WARN("Link status update will not be available."); + priv->link_fd = PFE_CDEV_INVALID_FD; + return -1; + } @@ -520,7 +520,8 @@ pfe_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, @@ -35189,11 +51203,479 @@ index 551f3cf193..0073dd7405 100644 }; if (dev->rx_pkt_burst == pfe_recv_pkts || +@@ -581,16 +582,16 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) + + ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus); + if (ret != 0) { +- PFE_PMD_ERR("Unable to fetch link status (ioctl)\n"); ++ PFE_PMD_ERR("Unable to fetch link status (ioctl)"); + return -1; + } +- PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n", ++ PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.", + lstatus, priv->id); + } + + if (old.link_status == lstatus) { + /* no change in status */ +- PFE_PMD_DEBUG("No change in link status; Not updating.\n"); ++ PFE_PMD_DEBUG("No change in link status; Not updating."); + return -1; + } + +@@ -601,7 +602,7 @@ pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused) + + pfe_eth_atomic_write_link_status(dev, &link); + +- PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id, ++ PFE_PMD_INFO("Port (%d) link is %s", dev->data->port_id, + link.link_status ? 
"up" : "down"); + + return 0; +@@ -991,24 +992,24 @@ pmd_pfe_probe(struct rte_vdev_device *vdev) + + addr = of_get_address(np, 0, &cbus_size, NULL); + if (!addr) { +- PFE_PMD_ERR("of_get_address cannot return qman address\n"); ++ PFE_PMD_ERR("of_get_address cannot return qman address"); + goto err; + } + cbus_addr = of_translate_address(np, addr); + if (!cbus_addr) { +- PFE_PMD_ERR("of_translate_address failed\n"); ++ PFE_PMD_ERR("of_translate_address failed"); + goto err; + } + + addr = of_get_address(np, 1, &ddr_size, NULL); + if (!addr) { +- PFE_PMD_ERR("of_get_address cannot return qman address\n"); ++ PFE_PMD_ERR("of_get_address cannot return qman address"); + goto err; + } + + g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr); + if (!g_pfe->ddr_phys_baseaddr) { +- PFE_PMD_ERR("of_translate_address failed\n"); ++ PFE_PMD_ERR("of_translate_address failed"); + goto err; + } + +diff --git a/dpdk/drivers/net/pfe/pfe_hif.c b/dpdk/drivers/net/pfe/pfe_hif.c +index e2b23bbeb7..abb9cde996 100644 +--- a/dpdk/drivers/net/pfe/pfe_hif.c ++++ b/dpdk/drivers/net/pfe/pfe_hif.c +@@ -309,7 +309,7 @@ client_put_rxpacket(struct hif_rx_queue *queue, + if (readl(&desc->ctrl) & CL_DESC_OWN) { + mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(pool)); + if (unlikely(!mbuf)) { +- PFE_PMD_WARN("Buffer allocation failure\n"); ++ PFE_PMD_WARN("Buffer allocation failure"); + return NULL; + } + +@@ -770,9 +770,9 @@ pfe_hif_rx_idle(struct pfe_hif *hif) + } while (--hif_stop_loop); + + if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV) +- PFE_PMD_ERR("Failed\n"); ++ PFE_PMD_ERR("Failed"); + else +- PFE_PMD_INFO("Done\n"); ++ PFE_PMD_INFO("Done"); + } + #endif + +@@ -806,7 +806,7 @@ pfe_hif_init(struct pfe *pfe) + + pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR); + if (pfe_cdev_fd < 0) { +- PFE_PMD_WARN("Unable to open PFE device file (%s).\n", ++ PFE_PMD_WARN("Unable to open PFE device file (%s).", + PFE_CDEV_PATH); + pfe->cdev_fd = PFE_CDEV_INVALID_FD; + return -1; +@@ -817,7 +817,7 @@ pfe_hif_init(struct pfe *pfe) + /* hif interrupt enable */ + err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd); + if (err) { +- PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n", ++ PFE_PMD_ERR("ioctl failed for intr enable err: %d", + errno); + goto err0; + } +@@ -826,7 +826,7 @@ pfe_hif_init(struct pfe *pfe) + epoll_ev.data.fd = event_fd; + err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev); + if (err < 0) { +- PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno); ++ PFE_PMD_ERR("epoll_ctl failed with err = %d", errno); + goto err0; + } + pfe->hif.epoll_fd = epoll_fd; +diff --git a/dpdk/drivers/net/pfe/pfe_hif_lib.c b/dpdk/drivers/net/pfe/pfe_hif_lib.c +index 6fe6d33d23..541ba365c6 100644 +--- a/dpdk/drivers/net/pfe/pfe_hif_lib.c ++++ b/dpdk/drivers/net/pfe/pfe_hif_lib.c +@@ -157,7 +157,7 @@ hif_lib_client_init_rx_buffers(struct hif_client_s *client, + queue->queue_id = 0; + queue->port_id = client->port_id; + queue->priv = client->priv; +- PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d\n", qno, ++ PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d", qno, + queue->base, queue->size); + } + +diff --git a/dpdk/drivers/net/qede/qede_rxtx.c b/dpdk/drivers/net/qede/qede_rxtx.c +index c35585f5fd..dcc8cbe943 100644 +--- a/dpdk/drivers/net/qede/qede_rxtx.c ++++ b/dpdk/drivers/net/qede/qede_rxtx.c +@@ -887,7 +887,7 @@ qede_free_tx_pkt(struct qede_tx_queue *txq) + mbuf = txq->sw_tx_ring[idx]; + if (mbuf) { + nb_segs = mbuf->nb_segs; +- PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs); ++ PMD_TX_LOG(DEBUG, txq, 
"nb_segs to free %u", nb_segs); + while (nb_segs) { + /* It's like consuming rxbuf in recv() */ + ecore_chain_consume(&txq->tx_pbl); +@@ -897,7 +897,7 @@ qede_free_tx_pkt(struct qede_tx_queue *txq) + rte_pktmbuf_free(mbuf); + txq->sw_tx_ring[idx] = NULL; + txq->sw_tx_cons++; +- PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n"); ++ PMD_TX_LOG(DEBUG, txq, "Freed tx packet"); + } else { + ecore_chain_consume(&txq->tx_pbl); + txq->nb_tx_avail++; +@@ -919,7 +919,7 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev, + + #ifdef RTE_LIBRTE_QEDE_DEBUG_TX + sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl); +- PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n", ++ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u", + abs(hw_bd_cons - sw_tx_cons)); + #endif + while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) +@@ -1353,7 +1353,7 @@ qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev, + tpa_info->tpa_tail = curr_frag; + qede_rx_bd_ring_consume(rxq); + if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) { +- PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n"); ++ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails"); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + rxq->rx_alloc_errors++; + } +@@ -1365,7 +1365,7 @@ qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) + { +- PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n", ++ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]", + cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0])); + /* only len_list[0] will have value */ + qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index, +@@ -1388,7 +1388,7 @@ qede_rx_process_tpa_end_cqe(struct qede_dev *qdev, + rx_mb->pkt_len = cqe->total_packet_len; + + PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d" +- " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason, ++ " pkt_len %d", cqe->tpa_agg_index, cqe->end_reason, + rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs, + rx_mb->pkt_len); + } +@@ -1471,7 +1471,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb, + pkt_len; + if (unlikely(!cur_size)) { + PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs" +- " left for mapping jumbo\n", num_segs); ++ " left for mapping jumbo", num_segs); + qede_recycle_rx_bd_ring(rxq, qdev, num_segs); + return -EINVAL; + } +@@ -1497,7 +1497,7 @@ print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq, + PMD_RX_LOG(INFO, rxq, + "len 0x%04x bf 0x%04x hash_val 0x%x" + " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s" +- " inner_l2=%s inner_l3=%s inner_l4=%s\n", ++ " inner_l2=%s inner_l3=%s inner_l4=%s", + m->data_len, bitfield, m->hash.rss, + (unsigned long)m->ol_flags, + rte_get_ptype_l2_name(m->packet_type), +@@ -1548,7 +1548,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + PMD_RX_LOG(ERR, rxq, + "New buffers allocation failed," +- "dropping incoming packets\n"); ++ "dropping incoming packets"); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += count; + rxq->rx_alloc_errors += count; +@@ -1579,13 +1579,13 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; +- PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type); ++ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d", cqe_type); + + if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) { + fp_cqe = &cqe->fast_path_regular; + } else { + if (cqe_type == 
ETH_RX_CQE_TYPE_SLOW_PATH) { +- PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n"); ++ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE"); + ecore_eth_cqe_completion + (&edev->hwfns[rxq->queue_id % + edev->num_hwfns], +@@ -1611,10 +1611,10 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + #endif + + if (unlikely(qede_tunn_exist(parse_flag))) { +- PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n"); ++ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet"); + if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "L4 csum failed, flags = 0x%x\n", ++ "L4 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; +@@ -1624,7 +1624,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + if (unlikely(qede_check_tunn_csum_l3(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "Outer L3 csum failed, flags = 0x%x\n", ++ "Outer L3 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; +@@ -1659,7 +1659,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + */ + if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "L4 csum failed, flags = 0x%x\n", ++ "L4 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; +@@ -1667,7 +1667,7 @@ qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + } + if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { +- PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n", ++ PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +@@ -1776,7 +1776,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + PMD_RX_LOG(ERR, rxq, + "New buffers allocation failed," +- "dropping incoming packets\n"); ++ "dropping incoming packets"); + dev = &rte_eth_devices[rxq->port_id]; + dev->data->rx_mbuf_alloc_failed += count; + rxq->rx_alloc_errors += count; +@@ -1805,7 +1805,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; +- PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type); ++ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d", cqe_type); + + switch (cqe_type) { + case ETH_RX_CQE_TYPE_REGULAR: +@@ -1823,7 +1823,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + */ + PMD_RX_LOG(INFO, rxq, + "TPA start[%d] - len_on_first_bd %d header %d" +- " [bd_list[0] %d], [seg_len %d]\n", ++ " [bd_list[0] %d], [seg_len %d]", + cqe_start_tpa->tpa_agg_index, + rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd), + cqe_start_tpa->header_len, +@@ -1843,7 +1843,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head; + goto tpa_end; + case ETH_RX_CQE_TYPE_SLOW_PATH: +- PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n"); ++ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE"); + ecore_eth_cqe_completion( + &edev->hwfns[rxq->queue_id % edev->num_hwfns], + (struct eth_slow_path_rx_cqe *)cqe); +@@ -1881,10 +1881,10 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash); + } + if (qede_tunn_exist(parse_flag)) { +- PMD_RX_LOG(INFO, rxq, "Rx 
tunneled packet\n"); ++ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet"); + if (unlikely(qede_check_tunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "L4 csum failed, flags = 0x%x\n", ++ "L4 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; +@@ -1894,7 +1894,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + + if (unlikely(qede_check_tunn_csum_l3(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "Outer L3 csum failed, flags = 0x%x\n", ++ "Outer L3 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD; +@@ -1933,7 +1933,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + */ + if (unlikely(qede_check_notunn_csum_l4(parse_flag))) { + PMD_RX_LOG(ERR, rxq, +- "L4 csum failed, flags = 0x%x\n", ++ "L4 csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; +@@ -1941,7 +1941,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; + } + if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) { +- PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n", ++ PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x", + parse_flag); + rxq->rx_hw_errors++; + ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; +@@ -2117,13 +2117,13 @@ print_tx_bd_info(struct qede_tx_queue *txq, + rte_cpu_to_le_16(bd1->data.bitfields)); + if (bd2) + PMD_TX_LOG(INFO, txq, +- "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n", ++ "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x", + rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1, + bd2->data.bitfields2, bd2->data.tunn_ip_size); + if (bd3) + PMD_TX_LOG(INFO, txq, + "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x " +- "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n", ++ "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x", + rte_cpu_to_le_16(bd3->nbytes), + rte_cpu_to_le_16(bd3->data.bitfields), + rte_cpu_to_le_16(bd3->data.lso_mss), +@@ -2131,7 +2131,7 @@ print_tx_bd_info(struct qede_tx_queue *txq, + bd3->data.tunn_hdr_size_w); + + rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf)); +- PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf); ++ PMD_TX_LOG(INFO, txq, "TX offloads = %s", ol_buf); + } + #endif + +@@ -2201,7 +2201,7 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts, + + #ifdef RTE_LIBRTE_QEDE_DEBUG_TX + if (unlikely(i != nb_pkts)) +- PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n", ++ PMD_TX_LOG(ERR, txq, "TX prepare failed for %u", + nb_pkts - i); + #endif + return i; +@@ -2215,16 +2215,16 @@ qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf, + struct qede_tx_queue *txq) + { + if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff) +- PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n"); ++ PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow"); + if (((mbuf->outer_l2_len + mbuf->outer_l3_len + + MPLSINUDP_HDR_SIZE) / 2) > 0xff) +- PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n"); ++ PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow"); + if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) > + ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) +- PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n"); ++ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow"); + if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) > + ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) +- PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n"); ++ 
PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow"); + } + #endif + +diff --git a/dpdk/drivers/net/sfc/sfc_flow_rss.c b/dpdk/drivers/net/sfc/sfc_flow_rss.c +index e28c943335..8e2749833b 100644 +--- a/dpdk/drivers/net/sfc/sfc_flow_rss.c ++++ b/dpdk/drivers/net/sfc/sfc_flow_rss.c +@@ -303,9 +303,9 @@ sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx) + + TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries); + rte_free(ctx->qid_offsets); +- rte_free(ctx); +- + sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx); ++ ++ rte_free(ctx); + } + + static int diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c -index e5ec0ae49d..60ff6d2181 100644 +index e5ec0ae49d..8f74f10390 100644 --- a/dpdk/drivers/net/sfc/sfc_mae.c +++ b/dpdk/drivers/net/sfc/sfc_mae.c -@@ -1350,8 +1350,8 @@ sfc_mae_action_set_list_add(struct sfc_adapter *sa, +@@ -400,9 +400,8 @@ sfc_mae_outer_rule_del(struct sfc_adapter *sa, + efx_mae_match_spec_fini(sa->nic, rule->match_spec); + + TAILQ_REMOVE(&mae->outer_rules, rule, entries); +- rte_free(rule); +- + sfc_dbg(sa, "deleted outer_rule=%p", rule); ++ rte_free(rule); + } + + static int +@@ -585,9 +584,8 @@ sfc_mae_mac_addr_del(struct sfc_adapter *sa, struct sfc_mae_mac_addr *mac_addr) + } + + TAILQ_REMOVE(&mae->mac_addrs, mac_addr, entries); +- rte_free(mac_addr); +- + sfc_dbg(sa, "deleted mac_addr=%p", mac_addr); ++ rte_free(mac_addr); + } + + enum sfc_mae_mac_addr_type { +@@ -785,10 +783,10 @@ sfc_mae_encap_header_del(struct sfc_adapter *sa, + } + + TAILQ_REMOVE(&mae->encap_headers, encap_header, entries); ++ sfc_dbg(sa, "deleted encap_header=%p", encap_header); ++ + rte_free(encap_header->buf); + rte_free(encap_header); +- +- sfc_dbg(sa, "deleted encap_header=%p", encap_header); + } + + static int +@@ -983,9 +981,8 @@ sfc_mae_counter_del(struct sfc_adapter *sa, struct sfc_mae_counter *counter) + } + + TAILQ_REMOVE(&mae->counters, counter, entries); +- rte_free(counter); +- + sfc_dbg(sa, "deleted counter=%p", counter); ++ rte_free(counter); + } + + static int +@@ -1165,9 +1162,8 @@ sfc_mae_action_set_del(struct sfc_adapter *sa, + sfc_mae_mac_addr_del(sa, action_set->src_mac_addr); + sfc_mae_counter_del(sa, action_set->counter); + TAILQ_REMOVE(&mae->action_sets, action_set, entries); +- rte_free(action_set); +- + sfc_dbg(sa, "deleted action_set=%p", action_set); ++ rte_free(action_set); + } + + static int +@@ -1350,8 +1346,8 @@ sfc_mae_action_set_list_add(struct sfc_adapter *sa, action_set_list->action_sets = rte_calloc("sfc_mae_action_set_list_action_sets", @@ -35204,6 +51686,30 @@ index e5ec0ae49d..60ff6d2181 100644 if (action_set_list->action_sets == NULL) { sfc_err(sa, "failed to allocate action set list"); rte_free(action_set_list); +@@ -1401,10 +1397,10 @@ sfc_mae_action_set_list_del(struct sfc_adapter *sa, + sfc_mae_action_set_del(sa, action_set_list->action_sets[i]); + + TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries); ++ sfc_dbg(sa, "deleted action_set_list=%p", action_set_list); ++ + rte_free(action_set_list->action_sets); + rte_free(action_set_list); +- +- sfc_dbg(sa, "deleted action_set_list=%p", action_set_list); + } + + static int +@@ -1667,9 +1663,8 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa, + sfc_mae_outer_rule_del(sa, rule->outer_rule); + + TAILQ_REMOVE(&mae->action_rules, rule, entries); +- rte_free(rule); +- + sfc_dbg(sa, "deleted action_rule=%p", rule); ++ rte_free(rule); + } + + static int diff --git a/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c b/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c index 
085523fe03..95e705c553 100644 --- a/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c @@ -35225,7 +51731,7 @@ index 085523fe03..95e705c553 100644 obj_file, lib_file, diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index b41fa971cb..3fa03cdbee 100644 +index b41fa971cb..93bba3cec1 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -1803,6 +1803,7 @@ tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) @@ -35236,6 +51742,20 @@ index b41fa971cb..3fa03cdbee 100644 }; return ptypes; +@@ -2392,9 +2393,10 @@ tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer) + /* Fill file descriptors for all queues */ + reply.num_fds = 0; + reply_param->rxq_count = 0; +- if (dev->data->nb_rx_queues + dev->data->nb_tx_queues > +- RTE_MP_MAX_FD_NUM){ +- TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds"); ++ ++ if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) { ++ TAP_LOG(ERR, "Number of rx/tx queues %u exceeds max number of fds %u", ++ dev->data->nb_rx_queues, RTE_PMD_TAP_MAX_QUEUES); + return -1; + } + diff --git a/dpdk/drivers/net/tap/tap_flow.c b/dpdk/drivers/net/tap/tap_flow.c index ed4d42f92f..79cd6a12ca 100644 --- a/dpdk/drivers/net/tap/tap_flow.c @@ -35351,6 +51871,20 @@ index ed4d42f92f..79cd6a12ca 100644 return 0; if (set) { struct rte_flow *remote_flow; +diff --git a/dpdk/drivers/net/tap/tap_netlink.c b/dpdk/drivers/net/tap/tap_netlink.c +index 75af3404b0..c1f7ff56da 100644 +--- a/dpdk/drivers/net/tap/tap_netlink.c ++++ b/dpdk/drivers/net/tap/tap_netlink.c +@@ -301,7 +301,8 @@ tap_nlattr_add(struct nlmsghdr *nh, unsigned short type, + rta = (struct rtattr *)NLMSG_TAIL(nh); + rta->rta_len = RTA_LENGTH(data_len); + rta->rta_type = type; +- memcpy(RTA_DATA(rta), data, data_len); ++ if (data_len > 0) ++ memcpy(RTA_DATA(rta), data, data_len); + nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + } + diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c index 5993eec4e6..0e0176974d 100644 --- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c @@ -35413,7 +51947,7 @@ index 322c8159cb..47f3d13755 100644 #endif /* __THUNDERX_NICVF_MBOX__ */ diff --git a/dpdk/drivers/net/thunderx/nicvf_ethdev.c b/dpdk/drivers/net/thunderx/nicvf_ethdev.c -index a504d41dfe..ba2ef4058e 100644 +index a504d41dfe..ee563c55ce 100644 --- a/dpdk/drivers/net/thunderx/nicvf_ethdev.c +++ b/dpdk/drivers/net/thunderx/nicvf_ethdev.c @@ -58,6 +58,10 @@ RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE); @@ -35442,6 +51976,15 @@ index a504d41dfe..ba2ef4058e 100644 }; static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; +@@ -1811,7 +1817,7 @@ nicvf_dev_start(struct rte_eth_dev *dev) + /* Apply new link configurations if changed */ + ret = nicvf_apply_link_speed(dev); + if (ret) { +- PMD_INIT_LOG(ERR, "Failed to set link configuration\n"); ++ PMD_INIT_LOG(ERR, "Failed to set link configuration"); + return ret; + } + @@ -2183,9 +2189,22 @@ nicvf_eth_dev_uninit(struct rte_eth_dev *dev) nicvf_dev_close(dev); return 0; @@ -35592,6 +52135,18 @@ index 7031589f7c..4bf9da2d4c 100644 s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); +diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c +index 029a0a1fe1..9770c88bc8 100644 +--- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c ++++ b/dpdk/drivers/net/txgbe/base/txgbe_mng.c +@@ -58,6 +58,7 @@ 
txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) + + dword_len = length >> 2; + ++ txgbe_flush(hw); + /* The device driver writes the relevant command block + * into the ram area. + */ diff --git a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h b/dpdk/drivers/net/txgbe/base/txgbe_osdep.h index b62c0b0824..0d9492c3cb 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_osdep.h @@ -35605,7 +52160,7 @@ index b62c0b0824..0d9492c3cb 100644 #include "../txgbe_logs.h" diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -index 79290a7afe..a2984f1106 100644 +index 79290a7afe..db02b1b81b 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h @@ -1022,6 +1022,8 @@ enum txgbe_5tuple_protocol { @@ -35617,6 +52172,15 @@ index 79290a7afe..a2984f1106 100644 /****************************************************************************** * Statistic Registers ******************************************************************************/ +@@ -1195,7 +1197,7 @@ enum txgbe_5tuple_protocol { + #define TXGBE_ICRMISC_ANDONE MS(19, 0x1) /* link auto-nego done */ + #define TXGBE_ICRMISC_ERRIG MS(20, 0x1) /* integrity error */ + #define TXGBE_ICRMISC_SPI MS(21, 0x1) /* SPI interface */ +-#define TXGBE_ICRMISC_VFMBX MS(22, 0x1) /* VF-PF message box */ ++#define TXGBE_ICRMISC_VFMBX MS(23, 0x1) /* VF-PF message box */ + #define TXGBE_ICRMISC_GPIO MS(26, 0x1) /* GPIO interrupt */ + #define TXGBE_ICRMISC_ERRPCI MS(27, 0x1) /* pcie request error */ + #define TXGBE_ICRMISC_HEAT MS(28, 0x1) /* overheat detection */ @@ -1236,6 +1238,9 @@ enum txgbe_5tuple_protocol { #define TXGBE_TCPTMR 0x000170 #define TXGBE_ITRSEL 0x000180 @@ -35640,10 +52204,19 @@ index 75e839b7de..f52736cae9 100644 #define TXGBE_ALIGN 128 /* as intel did */ diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 6bc231a130..ad29c3cfec 100644 +index 6bc231a130..25b657d0ff 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -@@ -601,6 +601,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -331,6 +331,8 @@ txgbe_pf_reset_hw(struct txgbe_hw *hw) + status = hw->mac.reset_hw(hw); + + ctrl_ext = rd32(hw, TXGBE_PORTCTL); ++ /* let hardware know driver is loaded */ ++ ctrl_ext |= TXGBE_PORTCTL_DRVLOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= TXGBE_PORTCTL_RSTDONE; + wr32(hw, TXGBE_PORTCTL, ctrl_ext); +@@ -601,6 +603,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) hw->hw_addr = (void *)pci_dev->mem_resource[0].addr; /* Vendor and Device ID need to be set before init of shared code */ @@ -35651,7 +52224,16 @@ index 6bc231a130..ad29c3cfec 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) { -@@ -734,6 +735,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -611,7 +614,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) + ssid = txgbe_flash_read_dword(hw, 0xFFFDC); + if (ssid == 0x1) { + PMD_INIT_LOG(ERR, +- "Read of internal subsystem device id failed\n"); ++ "Read of internal subsystem device id failed"); + return -ENODEV; + } + hw->subsystem_device_id = (u16)ssid >> 8 | (u16)ssid << 8; +@@ -734,6 +737,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) PMD_INIT_LOG(ERR, 
"Failed to allocate %d bytes needed to store MAC addresses", RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC); @@ -35660,7 +52242,7 @@ index 6bc231a130..ad29c3cfec 100644 return -ENOMEM; } -@@ -901,6 +904,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +@@ -901,6 +906,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) if (!fdir_info->hash_map) { PMD_INIT_LOG(ERR, "Failed to allocate memory for fdir hash map!"); @@ -35668,7 +52250,7 @@ index 6bc231a130..ad29c3cfec 100644 return -ENOMEM; } fdir_info->mask_added = FALSE; -@@ -936,6 +940,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +@@ -936,6 +942,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) if (!l2_tn_info->hash_map) { PMD_INIT_LOG(ERR, "Failed to allocate memory for L2 TN hash map!"); @@ -35676,7 +52258,7 @@ index 6bc231a130..ad29c3cfec 100644 return -ENOMEM; } l2_tn_info->e_tag_en = FALSE; -@@ -963,7 +968,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) +@@ -963,7 +970,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) if (!ethdev) return 0; @@ -35685,7 +52267,7 @@ index 6bc231a130..ad29c3cfec 100644 } static struct rte_pci_driver rte_txgbe_pmd = { -@@ -999,41 +1004,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -999,41 +1006,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } static void @@ -35738,7 +52320,7 @@ index 6bc231a130..ad29c3cfec 100644 } static int -@@ -1258,9 +1247,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +@@ -1258,9 +1249,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) @@ -35750,7 +52332,7 @@ index 6bc231a130..ad29c3cfec 100644 } } -@@ -1322,6 +1311,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +@@ -1322,6 +1313,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) static int txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { @@ -35764,7 +52346,17 @@ index 6bc231a130..ad29c3cfec 100644 txgbe_config_vlan_strip_on_all_queues(dev, mask); txgbe_vlan_offload_config(dev, mask); -@@ -1716,6 +1712,8 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1555,6 +1553,9 @@ static void txgbe_reinit_gpio_intr(struct txgbe_hw *hw) + wr32(hw, TXGBE_GPIOINTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIORAWINTSTAT); + ++ if (reg & TXGBE_GPIOBIT_0) ++ wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_0); ++ + if (reg & TXGBE_GPIOBIT_2) + wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_2); + +@@ -1716,6 +1717,8 @@ txgbe_dev_start(struct rte_eth_dev *dev) hw->mac.get_link_status = true; hw->dev_start = true; @@ -35773,7 +52365,7 @@ index 6bc231a130..ad29c3cfec 100644 /* workaround for GPIO intr lost when mng_veto bit is set */ if (txgbe_check_reset_blocked(hw)) txgbe_reinit_gpio_intr(hw); -@@ -1979,6 +1977,8 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1979,6 +1982,8 @@ txgbe_dev_stop(struct rte_eth_dev *dev) adapter->rss_reta_updated = 0; wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); @@ -35782,8 +52374,13 @@ index 6bc231a130..ad29c3cfec 100644 hw->adapter_stopped = true; dev->data->dev_started = 0; hw->dev_start = false; -@@ -2061,6 +2061,8 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -2059,8 +2064,13 @@ txgbe_dev_close(struct rte_eth_dev *dev) + + ret = txgbe_dev_stop(dev); ++ /* Let firmware take over control of hardware */ ++ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_DRVLOAD, 0); ++ txgbe_dev_free_queues(dev); + 
txgbe_set_pcie_master(hw, false); @@ -35791,7 +52388,7 @@ index 6bc231a130..ad29c3cfec 100644 /* reprogram the RAR[0] in case user changed it. */ txgbe_set_rar(hw, 0, hw->mac.addr, 0, true); -@@ -2671,7 +2673,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -2671,7 +2681,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; @@ -35802,7 +52399,25 @@ index 6bc231a130..ad29c3cfec 100644 dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; dev_info->max_vfs = pci_dev->max_vfs; -@@ -2876,6 +2880,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2752,7 +2764,7 @@ txgbe_dev_detect_sfp(void *param) + PMD_DRV_LOG(INFO, "SFP not present."); + } else if (err == 0) { + hw->mac.setup_sfp(hw); +- PMD_DRV_LOG(INFO, "detected SFP+: %d\n", hw->phy.sfp_type); ++ PMD_DRV_LOG(INFO, "detected SFP+: %d", hw->phy.sfp_type); + txgbe_dev_setup_link_alarm_handler(dev); + txgbe_dev_link_update(dev, 0); + } +@@ -2767,6 +2779,8 @@ txgbe_dev_sfp_event(struct rte_eth_dev *dev) + + wr32(hw, TXGBE_GPIOINTMASK, 0xFF); + reg = rd32(hw, TXGBE_GPIORAWINTSTAT); ++ if (reg & TXGBE_GPIOBIT_0) ++ wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_0); + if (reg & TXGBE_GPIOBIT_2) { + wr32(hw, TXGBE_GPIOEOI, TXGBE_GPIOBIT_2); + rte_eal_alarm_set(1000 * 100, txgbe_dev_detect_sfp, dev); +@@ -2876,6 +2890,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, bool link_up; int err; int wait = 1; @@ -35810,7 +52425,7 @@ index 6bc231a130..ad29c3cfec 100644 memset(&link, 0, sizeof(link)); link.link_status = RTE_ETH_LINK_DOWN; -@@ -2963,9 +2968,14 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2963,9 +2978,14 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, } /* Re configure MAC RX */ @@ -35826,7 +52441,7 @@ index 6bc231a130..ad29c3cfec 100644 return rte_eth_linkstatus_set(dev, &link); } -@@ -3683,12 +3693,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -3683,12 +3703,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; } @@ -35841,7 +52456,7 @@ index 6bc231a130..ad29c3cfec 100644 return 0; } -@@ -3839,13 +3845,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -3839,13 +3855,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct txgbe_hw *hw = TXGBE_DEV_HW(dev); if (queue_id < 32) { @@ -35861,7 +52476,7 @@ index 6bc231a130..ad29c3cfec 100644 } rte_intr_enable(intr_handle); -@@ -3860,11 +3866,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -3860,11 +3876,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) if (queue_id < 32) { mask = rd32(hw, TXGBE_IMS(0)); @@ -35875,7 +52490,7 @@ index 6bc231a130..ad29c3cfec 100644 wr32(hw, TXGBE_IMS(1), mask); } -@@ -3898,7 +3904,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, +@@ -3898,7 +3914,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, wr32(hw, TXGBE_IVARMISC, tmp); } else { /* rx or tx causes */ @@ -35884,7 +52499,7 @@ index 6bc231a130..ad29c3cfec 100644 idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -4004,6 +4010,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, +@@ -4004,6 +4020,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, struct 
txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); uint32_t syn_info; uint32_t synqf; @@ -35892,7 +52507,7 @@ index 6bc231a130..ad29c3cfec 100644 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; -@@ -4013,7 +4020,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, +@@ -4013,7 +4030,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, if (add) { if (syn_info & TXGBE_SYNCLS_ENA) return -EINVAL; @@ -35905,7 +52520,7 @@ index 6bc231a130..ad29c3cfec 100644 synqf |= TXGBE_SYNCLS_ENA; if (filter->hig_pri) -@@ -4082,7 +4093,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, +@@ -4082,7 +4103,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, wr32(hw, TXGBE_5TFPORT(i), sdpqf); wr32(hw, TXGBE_5TFCTL0(i), ftqf); @@ -35917,7 +52532,7 @@ index 6bc231a130..ad29c3cfec 100644 wr32(hw, TXGBE_5TFCTL1(i), l34timir); } -@@ -4366,7 +4380,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, +@@ -4366,7 +4390,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, if (add) { etqf = TXGBE_ETFLT_ENA; etqf |= TXGBE_ETFLT_ETID(filter->ether_type); @@ -36040,8 +52655,209 @@ index a198b6781b..f627ab681d 100644 node = txgbe_fdir_filter_lookup(info, &rule->input); if (node) { if (!update) { +diff --git a/dpdk/drivers/net/txgbe/txgbe_ipsec.c b/dpdk/drivers/net/txgbe/txgbe_ipsec.c +index f9f8108fb8..4af49dd802 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_ipsec.c ++++ b/dpdk/drivers/net/txgbe/txgbe_ipsec.c +@@ -100,7 +100,7 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) + /* Fail if no match and no free entries*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Rx IP table\n"); ++ "No free entry left in the Rx IP table"); + return -1; + } + +@@ -114,7 +114,7 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Rx SA table\n"); ++ "No free entry left in the Rx SA table"); + return -1; + } + +@@ -210,7 +210,7 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session) + /* Fail if no free entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "No free entry left in the Tx SA table\n"); ++ "No free entry left in the Tx SA table"); + return -1; + } + +@@ -269,7 +269,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match*/ + if (ip_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Rx IP table\n"); ++ "Entry not found in the Rx IP table"); + return -1; + } + +@@ -284,7 +284,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Rx SA table\n"); ++ "Entry not found in the Rx SA table"); + return -1; + } + +@@ -329,7 +329,7 @@ txgbe_crypto_remove_sa(struct rte_eth_dev *dev, + /* Fail if no match entries*/ + if (sa_index < 0) { + PMD_DRV_LOG(ERR, +- "Entry not found in the Tx SA table\n"); ++ "Entry not found in the Tx SA table"); + return -1; + } + reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3); +@@ -359,7 +359,7 @@ txgbe_crypto_create_session(void *device, + if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD || + conf->crypto_xform->aead.algo != + RTE_CRYPTO_AEAD_AES_GCM) { +- PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n"); ++ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode"); + return -ENOTSUP; + } + aead_xform = &conf->crypto_xform->aead; +@@ -368,14 +368,14 @@ txgbe_crypto_create_session(void *device, + if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) { + ic_session->op = 
TXGBE_OP_AUTHENTICATED_DECRYPTION; + } else { +- PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n"); ++ PMD_DRV_LOG(ERR, "IPsec decryption not enabled"); + return -ENOTSUP; + } + } else { + if (dev_conf->txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY) { + ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION; + } else { +- PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n"); ++ PMD_DRV_LOG(ERR, "IPsec encryption not enabled"); + return -ENOTSUP; + } + } +@@ -389,7 +389,7 @@ txgbe_crypto_create_session(void *device, + + if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) { + if (txgbe_crypto_add_sa(ic_session)) { +- PMD_DRV_LOG(ERR, "Failed to add SA\n"); ++ PMD_DRV_LOG(ERR, "Failed to add SA"); + return -EPERM; + } + } +@@ -411,12 +411,12 @@ txgbe_crypto_remove_session(void *device, + struct txgbe_crypto_session *ic_session = SECURITY_GET_SESS_PRIV(session); + + if (eth_dev != ic_session->dev) { +- PMD_DRV_LOG(ERR, "Session not bound to this device\n"); ++ PMD_DRV_LOG(ERR, "Session not bound to this device"); + return -ENODEV; + } + + if (txgbe_crypto_remove_sa(eth_dev, ic_session)) { +- PMD_DRV_LOG(ERR, "Failed to remove session\n"); ++ PMD_DRV_LOG(ERR, "Failed to remove session"); + return -EFAULT; + } + +diff --git a/dpdk/drivers/net/txgbe/txgbe_pf.c b/dpdk/drivers/net/txgbe/txgbe_pf.c +index 176f79005c..700632bd88 100644 +--- a/dpdk/drivers/net/txgbe/txgbe_pf.c ++++ b/dpdk/drivers/net/txgbe/txgbe_pf.c +@@ -85,7 +85,7 @@ int txgbe_pf_host_init(struct rte_eth_dev *eth_dev) + sizeof(struct txgbe_vf_info) * vf_num, 0); + if (*vfinfo == NULL) { + PMD_INIT_LOG(ERR, +- "Cannot allocate memory for private VF data\n"); ++ "Cannot allocate memory for private VF data"); + return -ENOMEM; + } + +@@ -167,14 +167,14 @@ txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev) + struct txgbe_ethertype_filter ethertype_filter; + + if (!hw->mac.set_ethertype_anti_spoofing) { +- PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n"); ++ PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported."); + return; + } + + i = txgbe_ethertype_filter_lookup(filter_info, + TXGBE_ETHERTYPE_FLOW_CTRL); + if (i >= 0) { +- PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n"); ++ PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!"); + return; + } + +@@ -187,7 +187,7 @@ txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev) + i = txgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (i < 0) { +- PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n"); ++ PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control."); + return; + } + +@@ -408,7 +408,7 @@ txgbe_disable_vf_mc_promisc(struct rte_eth_dev *eth_dev, uint32_t vf) + + vmolr = rd32(hw, TXGBE_POOLETHCTL(vf)); + +- PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf); ++ PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous", vf); + + vmolr &= ~TXGBE_POOLETHCTL_MCP; + +@@ -570,7 +570,7 @@ txgbe_negotiate_vf_api(struct rte_eth_dev *eth_dev, + break; + } + +- PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n", ++ PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d", + api_version, vf); + + return -1; +@@ -614,7 +614,7 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf) + case RTE_ETH_MQ_TX_NONE: + case RTE_ETH_MQ_TX_DCB: + PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u" +- ", but its tx mode = %d\n", vf, ++ ", but its tx mode = 
%d", vf, + eth_conf->txmode.mq_mode); + return -1; + +@@ -648,7 +648,7 @@ txgbe_get_vf_queues(struct rte_eth_dev *eth_dev, uint32_t vf, uint32_t *msgbuf) + break; + + default: +- PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n", ++ PMD_DRV_LOG(ERR, "PF work with invalid mode = %d", + eth_conf->txmode.mq_mode); + return -1; + } +@@ -704,7 +704,7 @@ txgbe_set_vf_mc_promisc(struct rte_eth_dev *eth_dev, + if (!(fctrl & TXGBE_PSRCTL_UCP)) { + /* VF promisc requires PF in promisc */ + PMD_DRV_LOG(ERR, +- "Enabling VF promisc requires PF in promisc\n"); ++ "Enabling VF promisc requires PF in promisc"); + return -1; + } + +@@ -741,7 +741,7 @@ txgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) + + if (index) { + if (!rte_is_valid_assigned_ether_addr(ea)) { +- PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf); ++ PMD_DRV_LOG(ERR, "set invalid mac vf:%d", vf); + return -1; + } + diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c -index 1cd4b25965..2efc2bcf29 100644 +index 1cd4b25965..dcea5c23e2 100644 --- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c +++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c @@ -564,26 +564,17 @@ tx_desc_ol_flags_to_ptype(uint64_t oflags) @@ -36184,7 +53000,17 @@ index 1cd4b25965..2efc2bcf29 100644 rte_free(txq); } } -@@ -2341,6 +2347,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -2248,8 +2254,7 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev) + + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT; + +- tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + #ifdef RTE_LIB_SECURITY + if (dev->security_ctx) +@@ -2341,6 +2346,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } @@ -36192,7 +53018,7 @@ index 1cd4b25965..2efc2bcf29 100644 txq->nb_tx_desc = nb_desc; txq->tx_free_thresh = tx_free_thresh; txq->pthresh = tx_conf->tx_thresh.pthresh; -@@ -2458,6 +2465,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) +@@ -2458,6 +2464,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) txgbe_rx_queue_release_mbufs(rxq); rte_free(rxq->sw_ring); rte_free(rxq->sw_sc_ring); @@ -36200,7 +53026,7 @@ index 1cd4b25965..2efc2bcf29 100644 rte_free(rxq); } } -@@ -2551,6 +2559,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) +@@ -2551,6 +2558,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); rxq->rx_tail = 0; rxq->nb_rx_hold = 0; @@ -36208,7 +53034,7 @@ index 1cd4b25965..2efc2bcf29 100644 rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; } -@@ -2631,6 +2640,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, +@@ -2631,6 +2639,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } @@ -36216,7 +53042,7 @@ index 1cd4b25965..2efc2bcf29 100644 /* * Zero init all the descriptors in the ring. */ -@@ -5069,6 +5079,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, +@@ -5069,6 +5078,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, uint32_t reta; uint16_t i; uint16_t j; @@ -36224,7 +53050,7 @@ index 1cd4b25965..2efc2bcf29 100644 struct rte_eth_rss_conf rss_conf = { .rss_key = conf->conf.key_len ? 
(void *)(uintptr_t)conf->conf.key : NULL, -@@ -5101,7 +5112,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, +@@ -5101,7 +5111,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) { if (j == conf->conf.queue_num) j = 0; @@ -36295,7 +53121,7 @@ index 3c05ac9cc0..c10252506b 100644 do { r = sendmsg(fd, &msgh, 0); diff --git a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c -index af1f8c8237..1bfd6aba80 100644 +index af1f8c8237..d93d443ec9 100644 --- a/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/dpdk/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -20,6 +20,7 @@ @@ -36546,6 +53372,27 @@ index af1f8c8237..1bfd6aba80 100644 dev->queue_pairs = q_pairs; return ret; +@@ -1056,7 +1088,7 @@ virtio_user_dev_create_shadow_cvq(struct virtio_user_dev *dev, struct virtqueue + scvq = virtqueue_alloc(&dev->hw, vq->vq_queue_index, vq->vq_nentries, + VTNET_CQ, SOCKET_ID_ANY, name); + if (!scvq) { +- PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq\n", dev->path); ++ PMD_INIT_LOG(ERR, "(%s) Failed to alloc shadow control vq", dev->path); + return -ENOMEM; + } + +diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +index 3a31642899..f176df86d4 100644 +--- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c ++++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c +@@ -199,6 +199,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq, + vring->device = (void *)(uintptr_t)used_addr; + dev->packed_queues[queue_idx].avail_wrap_counter = true; + dev->packed_queues[queue_idx].used_wrap_counter = true; ++ dev->packed_queues[queue_idx].used_idx = 0; + + for (i = 0; i < vring->num; i++) + vring->desc[i].flags = 0; diff --git a/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h b/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h index 24c235876e..a6bb281d8d 100644 --- a/dpdk/drivers/net/vmxnet3/base/vmxnet3_defs.h @@ -36559,7 +53406,7 @@ index 24c235876e..a6bb281d8d 100644 VMXNET3_CMD_GET_DCR0_REG, } Vmxnet3_Cmd; diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -index e49191718a..70ae9c6035 100644 +index e49191718a..8305c27d15 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -257,6 +257,7 @@ vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw) @@ -36578,7 +53425,28 @@ index e49191718a..70ae9c6035 100644 /* * Gets tx data ring descriptor size. 
-@@ -1129,6 +1131,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -400,6 +402,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; ++ hw->adapter_stopped = TRUE; + hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; + hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; + +@@ -1092,10 +1095,10 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) + ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); + if (ret != 0) + PMD_INIT_LOG(DEBUG, +- "Failed in setup memory region cmd\n"); ++ "Failed in setup memory region cmd"); + ret = 0; + } else { +- PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n"); ++ PMD_INIT_LOG(DEBUG, "Failed to setup memory region"); + } + } else { + PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)", +@@ -1129,6 +1132,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) /* Setting proper Rx Mode and issue Rx Mode Update command */ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); @@ -36586,7 +53454,7 @@ index e49191718a..70ae9c6035 100644 /* Setup interrupt callback */ rte_intr_callback_register(dev->intr_handle, vmxnet3_interrupt_handler, dev); -@@ -1140,6 +1143,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -1140,6 +1144,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) /* enable all intrs */ vmxnet3_enable_all_intrs(hw); @@ -36594,7 +53462,88 @@ index e49191718a..70ae9c6035 100644 vmxnet3_process_events(dev); -@@ -1928,11 +1932,13 @@ done: +@@ -1465,42 +1470,52 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + struct vmxnet3_hw *hw = dev->data->dev_private; + struct UPT1_TxStats txStats; + struct UPT1_RxStats rxStats; ++ uint64_t packets, bytes; + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + for (i = 0; i < hw->num_tx_queues; i++) { + vmxnet3_tx_stats_get(hw, i, &txStats); + +- stats->q_opackets[i] = txStats.ucastPktsTxOK + ++ packets = txStats.ucastPktsTxOK + + txStats.mcastPktsTxOK + + txStats.bcastPktsTxOK; + +- stats->q_obytes[i] = txStats.ucastBytesTxOK + ++ bytes = txStats.ucastBytesTxOK + + txStats.mcastBytesTxOK + + txStats.bcastBytesTxOK; + +- stats->opackets += stats->q_opackets[i]; +- stats->obytes += stats->q_obytes[i]; ++ stats->opackets += packets; ++ stats->obytes += bytes; + stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard; ++ ++ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { ++ stats->q_opackets[i] = packets; ++ stats->q_obytes[i] = bytes; ++ } + } + + for (i = 0; i < hw->num_rx_queues; i++) { + vmxnet3_rx_stats_get(hw, i, &rxStats); + +- stats->q_ipackets[i] = rxStats.ucastPktsRxOK + ++ packets = rxStats.ucastPktsRxOK + + rxStats.mcastPktsRxOK + + rxStats.bcastPktsRxOK; + +- stats->q_ibytes[i] = rxStats.ucastBytesRxOK + ++ bytes = rxStats.ucastBytesRxOK + + rxStats.mcastBytesRxOK + + rxStats.bcastBytesRxOK; + +- stats->ipackets += stats->q_ipackets[i]; +- stats->ibytes += stats->q_ibytes[i]; +- +- stats->q_errors[i] = rxStats.pktsRxError; ++ stats->ipackets += packets; ++ stats->ibytes += bytes; + stats->ierrors += rxStats.pktsRxError; + stats->imissed += rxStats.pktsRxOutOfBuf; ++ ++ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { ++ stats->q_ipackets[i] = packets; ++ stats->q_ibytes[i] = bytes; ++ stats->q_errors[i] = rxStats.pktsRxError; ++ } + } + + return 0; +@@ -1516,8 +1531,6 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev) + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + +- 
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES); +- + for (i = 0; i < hw->num_tx_queues; i++) { + vmxnet3_hw_tx_stats_get(hw, i, &txStats); + memcpy(&hw->snapshot_tx_stats[i], &txStats, +@@ -1561,7 +1574,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev, + dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; + dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */ + dev_info->min_mtu = VMXNET3_MIN_MTU; +- dev_info->max_mtu = VMXNET3_MAX_MTU; ++ dev_info->max_mtu = VMXNET3_VERSION_GE_6(hw) ? VMXNET3_V6_MAX_MTU : VMXNET3_MAX_MTU; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G; + dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS; + +@@ -1928,11 +1941,13 @@ done: static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { @@ -36608,6 +53557,21 @@ index e49191718a..70ae9c6035 100644 return 0; } +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h +index 2b3e2c4caa..e9ded6663d 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h +@@ -121,8 +121,8 @@ struct vmxnet3_hw { + #define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t)) + UPT1_TxStats saved_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; + UPT1_RxStats saved_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; +- UPT1_TxStats snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES]; +- UPT1_RxStats snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES]; ++ UPT1_TxStats snapshot_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; ++ UPT1_RxStats snapshot_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; + uint16_t tx_prod_offset; + uint16_t rx_prod_offset[2]; + /* device capability bit map */ diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h index 74154e3a1a..ae8542811a 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h @@ -36621,6 +53585,511 @@ index 74154e3a1a..ae8542811a 100644 "%s(): " fmt "\n", __func__, ## args) #define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +index 380f41f98b..e226641fdf 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_rxtx.c +@@ -1341,7 +1341,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev) + /* Zero number of descriptors in the configuration of the RX queue */ + if (ret == 0) { + PMD_INIT_LOG(ERR, +- "Invalid configuration in Rx queue: %d, buffers ring: %d\n", ++ "Invalid configuration in Rx queue: %d, buffers ring: %d", + i, j); + return -EINVAL; + } +diff --git a/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c +index aeee4ac289..de8c024abb 100644 +--- a/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c ++++ b/dpdk/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c +@@ -68,7 +68,7 @@ dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_CMDIF_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -99,14 +99,14 @@ dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev, + do { + ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1); + if (ret < 0 && ret != -EBUSY) +- DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret); ++ DPAA2_CMDIF_ERR("Transmit failure with err: %d", ret); + retry_count++; + } while ((ret == -EBUSY) && (retry_count < DPAA2_MAX_TX_RETRY_COUNT)); + + if (ret < 0) + return ret; + +- DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n"); ++ 
DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet"); + + return 1; + } +@@ -133,7 +133,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev, + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_CMDIF_ERR( +- "Failed to allocate IO portal, tid: %d\n", ++ "Failed to allocate IO portal, tid: %d", + rte_gettid()); + return 0; + } +@@ -152,7 +152,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev, + + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { +- DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n"); ++ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy"); + /* Portal was busy, try again */ + continue; + } +@@ -169,7 +169,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev, + /* Check for valid frame. */ + status = (uint8_t)qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { +- DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n"); ++ DPAA2_CMDIF_DP_DEBUG("No frame is delivered"); + return 0; + } + +@@ -181,7 +181,7 @@ dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev, + cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd); + cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd); + +- DPAA2_CMDIF_DP_DEBUG("packet received\n"); ++ DPAA2_CMDIF_DP_DEBUG("packet received"); + + return 1; + } +diff --git a/dpdk/drivers/raw/ifpga/afu_pmd_n3000.c b/dpdk/drivers/raw/ifpga/afu_pmd_n3000.c +index 67b3941265..6aae1b224e 100644 +--- a/dpdk/drivers/raw/ifpga/afu_pmd_n3000.c ++++ b/dpdk/drivers/raw/ifpga/afu_pmd_n3000.c +@@ -1506,7 +1506,7 @@ static int dma_afu_set_irqs(struct afu_rawdev *dev, uint32_t vec_start, + rte_memcpy(&irq_set->data, efds, sizeof(*efds) * count); + ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set); + if (ret) +- IFPGA_RAWDEV_PMD_ERR("Error enabling MSI-X interrupts\n"); ++ IFPGA_RAWDEV_PMD_ERR("Error enabling MSI-X interrupts"); + + rte_free(irq_set); + return ret; +diff --git a/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c b/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c +index dd97a5f9fd..d5a9ceb6e3 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c ++++ b/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c +@@ -6,6 +6,13 @@ + #include + #include "opae_osdep.h" + ++#ifndef TAILQ_FOREACH_SAFE ++#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ ++ for ((var) = TAILQ_FIRST((head)); \ ++ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ ++ (var) = (tvar)) ++#endif ++ + int max10_sys_read(struct intel_max10_device *dev, + unsigned int offset, unsigned int *val) + { +@@ -746,9 +753,9 @@ static int fdt_get_named_reg(const void *fdt, int node, const char *name, + + static void max10_sensor_uinit(struct intel_max10_device *dev) + { +- struct opae_sensor_info *info; ++ struct opae_sensor_info *info, *next; + +- TAILQ_FOREACH(info, &dev->opae_sensor_list, node) { ++ TAILQ_FOREACH_SAFE(info, &dev->opae_sensor_list, node, next) { + TAILQ_REMOVE(&dev->opae_sensor_list, info, node); + opae_free(info); + } +diff --git a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +index f89bd3f9e2..3b4d771d1b 100644 +--- a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c ++++ b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +@@ -383,7 +383,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev, + goto fail; + + if (value == 0xdeadbeef) { +- IFPGA_RAWDEV_PMD_DEBUG("dev_id %d sensor %s value %x\n", ++ IFPGA_RAWDEV_PMD_DEBUG("dev_id %d sensor %s value %x", + raw_dev->dev_id, sensor->name, value); + continue; + } +@@ -391,13 +391,13 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev, + /* monitor temperature sensors */ + if (!strcmp(sensor->name, 
"Board Temperature") || + !strcmp(sensor->name, "FPGA Die Temperature")) { +- IFPGA_RAWDEV_PMD_DEBUG("read sensor %s %d %d %d\n", ++ IFPGA_RAWDEV_PMD_DEBUG("read sensor %s %d %d %d", + sensor->name, value, sensor->high_warn, + sensor->high_fatal); + + if (HIGH_WARN(sensor, value) || + LOW_WARN(sensor, value)) { +- IFPGA_RAWDEV_PMD_INFO("%s reach threshold %d\n", ++ IFPGA_RAWDEV_PMD_INFO("%s reach threshold %d", + sensor->name, value); + *gsd_start = true; + break; +@@ -408,7 +408,7 @@ ifpga_monitor_sensor(struct rte_rawdev *raw_dev, + if (!strcmp(sensor->name, "12V AUX Voltage")) { + if (value < AUX_VOLTAGE_WARN) { + IFPGA_RAWDEV_PMD_INFO( +- "%s reach threshold %d mV\n", ++ "%s reach threshold %d mV", + sensor->name, value); + *gsd_start = true; + break; +@@ -444,7 +444,7 @@ static int set_surprise_link_check_aer( + if (ifpga_monitor_sensor(rdev, &enable)) + return -EFAULT; + if (enable || force_disable) { +- IFPGA_RAWDEV_PMD_ERR("Set AER, pls graceful shutdown\n"); ++ IFPGA_RAWDEV_PMD_ERR("Set AER, pls graceful shutdown"); + ifpga_rdev->aer_enable = 1; + /* get bridge fd */ + strlcpy(path, "/sys/bus/pci/devices/", sizeof(path)); +@@ -660,7 +660,7 @@ ifpga_rawdev_info_get(struct rte_rawdev *dev, + continue; + + if (ifpga_fill_afu_dev(acc, afu_dev)) { +- IFPGA_RAWDEV_PMD_ERR("cannot get info\n"); ++ IFPGA_RAWDEV_PMD_ERR("cannot get info"); + return -ENOENT; + } + } +@@ -815,13 +815,13 @@ fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, const char *buffer, u32 size, + + ret = opae_manager_flash(mgr, port_id, buffer, size, status); + if (ret) { +- IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret); ++ IFPGA_RAWDEV_PMD_ERR("%s pr error %d", __func__, ret); + return ret; + } + + ret = opae_bridge_reset(br); + if (ret) { +- IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n", ++ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d", + __func__, port_id, ret); + return ret; + } +@@ -845,14 +845,14 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + + file_fd = open(file_name, O_RDONLY); + if (file_fd < 0) { +- IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n", ++ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s", + __func__, file_name); +- IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno)); ++ IFPGA_RAWDEV_PMD_ERR("Message : %s", strerror(errno)); + return -EINVAL; + } + ret = stat(file_name, &file_stat); + if (ret) { +- IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n", ++ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s", + file_name); + ret = -EINVAL; + goto close_fd; +@@ -863,7 +863,7 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + goto close_fd; + } + +- IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size); ++ IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu", buffer_size); + buffer = rte_malloc(NULL, buffer_size, 0); + if (!buffer) { + ret = -ENOMEM; +@@ -879,7 +879,7 @@ rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id, + + /*do PR now*/ + ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error); +- IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.\n", port_id, ++ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.", port_id, + ret ? 
"failed" : "success"); + if (ret) { + ret = -EINVAL; +@@ -922,7 +922,7 @@ ifpga_rawdev_pr(struct rte_rawdev *dev, + afu_pr_conf->afu_id.port, + afu_pr_conf->bs_path); + if (ret) { +- IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret); ++ IFPGA_RAWDEV_PMD_ERR("do pr error %d", ret); + return ret; + } + } +@@ -953,7 +953,7 @@ ifpga_rawdev_pr(struct rte_rawdev *dev, + rte_memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8, + sizeof(u64)); + +- IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n", ++ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx", + __func__, + (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low, + (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high); +@@ -1229,13 +1229,13 @@ fme_err_read_seu_emr(struct opae_manager *mgr) + if (ret) + return -EINVAL; + +- IFPGA_RAWDEV_PMD_INFO("seu emr low: 0x%" PRIx64 "\n", val); ++ IFPGA_RAWDEV_PMD_INFO("seu emr low: 0x%" PRIx64, val); + + ret = ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_SEU_EMR_HIGH, &val); + if (ret) + return -EINVAL; + +- IFPGA_RAWDEV_PMD_INFO("seu emr high: 0x%" PRIx64 "\n", val); ++ IFPGA_RAWDEV_PMD_INFO("seu emr high: 0x%" PRIx64, val); + + return 0; + } +@@ -1250,7 +1250,7 @@ static int fme_clear_warning_intr(struct opae_manager *mgr) + if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_NONFATAL_ERRORS, &val)) + return -EINVAL; + if ((val & 0x40) != 0) +- IFPGA_RAWDEV_PMD_INFO("clean not done\n"); ++ IFPGA_RAWDEV_PMD_INFO("clean not done"); + + return 0; + } +@@ -1262,14 +1262,14 @@ static int fme_clean_fme_error(struct opae_manager *mgr) + if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, &val)) + return -EINVAL; + +- IFPGA_RAWDEV_PMD_DEBUG("before clean 0x%" PRIx64 "\n", val); ++ IFPGA_RAWDEV_PMD_DEBUG("before clean 0x%" PRIx64, val); + + ifpga_set_fme_error_prop(mgr, FME_ERR_PROP_CLEAR, val); + + if (ifpga_get_fme_error_prop(mgr, FME_ERR_PROP_ERRORS, &val)) + return -EINVAL; + +- IFPGA_RAWDEV_PMD_DEBUG("after clean 0x%" PRIx64 "\n", val); ++ IFPGA_RAWDEV_PMD_DEBUG("after clean 0x%" PRIx64, val); + + return 0; + } +@@ -1289,15 +1289,15 @@ fme_err_handle_error0(struct opae_manager *mgr) + fme_error0.csr = val; + + if (fme_error0.fabric_err) +- IFPGA_RAWDEV_PMD_ERR("Fabric error\n"); ++ IFPGA_RAWDEV_PMD_ERR("Fabric error"); + else if (fme_error0.fabfifo_overflow) +- IFPGA_RAWDEV_PMD_ERR("Fabric fifo under/overflow error\n"); ++ IFPGA_RAWDEV_PMD_ERR("Fabric fifo under/overflow error"); + else if (fme_error0.afu_acc_mode_err) +- IFPGA_RAWDEV_PMD_ERR("AFU PF/VF access mismatch detected\n"); ++ IFPGA_RAWDEV_PMD_ERR("AFU PF/VF access mismatch detected"); + else if (fme_error0.pcie0cdc_parity_err) +- IFPGA_RAWDEV_PMD_ERR("PCIe0 CDC Parity Error\n"); ++ IFPGA_RAWDEV_PMD_ERR("PCIe0 CDC Parity Error"); + else if (fme_error0.cvlcdc_parity_err) +- IFPGA_RAWDEV_PMD_ERR("CVL CDC Parity Error\n"); ++ IFPGA_RAWDEV_PMD_ERR("CVL CDC Parity Error"); + else if (fme_error0.fpgaseuerr) + fme_err_read_seu_emr(mgr); + +@@ -1320,17 +1320,17 @@ fme_err_handle_catfatal_error(struct opae_manager *mgr) + fme_catfatal.csr = val; + + if (fme_catfatal.cci_fatal_err) +- IFPGA_RAWDEV_PMD_ERR("CCI error detected\n"); ++ IFPGA_RAWDEV_PMD_ERR("CCI error detected"); + else if (fme_catfatal.fabric_fatal_err) +- IFPGA_RAWDEV_PMD_ERR("Fabric fatal error detected\n"); ++ IFPGA_RAWDEV_PMD_ERR("Fabric fatal error detected"); + else if (fme_catfatal.pcie_poison_err) +- IFPGA_RAWDEV_PMD_ERR("Poison error from PCIe ports\n"); ++ IFPGA_RAWDEV_PMD_ERR("Poison error from PCIe ports"); + else if (fme_catfatal.inject_fata_err) +- IFPGA_RAWDEV_PMD_ERR("Injected 
Fatal Error\n"); ++ IFPGA_RAWDEV_PMD_ERR("Injected Fatal Error"); + else if (fme_catfatal.crc_catast_err) +- IFPGA_RAWDEV_PMD_ERR("a catastrophic EDCRC error\n"); ++ IFPGA_RAWDEV_PMD_ERR("a catastrophic EDCRC error"); + else if (fme_catfatal.injected_catast_err) +- IFPGA_RAWDEV_PMD_ERR("Injected Catastrophic Error\n"); ++ IFPGA_RAWDEV_PMD_ERR("Injected Catastrophic Error"); + else if (fme_catfatal.bmc_seu_catast_err) + fme_err_read_seu_emr(mgr); + +@@ -1349,28 +1349,28 @@ fme_err_handle_nonfaterror(struct opae_manager *mgr) + nonfaterr.csr = val; + + if (nonfaterr.temp_thresh_ap1) +- IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP1\n"); ++ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP1"); + else if (nonfaterr.temp_thresh_ap2) +- IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP2\n"); ++ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP2"); + else if (nonfaterr.pcie_error) +- IFPGA_RAWDEV_PMD_INFO("an error has occurred in pcie\n"); ++ IFPGA_RAWDEV_PMD_INFO("an error has occurred in pcie"); + else if (nonfaterr.portfatal_error) +- IFPGA_RAWDEV_PMD_INFO("fatal error occurred in AFU port.\n"); ++ IFPGA_RAWDEV_PMD_INFO("fatal error occurred in AFU port."); + else if (nonfaterr.proc_hot) +- IFPGA_RAWDEV_PMD_INFO("a ProcHot event\n"); ++ IFPGA_RAWDEV_PMD_INFO("a ProcHot event"); + else if (nonfaterr.afu_acc_mode_err) +- IFPGA_RAWDEV_PMD_INFO("an AFU PF/VF access mismatch\n"); ++ IFPGA_RAWDEV_PMD_INFO("an AFU PF/VF access mismatch"); + else if (nonfaterr.injected_nonfata_err) { +- IFPGA_RAWDEV_PMD_INFO("Injected Warning Error\n"); ++ IFPGA_RAWDEV_PMD_INFO("Injected Warning Error"); + fme_clear_warning_intr(mgr); + } else if (nonfaterr.temp_thresh_AP6) +- IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP6\n"); ++ IFPGA_RAWDEV_PMD_INFO("Temperature threshold triggered AP6"); + else if (nonfaterr.power_thresh_AP1) +- IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP1\n"); ++ IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP1"); + else if (nonfaterr.power_thresh_AP2) +- IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP2\n"); ++ IFPGA_RAWDEV_PMD_INFO("Power threshold triggered AP2"); + else if (nonfaterr.mbp_err) +- IFPGA_RAWDEV_PMD_INFO("an MBP event\n"); ++ IFPGA_RAWDEV_PMD_INFO("an MBP event"); + + return 0; + } +@@ -1380,7 +1380,7 @@ fme_interrupt_handler(void *param) + { + struct opae_manager *mgr = (struct opae_manager *)param; + +- IFPGA_RAWDEV_PMD_INFO("%s interrupt occurred\n", __func__); ++ IFPGA_RAWDEV_PMD_INFO("%s interrupt occurred", __func__); + + fme_err_handle_error0(mgr); + fme_err_handle_nonfaterror(mgr); +@@ -1406,7 +1406,7 @@ ifpga_unregister_msix_irq(struct ifpga_rawdev *dev, enum ifpga_irq_type type, + return -EINVAL; + + if ((*intr_handle) == NULL) { +- IFPGA_RAWDEV_PMD_ERR("%s interrupt %d not registered\n", ++ IFPGA_RAWDEV_PMD_ERR("%s interrupt %d not registered", + type == IFPGA_FME_IRQ ? "FME" : "AFU", + type == IFPGA_FME_IRQ ? 0 : vec_start); + return -ENOENT; +@@ -1416,7 +1416,7 @@ ifpga_unregister_msix_irq(struct ifpga_rawdev *dev, enum ifpga_irq_type type, + + rc = rte_intr_callback_unregister(*intr_handle, handler, arg); + if (rc < 0) { +- IFPGA_RAWDEV_PMD_ERR("Failed to unregister %s interrupt %d\n", ++ IFPGA_RAWDEV_PMD_ERR("Failed to unregister %s interrupt %d", + type == IFPGA_FME_IRQ ? "FME" : "AFU", + type == IFPGA_FME_IRQ ? 
0 : vec_start); + } else { +@@ -1479,7 +1479,7 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + rte_intr_efds_index_get(*intr_handle, 0))) + return -rte_errno; + +- IFPGA_RAWDEV_PMD_DEBUG("register %s irq, vfio_fd=%d, fd=%d\n", ++ IFPGA_RAWDEV_PMD_DEBUG("register %s irq, vfio_fd=%d, fd=%d", + name, rte_intr_dev_fd_get(*intr_handle), + rte_intr_fd_get(*intr_handle)); + +@@ -1498,7 +1498,7 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + + nb_intr = rte_intr_nb_intr_get(*intr_handle); + +- intr_efds = calloc(nb_intr, sizeof(int)); ++ intr_efds = rte_calloc("ifpga_efds", nb_intr, sizeof(int), 0); + if (!intr_efds) + return -ENOMEM; + +@@ -1507,7 +1507,7 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + + ret = opae_acc_set_irq(acc, vec_start, count, intr_efds); + if (ret) { +- free(intr_efds); ++ rte_free(intr_efds); + return -EINVAL; + } + } +@@ -1516,13 +1516,13 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + ret = rte_intr_callback_register(*intr_handle, + handler, (void *)arg); + if (ret) { +- free(intr_efds); ++ rte_free(intr_efds); + return -EINVAL; + } + +- IFPGA_RAWDEV_PMD_INFO("success register %s interrupt\n", name); ++ IFPGA_RAWDEV_PMD_INFO("success register %s interrupt", name); + +- free(intr_efds); ++ rte_free(intr_efds); + return 0; + } + +diff --git a/dpdk/drivers/regex/cn9k/cn9k_regexdev.c b/dpdk/drivers/regex/cn9k/cn9k_regexdev.c +index e96cbf4141..aa809ab5bf 100644 +--- a/dpdk/drivers/regex/cn9k/cn9k_regexdev.c ++++ b/dpdk/drivers/regex/cn9k/cn9k_regexdev.c +@@ -192,7 +192,7 @@ ree_dev_register(const char *name) + { + struct rte_regexdev *dev; + +- cn9k_ree_dbg("Creating regexdev %s\n", name); ++ cn9k_ree_dbg("Creating regexdev %s", name); + + /* allocate device structure */ + dev = rte_regexdev_register(name); +diff --git a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c +index f034bd59ba..2958368813 100644 +--- a/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c ++++ b/dpdk/drivers/vdpa/ifc/ifcvf_vdpa.c +@@ -536,7 +536,7 @@ notify_relay(void *arg) + if (nfds < 0) { + if (errno == EINTR) + continue; +- DRV_LOG(ERR, "epoll_wait return fail\n"); ++ DRV_LOG(ERR, "epoll_wait return fail"); + return 1; + } + +@@ -651,12 +651,12 @@ intr_relay(void *arg) + errno == EWOULDBLOCK || + errno == EAGAIN) + continue; +- DRV_LOG(ERR, "Error reading from file descriptor %d: %s\n", ++ DRV_LOG(ERR, "Error reading from file descriptor %d: %s", + csc_event.data.fd, + strerror(errno)); + goto out; + } else if (nbytes == 0) { +- DRV_LOG(ERR, "Read nothing from file descriptor %d\n", ++ DRV_LOG(ERR, "Read nothing from file descriptor %d", + csc_event.data.fd); + continue; + } else { +@@ -1500,7 +1500,7 @@ ifcvf_pci_get_device_type(struct rte_pci_device *pci_dev) + uint16_t device_id; + + if (pci_device_id < 0x1000 || pci_device_id > 0x107f) { +- DRV_LOG(ERR, "Probe device is not a virtio device\n"); ++ DRV_LOG(ERR, "Probe device is not a virtio device"); + return -1; + } + +@@ -1577,7 +1577,7 @@ ifcvf_blk_get_config(int vid, uint8_t *config, uint32_t size) + DRV_LOG(DEBUG, " sectors : %u", dev_cfg->geometry.sectors); + DRV_LOG(DEBUG, "num_queues: 0x%08x", dev_cfg->num_queues); + +- DRV_LOG(DEBUG, "config: [%x] [%x] [%x] [%x] [%x] [%x] [%x] [%x]\n", ++ DRV_LOG(DEBUG, "config: [%x] [%x] [%x] [%x] [%x] [%x] [%x] [%x]", + config[0], config[1], config[2], config[3], config[4], + config[5], config[6], config[7]); + return 0; diff --git a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c 
b/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c index 9557c1042e..32430614d5 100644 --- a/dpdk/drivers/vdpa/mlx5/mlx5_vdpa_event.c @@ -36674,6 +54143,92 @@ index 9557c1042e..32430614d5 100644 /* FW will set event qp to error state in q destroy. */ if (reset && !mlx5_vdpa_qps2rst2rts(eqp)) rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)), +diff --git a/dpdk/drivers/vdpa/nfp/nfp_vdpa.c b/dpdk/drivers/vdpa/nfp/nfp_vdpa.c +index cef80b5476..3e4247dbcb 100644 +--- a/dpdk/drivers/vdpa/nfp/nfp_vdpa.c ++++ b/dpdk/drivers/vdpa/nfp/nfp_vdpa.c +@@ -127,7 +127,7 @@ nfp_vdpa_vfio_setup(struct nfp_vdpa_dev *device) + if (device->vfio_group_fd < 0) + goto container_destroy; + +- DRV_VDPA_LOG(DEBUG, "container_fd=%d, group_fd=%d,\n", ++ DRV_VDPA_LOG(DEBUG, "container_fd=%d, group_fd=%d,", + device->vfio_container_fd, device->vfio_group_fd); + + ret = rte_pci_map_device(pci_dev); +diff --git a/dpdk/drivers/vdpa/nfp/nfp_vdpa_core.c b/dpdk/drivers/vdpa/nfp/nfp_vdpa_core.c +index 7b877605e4..6d07356581 100644 +--- a/dpdk/drivers/vdpa/nfp/nfp_vdpa_core.c ++++ b/dpdk/drivers/vdpa/nfp/nfp_vdpa_core.c +@@ -55,7 +55,10 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, + struct rte_pci_device *pci_dev) + { + uint32_t queue; ++ uint8_t *tx_bar; ++ uint32_t start_q; + struct nfp_hw *hw; ++ uint32_t tx_bar_off; + uint8_t *notify_base; + + hw = &vdpa_hw->super; +@@ -82,6 +85,12 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, + idx + 1, vdpa_hw->notify_addr[idx + 1]); + } + ++ /* NFP vDPA cfg queue setup */ ++ start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ); ++ tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ; ++ tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off; ++ hw->qcp_cfg = tx_bar + NFP_QCP_QUEUE_ADDR_SZ; ++ + vdpa_hw->features = (1ULL << VIRTIO_F_VERSION_1) | + (1ULL << VIRTIO_F_IN_ORDER) | + (1ULL << VHOST_USER_F_PROTOCOL_FEATURES); +@@ -92,7 +101,7 @@ nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, + static uint32_t + nfp_vdpa_check_offloads(void) + { +- return NFP_NET_CFG_CTRL_SCATTER | ++ return NFP_NET_CFG_CTRL_VIRTIO | + NFP_NET_CFG_CTRL_IN_ORDER; + } + +@@ -103,6 +112,7 @@ nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw, + int ret; + uint32_t update; + uint32_t new_ctrl; ++ uint32_t new_ext_ctrl; + struct timespec wait_tst; + struct nfp_hw *hw = &vdpa_hw->super; + uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; +@@ -122,8 +132,6 @@ nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw, + nfp_disable_queues(hw); + nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES); + +- new_ctrl = nfp_vdpa_check_offloads(); +- + nn_cfg_writel(hw, NFP_NET_CFG_MTU, 9216); + nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, 10240); + +@@ -138,8 +146,17 @@ nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw, + /* Writing new MAC to the specific port BAR address */ + nfp_write_mac(hw, (uint8_t *)mac_addr); + ++ new_ext_ctrl = nfp_vdpa_check_offloads(); ++ ++ update = NFP_NET_CFG_UPDATE_GEN; ++ ret = nfp_ext_reconfig(hw, new_ext_ctrl, update); ++ if (ret != 0) ++ return -EIO; ++ ++ hw->ctrl_ext = new_ext_ctrl; ++ + /* Enable device */ +- new_ctrl |= NFP_NET_CFG_CTRL_ENABLE; ++ new_ctrl = NFP_NET_CFG_CTRL_ENABLE; + + /* Signal the NIC about the change */ + update = NFP_NET_CFG_UPDATE_MACADDR | diff --git a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c b/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c index edb7e35c2c..7e43719f53 100644 --- a/dpdk/drivers/vdpa/sfc/sfc_vdpa_hw.c @@ -36751,6 +54306,40 @@ index 8958f58dac..5e897cf5d2 100644 all_nics_in_dpdk_devbind, ) self.verify( +diff --git a/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c 
b/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c +index 783f68c91e..831d7fd53d 100644 +--- a/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c ++++ b/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c +@@ -38,10 +38,12 @@ worker_generic(void *arg) + } + received++; + +- /* The first worker stage does classification */ +- if (ev.queue_id == cdata.qid[0]) ++ /* The first worker stage does classification and sets txq. */ ++ if (ev.queue_id == cdata.qid[0]) { + ev.flow_id = ev.mbuf->hash.rss + % cdata.num_fids; ++ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0); ++ } + + ev.queue_id = cdata.next_qid[ev.queue_id]; + ev.op = RTE_EVENT_OP_FORWARD; +@@ -96,10 +98,12 @@ worker_generic_burst(void *arg) + + for (i = 0; i < nb_rx; i++) { + +- /* The first worker stage does classification */ +- if (events[i].queue_id == cdata.qid[0]) ++ /* The first worker stage does classification and sets txq. */ ++ if (events[i].queue_id == cdata.qid[0]) { + events[i].flow_id = events[i].mbuf->hash.rss + % cdata.num_fids; ++ rte_event_eth_tx_adapter_txq_set(events[i].mbuf, 0); ++ } + + events[i].queue_id = cdata.next_qid[events[i].queue_id]; + events[i].op = RTE_EVENT_OP_FORWARD; diff --git a/dpdk/examples/fips_validation/fips_validation_rsa.c b/dpdk/examples/fips_validation/fips_validation_rsa.c index f675b51051..55f81860a0 100644 --- a/dpdk/examples/fips_validation/fips_validation_rsa.c @@ -36797,7 +54386,7 @@ index dfb81bfcf1..be635685b4 100644 }; diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c -index bf98d2618b..761b9cf396 100644 +index bf98d2618b..5e77d9d2ce 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c +++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -220,8 +220,8 @@ static const char *cfgfile; @@ -36820,7 +54409,37 @@ index bf98d2618b..761b9cf396 100644 { struct ipsec_traffic traffic; -@@ -695,9 +695,7 @@ ipsec_poll_mode_worker(void) +@@ -626,12 +626,13 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf, + uint32_t n; + struct ipsec_traffic trf; + unsigned int lcoreid = rte_lcore_id(); ++ const int nb_pkts = RTE_DIM(trf.ipsec.pkts); + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts, +- RTE_DIM(trf.ipsec.pkts)); ++ RTE_MIN(MAX_PKT_BURST, nb_pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; +@@ -663,12 +664,13 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf, + { + uint32_t n; + struct ipsec_traffic trf; ++ const int nb_pkts = RTE_DIM(trf.ipsec.pkts); + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts, +- RTE_DIM(trf.ipsec.pkts)); ++ RTE_MIN(MAX_PKT_BURST, nb_pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; +@@ -695,9 +697,7 @@ ipsec_poll_mode_worker(void) struct rte_mbuf *pkts[MAX_PKT_BURST]; uint32_t lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; @@ -36831,7 +54450,7 @@ index bf98d2618b..761b9cf396 100644 struct lcore_conf *qconf; int32_t rc, socket_id; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) -@@ -744,7 +742,7 @@ ipsec_poll_mode_worker(void) +@@ -744,7 +744,7 @@ ipsec_poll_mode_worker(void) portid = rxql[i].port_id; queueid = rxql[i].queue_id; RTE_LOG(INFO, IPSEC, @@ -36840,7 +54459,7 @@ index bf98d2618b..761b9cf396 100644 lcore_id, portid, queueid); } -@@ -789,8 +787,7 @@ int +@@ -789,8 +789,7 @@ int check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) { uint16_t i; @@ -36850,7 +54469,7 @@ index bf98d2618b..761b9cf396 100644 
for (i = 0; i < nb_lcore_params; ++i) { portid = lcore_params_array[i].port_id; -@@ -810,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) +@@ -810,7 +809,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) static int32_t check_poll_mode_params(struct eh_conf *eh_conf) { @@ -36859,7 +54478,7 @@ index bf98d2618b..761b9cf396 100644 uint16_t portid; uint16_t i; int32_t socket_id; -@@ -829,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf) +@@ -829,13 +828,13 @@ check_poll_mode_params(struct eh_conf *eh_conf) for (i = 0; i < nb_lcore_params; ++i) { lcore = lcore_params[i].lcore_id; if (!rte_lcore_is_enabled(lcore)) { @@ -36875,7 +54494,7 @@ index bf98d2618b..761b9cf396 100644 "with numa off\n", lcore, socket_id); } -@@ -852,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf) +@@ -852,7 +851,7 @@ check_poll_mode_params(struct eh_conf *eh_conf) return 0; } @@ -36884,7 +54503,7 @@ index bf98d2618b..761b9cf396 100644 get_port_nb_rx_queues(const uint16_t port) { int32_t queue = -1; -@@ -863,14 +860,14 @@ get_port_nb_rx_queues(const uint16_t port) +@@ -863,14 +862,14 @@ get_port_nb_rx_queues(const uint16_t port) lcore_params[i].queue_id > queue) queue = lcore_params[i].queue_id; } @@ -36901,7 +54520,7 @@ index bf98d2618b..761b9cf396 100644 for (i = 0; i < nb_lcore_params; ++i) { lcore = lcore_params[i].lcore_id; -@@ -1051,6 +1048,11 @@ parse_config(const char *q_arg) +@@ -1051,6 +1050,11 @@ parse_config(const char *q_arg) char *str_fld[_NUM_FLD]; int32_t i; uint32_t size; @@ -36913,7 +54532,7 @@ index bf98d2618b..761b9cf396 100644 nb_lcore_params = 0; -@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg) +@@ -1071,7 +1075,7 @@ parse_config(const char *q_arg) for (i = 0; i < _NUM_FLD; i++) { errno = 0; int_fld[i] = strtoul(str_fld[i], &end, 0); @@ -36922,7 +54541,7 @@ index bf98d2618b..761b9cf396 100644 return -1; } if (nb_lcore_params >= MAX_LCORE_PARAMS) { -@@ -1080,11 +1082,11 @@ parse_config(const char *q_arg) +@@ -1080,11 +1084,11 @@ parse_config(const char *q_arg) return -1; } lcore_params_array[nb_lcore_params].port_id = @@ -36937,7 +54556,7 @@ index bf98d2618b..761b9cf396 100644 ++nb_lcore_params; } lcore_params = lcore_params_array; -@@ -1920,7 +1922,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -1920,7 +1924,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, struct rte_eth_dev_info dev_info; struct rte_eth_txconf *txconf; uint16_t nb_tx_queue, nb_rx_queue; @@ -36947,7 +54566,7 @@ index bf98d2618b..761b9cf396 100644 int32_t ret, socket_id; struct lcore_conf *qconf; struct rte_ether_addr ethaddr; -@@ -2094,10 +2097,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -2094,10 +2099,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, /* Register Rx callback if ptypes are not supported */ if (!ptype_supported && @@ -37043,6 +54662,27 @@ index bdcada1c40..d4ecfdf08d 100644 void *sec_ctx; } __rte_cache_aligned; +diff --git a/dpdk/examples/ipsec-secgw/ipsec_process.c b/dpdk/examples/ipsec-secgw/ipsec_process.c +index b0cece3ad1..1a64a4b49f 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec_process.c ++++ b/dpdk/examples/ipsec-secgw/ipsec_process.c +@@ -336,6 +336,7 @@ ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) + struct rte_ipsec_session *ss; + struct traffic_type *out; + struct rte_ipsec_group *pg; ++ const int nb_cops = RTE_DIM(trf->ipsec.pkts); + struct rte_crypto_op 
*cop[RTE_DIM(trf->ipsec.pkts)]; + struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)]; + +@@ -345,7 +346,7 @@ ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) + out = &trf->ipsec; + + /* dequeue completed crypto-ops */ +- n = ctx_dequeue(ctx, cop, RTE_DIM(cop)); ++ n = ctx_dequeue(ctx, cop, RTE_MIN(MAX_PKT_BURST, nb_cops)); + if (n == 0) + return; + diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.c b/dpdk/examples/ipsec-secgw/ipsec_worker.c index 8d122e8519..c9c43ebd2b 100644 --- a/dpdk/examples/ipsec-secgw/ipsec_worker.c @@ -37120,6 +54760,18 @@ index 98f8176651..2bd6df335b 100644 if (st->status < 0) return; } +diff --git a/dpdk/examples/l2fwd-event/l2fwd_event.c b/dpdk/examples/l2fwd-event/l2fwd_event.c +index 4b5a032e35..78f10f31ad 100644 +--- a/dpdk/examples/l2fwd-event/l2fwd_event.c ++++ b/dpdk/examples/l2fwd-event/l2fwd_event.c +@@ -141,6 +141,7 @@ l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc) + rte_spinlock_lock(&evt_rsrc->evp.lock); + if (index >= evt_rsrc->evp.nb_ports) { + printf("No free event port is available\n"); ++ rte_spinlock_unlock(&evt_rsrc->evp.lock); + return -1; + } + diff --git a/dpdk/examples/l3fwd-graph/main.c b/dpdk/examples/l3fwd-graph/main.c index 96cb1c81ff..4ded69b4a0 100644 --- a/dpdk/examples/l3fwd-graph/main.c @@ -37238,7 +54890,7 @@ index 96cb1c81ff..4ded69b4a0 100644 struct rte_eth_dev_info dev_info; uint32_t nb_ports, nb_conf = 0; diff --git a/dpdk/examples/l3fwd-power/main.c b/dpdk/examples/l3fwd-power/main.c -index 9c0dcd343b..996ac6dc56 100644 +index 9c0dcd343b..7640b5a9a3 100644 --- a/dpdk/examples/l3fwd-power/main.c +++ b/dpdk/examples/l3fwd-power/main.c @@ -213,7 +213,7 @@ enum freq_scale_hint_t @@ -37420,7 +55072,51 @@ index 9c0dcd343b..996ac6dc56 100644 return -1; } else { lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = -@@ -1660,6 +1657,11 @@ parse_config(const char *q_arg) +@@ -1526,8 +1523,12 @@ print_usage(const char *prgname) + prgname); + } + ++/* ++ * Caller must give the right upper limit so as to ensure receiver variable ++ * doesn't overflow. 
++ */ + static int +-parse_int(const char *opt) ++parse_uint(const char *opt, uint32_t max, uint32_t *res) + { + char *end = NULL; + unsigned long val; +@@ -1537,23 +1538,15 @@ parse_int(const char *opt) + if ((opt[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + +- return val; +-} +- +-static int parse_max_pkt_len(const char *pktlen) +-{ +- char *end = NULL; +- unsigned long len; +- +- /* parse decimal string */ +- len = strtoul(pktlen, &end, 10); +- if ((pktlen[0] == '\0') || (end == NULL) || (*end != '\0')) ++ if (val > max) { ++ RTE_LOG(ERR, L3FWD_POWER, "%s parameter shouldn't exceed %u.\n", ++ opt, max); + return -1; ++ } + +- if (len == 0) +- return -1; ++ *res = val; + +- return len; ++ return 0; + } + + static int +@@ -1660,6 +1653,11 @@ parse_config(const char *q_arg) char *str_fld[_NUM_FLD]; int i; unsigned size; @@ -37432,7 +55128,7 @@ index 9c0dcd343b..996ac6dc56 100644 nb_lcore_params = 0; -@@ -1679,8 +1681,7 @@ parse_config(const char *q_arg) +@@ -1679,8 +1677,7 @@ parse_config(const char *q_arg) for (i = 0; i < _NUM_FLD; i++){ errno = 0; int_fld[i] = strtoul(str_fld[i], &end, 0); @@ -37442,7 +55138,7 @@ index 9c0dcd343b..996ac6dc56 100644 return -1; } if (nb_lcore_params >= MAX_LCORE_PARAMS) { -@@ -1689,11 +1690,11 @@ parse_config(const char *q_arg) +@@ -1689,11 +1686,11 @@ parse_config(const char *q_arg) return -1; } lcore_params_array[nb_lcore_params].port_id = @@ -37457,7 +55153,56 @@ index 9c0dcd343b..996ac6dc56 100644 ++nb_lcore_params; } lcore_params = lcore_params_array; -@@ -2500,8 +2501,8 @@ main(int argc, char **argv) +@@ -1896,8 +1893,9 @@ parse_args(int argc, char **argv) + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_MAX_PKT_LEN, + sizeof(CMD_LINE_OPT_MAX_PKT_LEN))) { ++ if (parse_uint(optarg, UINT32_MAX, &max_pkt_len) != 0) ++ return -1; + printf("Custom frame size is configured\n"); +- max_pkt_len = parse_max_pkt_len(optarg); + } + + if (!strncmp(lgopts[option_index].name, +@@ -1910,29 +1908,33 @@ parse_args(int argc, char **argv) + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_MAX_EMPTY_POLLS, + sizeof(CMD_LINE_OPT_MAX_EMPTY_POLLS))) { ++ if (parse_uint(optarg, UINT32_MAX, &max_empty_polls) != 0) ++ return -1; + printf("Maximum empty polls configured\n"); +- max_empty_polls = parse_int(optarg); + } + + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_PAUSE_DURATION, + sizeof(CMD_LINE_OPT_PAUSE_DURATION))) { ++ if (parse_uint(optarg, UINT32_MAX, &pause_duration) != 0) ++ return -1; + printf("Pause duration configured\n"); +- pause_duration = parse_int(optarg); + } + + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_SCALE_FREQ_MIN, + sizeof(CMD_LINE_OPT_SCALE_FREQ_MIN))) { ++ if (parse_uint(optarg, UINT32_MAX, &scale_freq_min) != 0) ++ return -1; + printf("Scaling frequency minimum configured\n"); +- scale_freq_min = parse_int(optarg); + } + + if (!strncmp(lgopts[option_index].name, + CMD_LINE_OPT_SCALE_FREQ_MAX, + sizeof(CMD_LINE_OPT_SCALE_FREQ_MAX))) { ++ if (parse_uint(optarg, UINT32_MAX, &scale_freq_max) != 0) ++ return -1; + printf("Scaling frequency maximum configured\n"); +- scale_freq_max = parse_int(optarg); + } + + break; +@@ -2500,8 +2502,8 @@ main(int argc, char **argv) uint64_t hz; uint32_t n_tx_queue, nb_lcores; uint32_t dev_rxq_num, dev_txq_num; @@ -37617,6 +55362,48 @@ index 401692bcec..31798ccb10 100644 } } } +diff --git a/dpdk/examples/l3fwd/l3fwd_altivec.h b/dpdk/examples/l3fwd/l3fwd_altivec.h +index e45e138e59..b91a6b5587 100644 +--- a/dpdk/examples/l3fwd/l3fwd_altivec.h ++++ 
b/dpdk/examples/l3fwd/l3fwd_altivec.h +@@ -11,6 +11,9 @@ + #include "altivec/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. +@@ -117,7 +120,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; +diff --git a/dpdk/examples/l3fwd/l3fwd_common.h b/dpdk/examples/l3fwd/l3fwd_common.h +index 224b1c08e8..d94e5f1357 100644 +--- a/dpdk/examples/l3fwd/l3fwd_common.h ++++ b/dpdk/examples/l3fwd/l3fwd_common.h +@@ -18,6 +18,13 @@ + /* Minimum value of IPV4 total length (20B) in network byte order. */ + #define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8) + ++/* ++ * send_packet_multi() specific number of dest ports ++ * due to implementation we need to allocate array bigger then ++ * actual max number of elements in the array. ++ */ ++#define SENDM_PORT_OVERHEAD(x) (x) ++ + /* + * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2: + * - The IP version number must be 4. diff --git a/dpdk/examples/l3fwd/l3fwd_em.c b/dpdk/examples/l3fwd/l3fwd_em.c index 40e102b38a..f18ac0048b 100644 --- a/dpdk/examples/l3fwd/l3fwd_em.c @@ -37639,6 +55426,32 @@ index 40e102b38a..f18ac0048b 100644 lcore_id, portid, queueid); } +diff --git a/dpdk/examples/l3fwd/l3fwd_em_hlm.h b/dpdk/examples/l3fwd/l3fwd_em_hlm.h +index 31cda9ddc1..c1d819997a 100644 +--- a/dpdk/examples/l3fwd/l3fwd_em_hlm.h ++++ b/dpdk/examples/l3fwd/l3fwd_em_hlm.h +@@ -249,7 +249,7 @@ static inline void + l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0); + send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); +diff --git a/dpdk/examples/l3fwd/l3fwd_em_sequential.h b/dpdk/examples/l3fwd/l3fwd_em_sequential.h +index 067f23889a..3a40b2e434 100644 +--- a/dpdk/examples/l3fwd/l3fwd_em_sequential.h ++++ b/dpdk/examples/l3fwd/l3fwd_em_sequential.h +@@ -79,7 +79,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, + uint16_t portid, struct lcore_conf *qconf) + { + int32_t i, j; +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + if (nb_rx > 0) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0], diff --git a/dpdk/examples/l3fwd/l3fwd_event.h b/dpdk/examples/l3fwd/l3fwd_event.h index 9aad358003..c6a4a89127 100644 --- a/dpdk/examples/l3fwd/l3fwd_event.h @@ -37654,9 +55467,18 @@ index 9aad358003..c6a4a89127 100644 uint64_t vector_tmo_ns; }; diff --git a/dpdk/examples/l3fwd/l3fwd_fib.c b/dpdk/examples/l3fwd/l3fwd_fib.c -index 6a21984415..f38b19af3f 100644 +index 6a21984415..a36330119a 100644 --- a/dpdk/examples/l3fwd/l3fwd_fib.c +++ b/dpdk/examples/l3fwd/l3fwd_fib.c +@@ -121,7 +121,7 @@ fib_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, + { + uint32_t ipv4_arr[nb_rx]; + uint8_t ipv6_arr[nb_rx][RTE_FIB6_IPV6_ADDR_SIZE]; +- uint16_t hops[nb_rx]; ++ uint16_t hops[SENDM_PORT_OVERHEAD(nb_rx)]; + uint64_t hopsv4[nb_rx], hopsv6[nb_rx]; + uint8_t type_arr[nb_rx]; + uint32_t ipv4_cnt = 
0, ipv6_cnt = 0; @@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy) uint64_t prev_tsc, diff_tsc, cur_tsc; int i, nb_rx; @@ -37698,6 +55520,93 @@ index a484a33089..e8fd95aae9 100644 lcore_id, portid, queueid); } +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h +index adb82f1478..91aad5c313 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h ++++ b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h +@@ -145,7 +145,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_neon.h b/dpdk/examples/l3fwd/l3fwd_lpm_neon.h +index 2a68c4c15e..3c1f827424 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_neon.h ++++ b/dpdk/examples/l3fwd/l3fwd_lpm_neon.h +@@ -171,7 +171,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_sse.h b/dpdk/examples/l3fwd/l3fwd_lpm_sse.h +index db15030320..50f1abbd8a 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_sse.h ++++ b/dpdk/examples/l3fwd/l3fwd_lpm_sse.h +@@ -129,7 +129,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_neon.h b/dpdk/examples/l3fwd/l3fwd_neon.h +index 40807d5965..bc2bab8265 100644 +--- a/dpdk/examples/l3fwd/l3fwd_neon.h ++++ b/dpdk/examples/l3fwd/l3fwd_neon.h +@@ -10,6 +10,9 @@ + #include "neon/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. +@@ -92,7 +95,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; +diff --git a/dpdk/examples/l3fwd/l3fwd_sse.h b/dpdk/examples/l3fwd/l3fwd_sse.h +index 083729cdef..6236b7873c 100644 +--- a/dpdk/examples/l3fwd/l3fwd_sse.h ++++ b/dpdk/examples/l3fwd/l3fwd_sse.h +@@ -10,6 +10,9 @@ + #include "sse/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. 
+@@ -91,7 +94,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c index 3bf28aec0c..a239869ada 100644 --- a/dpdk/examples/l3fwd/main.c @@ -37885,6 +55794,22 @@ index 3bf28aec0c..a239869ada 100644 printf("\n"); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { +diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c +index 95a6148c82..fcf0ec9b56 100644 +--- a/dpdk/examples/ntb/ntb_fwd.c ++++ b/dpdk/examples/ntb/ntb_fwd.c +@@ -1285,7 +1285,10 @@ main(int argc, char **argv) + eth_port_id = rte_eth_find_next(0); + + if (eth_port_id < RTE_MAX_ETHPORTS) { +- rte_eth_dev_info_get(eth_port_id, ðdev_info); ++ ret = rte_eth_dev_info_get(eth_port_id, ðdev_info); ++ if (ret) ++ rte_exit(EXIT_FAILURE, "Can't get info for port %u\n", eth_port_id); ++ + eth_pconf.rx_adv_conf.rss_conf.rss_hf &= + ethdev_info.flow_type_rss_offloads; + ret = rte_eth_dev_configure(eth_port_id, num_queues, diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c index d2fd6f77e4..f839db9102 100644 --- a/dpdk/examples/packet_ordering/main.c @@ -38041,6 +55966,19 @@ index ce5c1efddf..3fc1b151d1 100644 while (isblank(*addrs)) addrs++; if (*addrs == '\0') { +diff --git a/dpdk/examples/vhost_blk/vhost_blk.c b/dpdk/examples/vhost_blk/vhost_blk.c +index 376f7b89a7..4dc99eb648 100644 +--- a/dpdk/examples/vhost_blk/vhost_blk.c ++++ b/dpdk/examples/vhost_blk/vhost_blk.c +@@ -776,7 +776,7 @@ vhost_blk_bdev_construct(const char *bdev_name, + bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0); + if (!bdev->data) { + fprintf(stderr, "No enough reserved huge memory for disk\n"); +- free(bdev); ++ rte_free(bdev); + return NULL; + } + diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c index 94bfbbaf78..5eddb47847 100644 --- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c @@ -38112,6 +56050,19 @@ index cfebea09c7..e09bb97abb 100644 dev_id, queue_id, op, epfd, vec); return ret; } +diff --git a/dpdk/lib/bpf/bpf_convert.c b/dpdk/lib/bpf/bpf_convert.c +index d441be6663..cb400a4ffb 100644 +--- a/dpdk/lib/bpf/bpf_convert.c ++++ b/dpdk/lib/bpf/bpf_convert.c +@@ -556,7 +556,7 @@ rte_bpf_convert(const struct bpf_program *prog) + ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, ebpf, &ebpf_len); + if (ret < 0) { + RTE_BPF_LOG(ERR, "%s: cannot convert cBPF to eBPF\n", __func__); +- free(prm); ++ rte_free(prm); + rte_errno = -ret; + return NULL; + } diff --git a/dpdk/lib/bpf/bpf_validate.c b/dpdk/lib/bpf/bpf_validate.c index 95b9ef99ef..da8d5f3deb 100644 --- a/dpdk/lib/bpf/bpf_validate.c @@ -38882,7 +56833,7 @@ index 10d02edde9..95dd41b818 100644 RTE_VERIFY(rc == 0); diff --git a/dpdk/lib/dmadev/rte_dmadev.c b/dpdk/lib/dmadev/rte_dmadev.c -index 4e5e420c82..5093c6e38b 100644 +index 4e5e420c82..a2e52cc8ff 100644 --- a/dpdk/lib/dmadev/rte_dmadev.c +++ b/dpdk/lib/dmadev/rte_dmadev.c @@ -158,15 +158,24 @@ static int @@ -38914,7 +56865,12 @@ index 4e5e420c82..5093c6e38b 100644 return 0; } -@@ -726,7 +735,7 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status * +@@ -722,11 +731,11 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t 
vchan, enum rte_dma_vchan_status * + { + struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; + +- if (!rte_dma_is_valid(dev_id)) ++ if (!rte_dma_is_valid(dev_id) || status == NULL) return -EINVAL; if (vchan >= dev->data->dev_conf.nb_vchans) { @@ -38932,6 +56888,34 @@ index 4e5e420c82..5093c6e38b 100644 if (buf == NULL) return -ENOMEM; +diff --git a/dpdk/lib/eal/common/eal_common_dev.c b/dpdk/lib/eal/common/eal_common_dev.c +index 614ef6c9fc..bc53b2e28d 100644 +--- a/dpdk/lib/eal/common/eal_common_dev.c ++++ b/dpdk/lib/eal/common/eal_common_dev.c +@@ -550,16 +550,17 @@ rte_dev_event_callback_unregister(const char *device_name, + next = TAILQ_NEXT(event_cb, next); + + if (device_name != NULL && event_cb->dev_name != NULL) { +- if (!strcmp(event_cb->dev_name, device_name)) { +- if (event_cb->cb_fn != cb_fn || +- (cb_arg != (void *)-1 && +- event_cb->cb_arg != cb_arg)) +- continue; +- } ++ if (strcmp(event_cb->dev_name, device_name)) ++ continue; + } else if (device_name != NULL) { + continue; + } + ++ /* Remove only matching callback with arg */ ++ if (event_cb->cb_fn != cb_fn || ++ (cb_arg != (void *)-1 && event_cb->cb_arg != cb_arg)) ++ continue; ++ + /* + * if this callback is not executing right now, + * then remove it. diff --git a/dpdk/lib/eal/common/eal_common_fbarray.c b/dpdk/lib/eal/common/eal_common_fbarray.c index 2055bfa57d..253110360e 100644 --- a/dpdk/lib/eal/common/eal_common_fbarray.c @@ -39198,6 +57182,111 @@ index 1a7cf8e7b7..9fc2f7763a 100644 } return ret; } +diff --git a/dpdk/lib/eal/unix/meson.build b/dpdk/lib/eal/unix/meson.build +index cc7d67dd32..f1eb82e16a 100644 +--- a/dpdk/lib/eal/unix/meson.build ++++ b/dpdk/lib/eal/unix/meson.build +@@ -11,3 +11,8 @@ sources += files( + 'eal_unix_timer.c', + 'rte_thread.c', + ) ++ ++if is_freebsd or cc.has_function('pthread_attr_setaffinity_np', args: '-D_GNU_SOURCE', ++ prefix : '#include ') ++ cflags += '-DRTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP' ++endif +diff --git a/dpdk/lib/eal/unix/rte_thread.c b/dpdk/lib/eal/unix/rte_thread.c +index 36a21ab2f9..ac13829760 100644 +--- a/dpdk/lib/eal/unix/rte_thread.c ++++ b/dpdk/lib/eal/unix/rte_thread.c +@@ -17,6 +17,7 @@ struct eal_tls_key { + pthread_key_t thread_index; + }; + ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + struct thread_start_context { + rte_thread_func thread_func; + void *thread_args; +@@ -26,6 +27,7 @@ struct thread_start_context { + int wrapper_ret; + bool wrapper_done; + }; ++#endif + + static int + thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri, +@@ -86,6 +88,7 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri, + return 0; + } + ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + static void * + thread_start_wrapper(void *arg) + { +@@ -111,6 +114,7 @@ thread_start_wrapper(void *arg) + + return (void *)(uintptr_t)thread_func(thread_args); + } ++#endif + + int + rte_thread_create(rte_thread_t *thread_id, +@@ -124,6 +128,7 @@ rte_thread_create(rte_thread_t *thread_id, + .sched_priority = 0, + }; + int policy = SCHED_OTHER; ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + struct thread_start_context ctx = { + .thread_func = thread_func, + .thread_args = args, +@@ -132,6 +137,7 @@ rte_thread_create(rte_thread_t *thread_id, + .wrapper_mutex = PTHREAD_MUTEX_INITIALIZER, + .wrapper_cond = PTHREAD_COND_INITIALIZER, + }; ++#endif + + if (thread_attr != NULL) { + ret = pthread_attr_init(&attr); +@@ -142,6 +148,16 @@ rte_thread_create(rte_thread_t *thread_id, + + attrp = &attr; + ++#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP ++ if 
(CPU_COUNT(&thread_attr->cpuset) > 0) { ++ ret = pthread_attr_setaffinity_np(attrp, sizeof(thread_attr->cpuset), ++ &thread_attr->cpuset); ++ if (ret != 0) { ++ RTE_LOG(DEBUG, EAL, "pthread_attr_setaffinity_np failed\n"); ++ goto cleanup; ++ } ++ } ++#endif + /* + * Set the inherit scheduler parameter to explicit, + * otherwise the priority attribute is ignored. +@@ -176,6 +192,14 @@ rte_thread_create(rte_thread_t *thread_id, + } + } + ++#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP ++ ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, ++ (void *)(void *)thread_func, args); ++ if (ret != 0) { ++ RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); ++ goto cleanup; ++ } ++#else /* !RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ + ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, + thread_start_wrapper, &ctx); + if (ret != 0) { +@@ -191,6 +215,7 @@ rte_thread_create(rte_thread_t *thread_id, + + if (ret != 0) + rte_thread_join(*thread_id, NULL); ++#endif /* RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ + + cleanup: + if (attrp != NULL) diff --git a/dpdk/lib/eal/windows/eal_memory.c b/dpdk/lib/eal/windows/eal_memory.c index 31410a41fd..fd39155163 100644 --- a/dpdk/lib/eal/windows/eal_memory.c @@ -39221,6 +57310,19 @@ index 5fb1962ac7..e985a77d58 100644 'rte_windows.h', + 'sched.h', ) +diff --git a/dpdk/lib/eal/x86/include/rte_io.h b/dpdk/lib/eal/x86/include/rte_io.h +index 0e1fefdee1..5366e09c47 100644 +--- a/dpdk/lib/eal/x86/include/rte_io.h ++++ b/dpdk/lib/eal/x86/include/rte_io.h +@@ -24,7 +24,7 @@ __rte_x86_movdiri(uint32_t value, volatile void *addr) + { + asm volatile( + /* MOVDIRI */ +- ".byte 0x40, 0x0f, 0x38, 0xf9, 0x02" ++ ".byte 0x0f, 0x38, 0xf9, 0x02" + : + : "a" (value), "d" (addr)); + } diff --git a/dpdk/lib/eal/x86/rte_cycles.c b/dpdk/lib/eal/x86/rte_cycles.c index 69ed59b4f0..f147a5231d 100644 --- a/dpdk/lib/eal/x86/rte_cycles.c @@ -39423,7 +57525,7 @@ index b61dae849d..311beb17cb 100644 return NULL; } diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index 3858983fcc..b9d99ece15 100644 +index 3858983fcc..dfcdf76fee 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c @@ -724,7 +724,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) @@ -39551,7 +57653,43 @@ index 3858983fcc..b9d99ece15 100644 cap.max_nb_queues); return -EINVAL; } -@@ -6716,7 +6716,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id, +@@ -2823,6 +2823,12 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + ++#ifdef RTE_ETHDEV_DEBUG_TX ++ ret = eth_dev_validate_tx_queue(dev, queue_id); ++ if (ret != 0) ++ return ret; ++#endif ++ + if (*dev->dev_ops->tx_done_cleanup == NULL) + return -ENOTSUP; + +@@ -6556,13 +6562,19 @@ static void + eth_dev_adjust_nb_desc(uint16_t *nb_desc, + const struct rte_eth_desc_lim *desc_lim) + { ++ /* Upcast to uint32 to avoid potential overflow with RTE_ALIGN_CEIL(). */ ++ uint32_t nb_desc_32 = (uint32_t)*nb_desc; ++ + if (desc_lim->nb_align != 0) +- *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); ++ nb_desc_32 = RTE_ALIGN_CEIL(nb_desc_32, desc_lim->nb_align); + + if (desc_lim->nb_max != 0) +- *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); ++ nb_desc_32 = RTE_MIN(nb_desc_32, desc_lim->nb_max); ++ ++ nb_desc_32 = RTE_MAX(nb_desc_32, desc_lim->nb_min); + +- *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); ++ /* Assign clipped u32 back to u16. 
*/ ++ *nb_desc = (uint16_t)nb_desc_32; + } + + int +@@ -6716,7 +6728,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id, } if (reassembly_capa == NULL) { @@ -39560,7 +57698,7 @@ index 3858983fcc..b9d99ece15 100644 return -EINVAL; } -@@ -6752,7 +6752,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id, +@@ -6752,7 +6764,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id, } if (conf == NULL) { @@ -39569,7 +57707,7 @@ index 3858983fcc..b9d99ece15 100644 return -EINVAL; } -@@ -6780,7 +6780,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, +@@ -6780,7 +6792,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, if (dev->data->dev_configured == 0) { RTE_ETHDEV_LOG(ERR, "Device with port_id=%u is not configured.\n" @@ -39990,7 +58128,7 @@ index af4b5ad388..4196164305 100644 } diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c -index 6db03adf04..1b83a55b5c 100644 +index 6db03adf04..bdcc3e3539 100644 --- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c @@ -293,14 +293,14 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, @@ -40080,6 +58218,15 @@ index 6db03adf04..1b83a55b5c 100644 return -rte_errno; } +@@ -2299,7 +2299,7 @@ rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) + dev_info->intr_queue[i] = i; + } else { +- if (!rxa_intr_queue(dev_info, rx_queue_id)) ++ if (!rxa_intr_queue(dev_info, rx_queue_id) && nb_rx_intr > 0) + dev_info->intr_queue[nb_rx_intr - 1] = + rx_queue_id; + } @@ -2445,7 +2445,7 @@ rxa_create(uint8_t id, uint8_t dev_id, RTE_DIM(default_rss_key)); @@ -40367,6 +58514,49 @@ index ec9b02455d..7fd9016ca7 100644 /* Scheduler type definitions */ #define RTE_SCHED_TYPE_ORDERED 0 +diff --git a/dpdk/lib/fib/dir24_8.c b/dpdk/lib/fib/dir24_8.c +index c739e92304..07c324743b 100644 +--- a/dpdk/lib/fib/dir24_8.c ++++ b/dpdk/lib/fib/dir24_8.c +@@ -526,8 +526,8 @@ dir24_8_create(const char *name, int socket_id, struct rte_fib_conf *fib_conf) + + snprintf(mem_name, sizeof(mem_name), "DP_%s", name); + dp = rte_zmalloc_socket(name, sizeof(struct dir24_8_tbl) + +- DIR24_8_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE, +- socket_id); ++ DIR24_8_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t), ++ RTE_CACHE_LINE_SIZE, socket_id); + if (dp == NULL) { + rte_errno = ENOMEM; + return NULL; +diff --git a/dpdk/lib/fib/trie.c b/dpdk/lib/fib/trie.c +index 09470e7287..ca1c2fe3bc 100644 +--- a/dpdk/lib/fib/trie.c ++++ b/dpdk/lib/fib/trie.c +@@ -46,8 +46,10 @@ static inline rte_fib6_lookup_fn_t + get_vector_fn(enum rte_fib_trie_nh_sz nh_sz) + { + #ifdef CC_TRIE_AVX512_SUPPORT +- if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) || +- (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) ++ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0 || ++ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) <= 0 || ++ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) <= 0 || ++ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512) + return NULL; + switch (nh_sz) { + case RTE_FIB6_TRIE_2B: +@@ -645,8 +647,8 @@ trie_create(const char *name, int socket_id, + + snprintf(mem_name, sizeof(mem_name), "DP_%s", name); + dp = rte_zmalloc_socket(name, sizeof(struct rte_trie_tbl) + +- TRIE_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE, +- socket_id); ++ TRIE_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t), ++ RTE_CACHE_LINE_SIZE, socket_id); + if (dp == NULL) { + rte_errno = ENOMEM; + return 
dp; diff --git a/dpdk/lib/graph/graph.c b/dpdk/lib/graph/graph.c index 26f0968a97..8ea2109645 100644 --- a/dpdk/lib/graph/graph.c @@ -40988,6 +59178,50 @@ index 7ecc021111..ba96521529 100644 * array of user data. This value is unique for this key, and is the same * value that was returned when the key was added. */ +diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c +index 4ff567ee5a..a952006686 100644 +--- a/dpdk/lib/hash/rte_thash.c ++++ b/dpdk/lib/hash/rte_thash.c +@@ -160,6 +160,30 @@ thash_get_rand_poly(uint32_t poly_degree) + RTE_DIM(irreducible_poly_table[poly_degree])]; + } + ++static inline uint32_t ++get_rev_poly(uint32_t poly, int degree) ++{ ++ int i; ++ /* ++ * The implicit highest coefficient of the polynomial ++ * becomes the lowest after reversal. ++ */ ++ uint32_t rev_poly = 1; ++ uint32_t mask = (1 << degree) - 1; ++ ++ /* ++ * Here we assume "poly" argument is an irreducible polynomial, ++ * thus the lowest coefficient of the "poly" must always be equal to "1". ++ * After the reversal, this the lowest coefficient becomes the highest and ++ * it is omitted since the highest coefficient is implicitly determined by ++ * degree of the polynomial. ++ */ ++ for (i = 1; i < degree; i++) ++ rev_poly |= ((poly >> i) & 0x1) << (degree - i); ++ ++ return rev_poly & mask; ++} ++ + static struct thash_lfsr * + alloc_lfsr(struct rte_thash_ctx *ctx) + { +@@ -179,7 +203,7 @@ alloc_lfsr(struct rte_thash_ctx *ctx) + lfsr->state = rte_rand() & ((1 << lfsr->deg) - 1); + } while (lfsr->state == 0); + /* init reverse order polynomial */ +- lfsr->rev_poly = (lfsr->poly >> 1) | (1 << (lfsr->deg - 1)); ++ lfsr->rev_poly = get_rev_poly(lfsr->poly, lfsr->deg); + /* init proper rev_state*/ + lfsr->rev_state = lfsr->state; + for (i = 0; i <= lfsr->deg; i++) diff --git a/dpdk/lib/latencystats/rte_latencystats.c b/dpdk/lib/latencystats/rte_latencystats.c index 8985a377db..e47eac2cf8 100644 --- a/dpdk/lib/latencystats/rte_latencystats.c @@ -41046,6 +59280,19 @@ index 286b32b788..c266727a13 100644 memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1)); } +diff --git a/dpdk/lib/member/rte_member_ht.c b/dpdk/lib/member/rte_member_ht.c +index a85561b472..0d0376b264 100644 +--- a/dpdk/lib/member/rte_member_ht.c ++++ b/dpdk/lib/member/rte_member_ht.c +@@ -493,7 +493,7 @@ rte_member_add_ht(const struct rte_member_setsum *ss, + return ret; + + /* Random pick prim or sec for recursive displacement */ +- uint32_t select_bucket = (tmp_sig && 1U) ? prim_bucket : sec_bucket; ++ uint32_t select_bucket = (tmp_sig & 1U) ? 
prim_bucket : sec_bucket; + if (ss->cache) { + ret = evict_from_bucket(); + buckets[select_bucket].sigs[ret] = tmp_sig; diff --git a/dpdk/lib/mempool/rte_mempool_ops.c b/dpdk/lib/mempool/rte_mempool_ops.c index ae1d288f27..e871de9ec9 100644 --- a/dpdk/lib/mempool/rte_mempool_ops.c @@ -41760,9 +60007,20 @@ index 27d1808c71..3a96741622 100644 return 0; } diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c -index f74ec939a9..7254defce7 100644 +index f74ec939a9..16485b27cb 100644 --- a/dpdk/lib/pcapng/rte_pcapng.c +++ b/dpdk/lib/pcapng/rte_pcapng.c +@@ -33,8 +33,8 @@ + /* conversion from DPDK speed to PCAPNG */ + #define PCAPNG_MBPS_SPEED 1000000ull + +-/* upper bound for section, stats and interface blocks */ +-#define PCAPNG_BLKSIZ 2048 ++/* upper bound for section, stats and interface blocks (in uint32_t) */ ++#define PCAPNG_BLKSIZ (2048 / sizeof(uint32_t)) + + /* Format of the capture file handle */ + struct rte_pcapng { @@ -128,7 +128,8 @@ pcapng_add_option(struct pcapng_option *popt, uint16_t code, { popt->code = code; @@ -41773,6 +60031,80 @@ index f74ec939a9..7254defce7 100644 return (struct pcapng_option *)((uint8_t *)popt + pcapng_optlen(len)); } +@@ -143,7 +144,7 @@ pcapng_section_block(rte_pcapng_t *self, + { + struct pcapng_section_header *hdr; + struct pcapng_option *opt; +- uint8_t buf[PCAPNG_BLKSIZ]; ++ uint32_t buf[PCAPNG_BLKSIZ]; + uint32_t len; + + len = sizeof(*hdr); +@@ -211,7 +212,7 @@ rte_pcapng_add_interface(rte_pcapng_t *self, uint16_t port, + struct pcapng_option *opt; + const uint8_t tsresol = 9; /* nanosecond resolution */ + uint32_t len; +- uint8_t buf[PCAPNG_BLKSIZ]; ++ uint32_t buf[PCAPNG_BLKSIZ]; + char ifname_buf[IF_NAMESIZE]; + char ifhw[256]; + uint64_t speed = 0; +@@ -329,7 +330,7 @@ rte_pcapng_write_stats(rte_pcapng_t *self, uint16_t port_id, + uint64_t start_time = self->offset_ns; + uint64_t sample_time; + uint32_t optlen, len; +- uint8_t buf[PCAPNG_BLKSIZ]; ++ uint32_t buf[PCAPNG_BLKSIZ]; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + +@@ -474,7 +475,7 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + const char *comment) + { + struct pcapng_enhance_packet_block *epb; +- uint32_t orig_len, data_len, padding, flags; ++ uint32_t orig_len, pkt_len, padding, flags; + struct pcapng_option *opt; + uint64_t timestamp; + uint16_t optlen; +@@ -515,8 +516,8 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + (md->ol_flags & RTE_MBUF_F_RX_RSS_HASH)); + + /* pad the packet to 32 bit boundary */ +- data_len = rte_pktmbuf_data_len(mc); +- padding = RTE_ALIGN(data_len, sizeof(uint32_t)) - data_len; ++ pkt_len = rte_pktmbuf_pkt_len(mc); ++ padding = RTE_ALIGN(pkt_len, sizeof(uint32_t)) - pkt_len; + if (padding > 0) { + void *tail = rte_pktmbuf_append(mc, padding); + +@@ -583,7 +584,7 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + goto fail; + + epb->block_type = PCAPNG_ENHANCED_PACKET_BLOCK; +- epb->block_length = rte_pktmbuf_data_len(mc); ++ epb->block_length = rte_pktmbuf_pkt_len(mc); + + /* Interface index is filled in later during write */ + mc->port = port_id; +@@ -592,7 +593,7 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + timestamp = rte_get_tsc_cycles(); + epb->timestamp_hi = timestamp >> 32; + epb->timestamp_lo = (uint32_t)timestamp; +- epb->capture_length = data_len; ++ epb->capture_length = pkt_len; + epb->original_length = orig_len; + + /* set trailer of block length */ +@@ -622,7 +623,7 @@ rte_pcapng_write_packets(rte_pcapng_t *self, + /* sanity check that is really a pcapng mbuf */ + epb = 
rte_pktmbuf_mtod(m, struct pcapng_enhance_packet_block *); + if (unlikely(epb->block_type != PCAPNG_ENHANCED_PACKET_BLOCK || +- epb->block_length != rte_pktmbuf_data_len(m))) { ++ epb->block_length != rte_pktmbuf_pkt_len(m))) { + rte_errno = EINVAL; + return -1; + } diff --git a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c b/dpdk/lib/pipeline/rte_swx_pipeline_spec.c index 2bba0d0524..17419e7b85 100644 --- a/dpdk/lib/pipeline/rte_swx_pipeline_spec.c @@ -41808,6 +60140,114 @@ index 7b2ae0b650..c964332011 100644 goto error; } /* QEMU needs a delay after connection */ +diff --git a/dpdk/lib/power/power_acpi_cpufreq.c b/dpdk/lib/power/power_acpi_cpufreq.c +index 8b55f19247..d860a12a8c 100644 +--- a/dpdk/lib/power/power_acpi_cpufreq.c ++++ b/dpdk/lib/power/power_acpi_cpufreq.c +@@ -258,7 +258,11 @@ power_acpi_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " +diff --git a/dpdk/lib/power/power_amd_pstate_cpufreq.c b/dpdk/lib/power/power_amd_pstate_cpufreq.c +index dbd9d2b3ee..ff63e91183 100644 +--- a/dpdk/lib/power/power_amd_pstate_cpufreq.c ++++ b/dpdk/lib/power/power_amd_pstate_cpufreq.c +@@ -376,7 +376,11 @@ power_amd_pstate_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " +diff --git a/dpdk/lib/power/power_common.c b/dpdk/lib/power/power_common.c +index 1e09facb86..ee0c264c8d 100644 +--- a/dpdk/lib/power/power_common.c ++++ b/dpdk/lib/power/power_common.c +@@ -9,6 +9,7 @@ + + #include + #include ++#include + + #include "power_common.h" + +@@ -202,3 +203,24 @@ out: + + return ret; + } ++ ++int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id) ++{ ++ rte_cpuset_t lcore_cpus; ++ uint32_t cpu; ++ ++ lcore_cpus = rte_lcore_cpuset(lcore_id); ++ if (CPU_COUNT(&lcore_cpus) != 1) { ++ RTE_LOG(ERR, POWER, "Power library does not support lcore %u mapping to %u CPUs\n", ++ lcore_id, CPU_COUNT(&lcore_cpus)); ++ return -1; ++ } ++ ++ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) { ++ if (CPU_ISSET(cpu, &lcore_cpus)) ++ break; ++ } ++ *cpu_id = cpu; ++ ++ return 0; ++} +diff --git a/dpdk/lib/power/power_common.h b/dpdk/lib/power/power_common.h +index c1c7139276..b928df941f 100644 +--- a/dpdk/lib/power/power_common.h ++++ b/dpdk/lib/power/power_common.h +@@ -27,5 +27,6 @@ int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...) 
+ int read_core_sysfs_u32(FILE *f, uint32_t *val); + int read_core_sysfs_s(FILE *f, char *buf, unsigned int len); + int write_core_sysfs_s(FILE *f, const char *str); ++int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id); + + #endif /* _POWER_COMMON_H_ */ +diff --git a/dpdk/lib/power/power_cppc_cpufreq.c b/dpdk/lib/power/power_cppc_cpufreq.c +index bb70f6ae52..add477c804 100644 +--- a/dpdk/lib/power/power_cppc_cpufreq.c ++++ b/dpdk/lib/power/power_cppc_cpufreq.c +@@ -36,7 +36,7 @@ + #define POWER_SYSFILE_SYS_MAX \ + "/sys/devices/system/cpu/cpu%u/cpufreq/cpuinfo_max_freq" + +-#define POWER_CPPC_DRIVER "cppc-cpufreq" ++#define POWER_CPPC_DRIVER "cppc_cpufreq" + #define BUS_FREQ 100000 + + enum power_state { +@@ -362,7 +362,11 @@ power_cppc_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " diff --git a/dpdk/lib/power/power_intel_uncore.c b/dpdk/lib/power/power_intel_uncore.c index 688aebc4ee..be174dce44 100644 --- a/dpdk/lib/power/power_intel_uncore.c @@ -41847,11 +60287,55 @@ index 688aebc4ee..be174dce44 100644 RTE_LOG(ERR, POWER, "Too many available uncore frequencies: %d\n", num_uncore_freqs); goto out; +diff --git a/dpdk/lib/power/power_pstate_cpufreq.c b/dpdk/lib/power/power_pstate_cpufreq.c +index 5ca5f60bcd..fa9ef2aa8a 100644 +--- a/dpdk/lib/power/power_pstate_cpufreq.c ++++ b/dpdk/lib/power/power_pstate_cpufreq.c +@@ -564,7 +564,11 @@ power_pstate_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_performance(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " diff --git a/dpdk/lib/power/rte_power_pmd_mgmt.c b/dpdk/lib/power/rte_power_pmd_mgmt.c -index 38f8384085..6f18ed0adf 100644 +index 38f8384085..20aa753c3a 100644 --- a/dpdk/lib/power/rte_power_pmd_mgmt.c +++ b/dpdk/lib/power/rte_power_pmd_mgmt.c -@@ -686,7 +686,7 @@ int +@@ -419,11 +419,12 @@ check_scale(unsigned int lcore) + { + enum power_management_env env; + +- /* only PSTATE and ACPI modes are supported */ ++ /* only PSTATE, AMD-PSTATE, ACPI and CPPC modes are supported */ + if (!rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ) && + !rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ) && +- !rte_power_check_env_supported(PM_ENV_AMD_PSTATE_CPUFREQ)) { +- RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes are supported\n"); ++ !rte_power_check_env_supported(PM_ENV_AMD_PSTATE_CPUFREQ) && ++ !rte_power_check_env_supported(PM_ENV_CPPC_CPUFREQ)) { ++ RTE_LOG(DEBUG, POWER, "Only ACPI, PSTATE, AMD-PSTATE, or CPPC modes are supported\n"); + return -ENOTSUP; + } + /* ensure we could initialize the power library */ +@@ -433,8 +434,8 @@ check_scale(unsigned int lcore) + /* ensure we initialized the correct env */ + env = rte_power_get_env(); + if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ && +- env != PM_ENV_AMD_PSTATE_CPUFREQ) { +- RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes were initialized\n"); ++ env != PM_ENV_AMD_PSTATE_CPUFREQ && env != PM_ENV_CPPC_CPUFREQ) { ++ 
RTE_LOG(DEBUG, POWER, "Unable to initialize ACPI, PSTATE, AMD-PSTATE, or CPPC modes\n"); + return -ENOTSUP; + } + +@@ -686,7 +687,7 @@ int rte_power_pmd_mgmt_set_pause_duration(unsigned int duration) { if (duration == 0) { @@ -41860,7 +60344,7 @@ index 38f8384085..6f18ed0adf 100644 return -EINVAL; } pause_duration = duration; -@@ -709,7 +709,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min) +@@ -709,7 +710,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min) } if (min > scale_freq_max[lcore]) { @@ -41869,7 +60353,7 @@ index 38f8384085..6f18ed0adf 100644 return -EINVAL; } scale_freq_min[lcore] = min; -@@ -729,7 +729,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max) +@@ -729,7 +730,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max) if (max == 0) max = UINT32_MAX; if (max < scale_freq_min[lcore]) { @@ -41892,7 +60376,7 @@ index 474bdc9540..4f8897b639 100644 return -ENOMEM; diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.c b/dpdk/lib/rcu/rte_rcu_qsbr.c -index a9f3d6cc98..41a44be4b9 100644 +index a9f3d6cc98..e46ce7958e 100644 --- a/dpdk/lib/rcu/rte_rcu_qsbr.c +++ b/dpdk/lib/rcu/rte_rcu_qsbr.c @@ -92,7 +92,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id) @@ -41904,6 +60388,29 @@ index a9f3d6cc98..41a44be4b9 100644 v->qsbr_cnt[thread_id].lock_cnt); id = thread_id & __RTE_QSBR_THRID_MASK; +@@ -104,11 +104,11 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id) + /* Check if the thread is already registered */ + old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i), + rte_memory_order_relaxed); +- if (old_bmap & 1UL << id) ++ if (old_bmap & RTE_BIT64(id)) + return 0; + + do { +- new_bmap = old_bmap | (1UL << id); ++ new_bmap = old_bmap | RTE_BIT64(id); + success = rte_atomic_compare_exchange_strong_explicit( + __RTE_QSBR_THRID_ARRAY_ELM(v, i), + &old_bmap, new_bmap, +@@ -117,7 +117,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id) + if (success) + rte_atomic_fetch_add_explicit(&v->num_threads, + 1, rte_memory_order_relaxed); +- else if (old_bmap & (1UL << id)) ++ else if (old_bmap & RTE_BIT64(id)) + /* Someone else registered this thread. + * Counter should not be incremented. + */ @@ -144,7 +144,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id) return 1; } @@ -41913,6 +60420,47 @@ index a9f3d6cc98..41a44be4b9 100644 v->qsbr_cnt[thread_id].lock_cnt); id = thread_id & __RTE_QSBR_THRID_MASK; +@@ -156,11 +156,11 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id) + /* Check if the thread is already unregistered */ + old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i), + rte_memory_order_relaxed); +- if (!(old_bmap & (1UL << id))) ++ if (!(old_bmap & RTE_BIT64(id))) + return 0; + + do { +- new_bmap = old_bmap & ~(1UL << id); ++ new_bmap = old_bmap & ~RTE_BIT64(id); + /* Make sure any loads of the shared data structure are + * completed before removal of the thread from the list of + * reporting threads. +@@ -173,7 +173,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id) + if (success) + rte_atomic_fetch_sub_explicit(&v->num_threads, + 1, rte_memory_order_relaxed); +- else if (!(old_bmap & (1UL << id))) ++ else if (!(old_bmap & RTE_BIT64(id))) + /* Someone else unregistered this thread. + * Counter should not be incremented. 
+ */ +@@ -234,7 +234,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v) + t = rte_ctz64(bmap); + fprintf(f, "%u ", id + t); + +- bmap &= ~(1UL << t); ++ bmap &= ~RTE_BIT64(t); + } + } + +@@ -261,7 +261,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v) + rte_atomic_load_explicit( + &v->qsbr_cnt[id + t].lock_cnt, + rte_memory_order_relaxed)); +- bmap &= ~(1UL << t); ++ bmap &= ~RTE_BIT64(t); + } + } + diff --git a/dpdk/lib/rcu/rte_rcu_qsbr.h b/dpdk/lib/rcu/rte_rcu_qsbr.h index 5979fb0efb..8bda00e911 100644 --- a/dpdk/lib/rcu/rte_rcu_qsbr.h @@ -42165,8 +60713,21 @@ index 6315904c8e..7816fb11ac 100644 +void fdset_pipe_notify_sync(struct fdset *fdset); #endif +diff --git a/dpdk/lib/vhost/rte_vhost.h b/dpdk/lib/vhost/rte_vhost.h +index db92f05344..c6dba67a67 100644 +--- a/dpdk/lib/vhost/rte_vhost.h ++++ b/dpdk/lib/vhost/rte_vhost.h +@@ -613,6 +613,8 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num); + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice. + * + * Set the maximum number of queue pairs supported by the device. ++ * The value set is ignored for Vhost-user backends. It is only taken into ++ * account with VDUSE backends. + * + * @param path + * The vhost-user socket file path diff --git a/dpdk/lib/vhost/socket.c b/dpdk/lib/vhost/socket.c -index 5882e44176..0b95c54c5b 100644 +index 5882e44176..ffb8518e74 100644 --- a/dpdk/lib/vhost/socket.c +++ b/dpdk/lib/vhost/socket.c @@ -93,6 +93,7 @@ static struct vhost_user vhost_user = { @@ -42177,8 +60738,26 @@ index 5882e44176..0b95c54c5b 100644 .num = 0 }, .vsocket_cnt = 0, +@@ -864,6 +865,17 @@ rte_vhost_driver_set_max_queue_num(const char *path, uint32_t max_queue_pairs) + goto unlock_exit; + } + ++ /* ++ * This is only useful for VDUSE for which number of virtqueues is set ++ * by the backend. For Vhost-user, the number of virtqueues is defined ++ * by the frontend. 
++ */ ++ if (!vsocket->is_vduse) { ++ VHOST_LOG_CONFIG(path, DEBUG, "Keeping %u max queue pairs for Vhost-user backend\n", ++ VHOST_MAX_QUEUE_PAIRS); ++ goto unlock_exit; ++ } ++ + vsocket->max_queue_pairs = max_queue_pairs; + + unlock_exit: diff --git a/dpdk/lib/vhost/vdpa.c b/dpdk/lib/vhost/vdpa.c -index 219eef879c..ce4fb09859 100644 +index 219eef879c..f9730d0685 100644 --- a/dpdk/lib/vhost/vdpa.c +++ b/dpdk/lib/vhost/vdpa.c @@ -19,6 +19,7 @@ @@ -42197,7 +60776,15 @@ index 219eef879c..ce4fb09859 100644 { struct virtio_net *dev = get_device(vid); uint16_t idx, idx_m, desc_id; -@@ -193,17 +193,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) +@@ -174,6 +174,7 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + idx = vq->used->idx; + idx_m = s_vring->used->idx; + ret = (uint16_t)(idx_m - idx); ++ vq->used->flags = s_vring->used->flags; + + while (idx != idx_m) { + /* copy used entry, used ring logging is not covered here */ +@@ -193,17 +194,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) if (unlikely(nr_descs > vq->size)) return -1; @@ -42219,7 +60806,7 @@ index 219eef879c..ce4fb09859 100644 if (unlikely(!idesc)) return -1; -@@ -220,9 +224,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) +@@ -220,9 +225,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) if (unlikely(nr_descs-- == 0)) goto fail; desc = desc_ring[desc_id]; @@ -42312,7 +60899,7 @@ index 9bf5ef67b9..7b22281815 100644 } diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c -index e36312181a..f8e42dd619 100644 +index e36312181a..5b6c90437c 100644 --- a/dpdk/lib/vhost/vhost_user.c +++ b/dpdk/lib/vhost/vhost_user.c @@ -1799,6 +1799,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, @@ -42333,8 +60920,17 @@ index e36312181a..f8e42dd619 100644 return RTE_VHOST_MSG_RESULT_REPLY; } +@@ -2325,7 +2328,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, + * mmap from 0 to workaround a hugepage mmap bug: mmap will + * fail when offset is not page size aligned. 
+ */ +- addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); ++ addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off); + alignment = get_blk_size(fd); + close(fd); + if (addr == MAP_FAILED) { diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c -index 8af20f1487..6d53ff932d 100644 +index 8af20f1487..49c2052eba 100644 --- a/dpdk/lib/vhost/virtio_net.c +++ b/dpdk/lib/vhost/virtio_net.c @@ -1696,6 +1696,17 @@ virtio_dev_rx_packed(struct virtio_net *dev, @@ -42407,7 +61003,7 @@ index 8af20f1487..6d53ff932d 100644 - goto out; + if (unlikely(!vq->access_ok)) { + vhost_user_iotlb_rd_unlock(vq); -+ rte_rwlock_read_unlock(&vq->access_lock); ++ rte_rwlock_write_unlock(&vq->access_lock); + + virtio_dev_vring_translate(dev, vq); + goto out_no_unlock; @@ -42423,7 +61019,17 @@ index 8af20f1487..6d53ff932d 100644 return nb_tx; } -@@ -3083,7 +3104,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2810,6 +2831,9 @@ vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr, + */ + uint16_t csum = 0, off; + ++ if (hdr->csum_start >= rte_pktmbuf_pkt_len(m)) ++ return; ++ + if (rte_raw_cksum_mbuf(m, hdr->csum_start, + rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0) + return; +@@ -3083,7 +3107,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, { uint16_t i; uint16_t avail_entries; @@ -42431,7 +61037,7 @@ index 8af20f1487..6d53ff932d 100644 static bool allocerr_warned; /* -@@ -3122,11 +3142,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -3122,11 +3145,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, update_shadow_used_ring_split(vq, head_idx, 0); @@ -42444,7 +61050,7 @@ index 8af20f1487..6d53ff932d 100644 buf_len -= dev->vhost_hlen; -@@ -3143,8 +3160,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -3143,8 +3163,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, buf_len, mbuf_pool->name); allocerr_warned = true; } @@ -42453,7 +61059,7 @@ index 8af20f1487..6d53ff932d 100644 break; } -@@ -3155,27 +3170,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -3155,27 +3173,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n"); allocerr_warned = true; } @@ -42486,7 +61092,7 @@ index 8af20f1487..6d53ff932d 100644 } __rte_noinline -@@ -3581,11 +3590,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, +@@ -3581,11 +3593,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, vhost_user_iotlb_rd_lock(vq); @@ -42505,7 +61111,7 @@ index 8af20f1487..6d53ff932d 100644 /* * Construct a RARP broadcast packet, and inject it to the "pkts" -@@ -3646,6 +3657,7 @@ out_access_unlock: +@@ -3646,6 +3660,7 @@ out_access_unlock: if (unlikely(rarp_mbuf != NULL)) count += 1; @@ -42513,7 +61119,7 @@ index 8af20f1487..6d53ff932d 100644 return count; } -@@ -4196,11 +4208,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id, +@@ -4196,11 +4211,14 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id, vhost_user_iotlb_rd_lock(vq); @@ -42533,7 +61139,7 @@ index 8af20f1487..6d53ff932d 100644 /* * Construct a RARP broadcast packet, and inject it to the "pkts" -@@ -4266,5 +4281,6 @@ out_access_unlock: +@@ -4266,5 +4284,6 @@ out_access_unlock: if (unlikely(rarp_mbuf != NULL)) count += 1; diff --git a/SPECS/openvswitch3.3.spec b/SPECS/openvswitch3.3.spec index 
ccf6c75..52873e2 100644 --- a/SPECS/openvswitch3.3.spec +++ b/SPECS/openvswitch3.3.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 3.3.0 -Release: 91%{?dist} +Release: 92%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -769,6 +769,236 @@ exit 0 %endif %changelog +* Tue Feb 18 2025 Open vSwitch CI - 3.3.0-92 +- Merging dpdk subtree [RH git: 9bc783a662] + Commit list: + 0835a3db48 Merge tag 'v23.11.3' into 23.11 + c66daa12e7 version: 23.11.3 + b8e7797c11 net/virtio: fix Rx checksum calculation + 39bcde2d05 Revert "test/bonding: fix loop on members" () + 7715edd977 power: fix log message when checking lcore ID + 425f5c7785 log: remove per line log helper + 67b9d0a29a version: 23.11.3-rc1 + c6222d018c rcu: fix implicit conversion in bit shift + aca2600bac devtools: fix check of multiple commits fixed at once + 3f0cc8e583 doc: correct definition of stats per queue feature + c56601621d app/testpmd: avoid potential outside of array reference + d04a9ee164 app/testpmd: remove redundant policy action condition + 8a40df7196 test/eal: fix lcore check + efa612cc5a test/eal: fix loop coverage for alignment macros + cdb7df2917 test/event: avoid duplicate initialization + d7e1948e49 test/security: fix IPv6 extension loop + 24327bf071 test/bonding: fix MAC address comparison + cdf2912166 test/bonding: fix loop on members + 3d235f9cd4 pcapng: avoid potential unaligned data + 4e8254bf88 net/txgbe: fix a mass of interrupts + e6339f0cab vhost: fix deadlock in Rx async path + 2ea75ee333 net/mlx5: fix shared Rx queue control release + 8d25cec04e app/testpmd: fix aged flow destroy + e7a3a7d8c4 member: fix choice of bucket for displacement + bc468700b4 app/procinfo: fix leak on exit + e5180d0546 net/e1000/base: fix fallthrough in switch + 545d0c7dc5 common/dpaax/caamflib: enable fallthrough warnings + 31057c4fa6 net/i40e: check register read for outer VLAN + db0eb33a6a net/iavf: add segment-length check to Tx prep + 347fa26942 bus/dpaa: fix lock condition during error handling + 3f018be5c5 net/mlx5: fix miniCQEs number calculation + 56ed7562cc net/mlx5: fix Rx queue reference count in flushing flows + 2168fb3f24 net/mlx5: fix default RSS flows creation order + 12bdfdbf01 common/mlx5: fix misalignment + c86614bf2e net/mlx5: fix Rx queue control management + 8ef65f53eb net/mlx5: fix counter query loop getting stuck + 847c1bf789 net/mlx5/hws: fix allocation of STCs + 2f5821b3e7 net/mlx5: fix shared queue port number in vector Rx + 2f4b5d1426 common/mlx5: fix error CQE handling for 128 bytes CQE + 7f6b403c06 net/dpaa2: remove unnecessary check for null before free + 366c6b75e2 eventdev: fix possible array underflow/overflow + dd6f41e09e examples/l2fwd-event: fix spinlock handling + c761f2a838 net/cnxk: fix build on Ubuntu 24.04 + 2ac522b91b common/cnxk: fix build on Ubuntu 24.04 + bacffdd59e net/bnx2x: fix duplicate branch + fa00f7eb99 net/bnx2x: fix possible infinite loop at startup + 8080a3eb46 net/bnx2x: fix always true expression + 324925feea net/bnx2x: remove dead conditional + 5389d19ae9 net/bnxt: fix bad action offset in Tx BD + 17c6715f6c net/bnxt: fix TCP and UDP checksum flags + 4c1ae73212 net/bnxt: fix reading SFF-8436 SFP EEPROMs + 3b94bef6cd net/bnxt/tf_core: fix slice count in case of HA entry move + d0c86aba09 net/bnxt/tf_core: fix Thor TF EM key size check + 9df7f2642d net/bnxt/tf_core: fix TCAM manager data corruption + 31a6d07adb 
net/bnxt/tf_core: fix WC TCAM multi-slice delete + 9e0f8e220f net/cpfl: fix forwarding to physical port + ee03528a9d net/igc: fix Rx buffers when timestamping enabled + 57a39baf83 crypto/qat: fix ECDSA session handling + fe1d9398be crypto/qat: fix modexp/inv length + 378e2300c1 test/crypto: fix synchronous API calls + bbfd6cdd6b crypto/openssl: fix potential string overflow + 3b1fe8a417 baseband/acc: fix ring memory allocation + 8ee3e2a2ab event/octeontx: fix possible integer overflow + f388f8a755 net/hns3: fix fully use hardware flow director table + 6603e0a887 net/hns3: fix error code for repeatedly create counter + f9de7c177b net/vmxnet3: support larger MTU with version 6 + 728b42e03a net/vmxnet3: fix potential out of bounds stats access + 3490d4e57c net/ngbe: restrict configuration of VLAN strip offload + 7a8481b344 net/ngbe: fix interrupt lost in legacy or MSI mode + a00742dc65 net/ngbe: reconfigure more MAC Rx registers + d387ad6d3a net/ngbe: fix driver load bit to inform firmware + c8a526e4aa net/txgbe: fix driver load bit to inform firmware + b636e174f9 net/txgbe: remove outer UDP checksum capability + 5efb2e5375 net/txgbe: fix VF-PF mbox interrupt + 9028f6687e net/txgbe: fix SWFW mbox + 3763c93479 net/hns3: remove ROH devices + 45b60a7435 net/vmxnet3: fix crash after configuration failure + c6ad6ce36d net/netvsc: force Tx VLAN offload on 801.2Q packet + dcc46f0e5f examples/ntb: check info query return + f984d9801f test/bonding: remove redundant info query + 50aa17be6d examples/l3fwd: fix read beyond boundaries + 1ad8946112 examples/l3fwd-power: fix options parsing overflow + 57735df5fa net/dpaa2: fix memory corruption in TM + 690a4466f4 bus/fslmc: fix Coverity warnings in QBMAN + 3a06ee1e70 app/dumpcap: remove unused struct array + 67c2018ae1 net/mlx5: fix indirect list flow action callback invocation + fc9a001f1d net/mlx5: fix reported Rx/Tx descriptor limits + 8d1138af4d net/mlx5: fix SWS meter state initialization + c5b4c3ecad net/mlx5: fix non-template flow action validation + b6ca715f56 net/mlx5: fix SQ flow item size + 1fca4f8bf1 net/mlx5/hws: fix range definer error recovery + ffaa641b8f net/mlx5: fix GRE flow item translation for root table + 5eac1e6ff0 net/mlx5: fix memory leak in metering + 158ea9b655 eal/unix: optimize thread creation + 53b225e96b net/mlx5: fix Tx tracing to use single clock source + 9af26e2cde net/mlx5: fix real time counter reading from PCI BAR + 367c6c9c84 net/mlx5: fix trace script for multiple burst completion + e594df339a crypto/openssl: fix 3DES-CTR with big endian CPUs + 5301879859 common/cnxk: fix double free of flow aging resources + e26531c225 net/mvneta: fix possible out-of-bounds write + d30bef9034 net/ixgbe: fix link status delay on FreeBSD + d366bfb9c8 net/ice: detect stopping a flow director queue twice + 78b0870687 net/hns3: register VLAN flow match mode parameter + 8d0ca45d12 net/hns3: restrict tunnel flow rule to one header + 379d498833 net/netvsc: fix using Tx queue higher than Rx queues + c7073bd24e 23.11.3-rc1 + 45d48356e5 net/ionic: fix build with Fedora Rawhide + 1afa4c7c5e power: fix mapped lcore ID + cdeda6ffe8 net/gve/base: fix build with Fedora Rawhide + 8844b427e8 dmadev: fix potential null pointer access + a0607f5fb0 net/nfp: do not set IPv6 flag in transport mode + d21d489fad net/nfp: notify flower firmware about PF speed + 886b99e9fa hash: fix thash LFSR initialization + 1f89198cfb build: remove version check on compiler links function + 44ba0b17ea net/mlx5: fix flex item header length field translation + 0645f6587a 
net/mlx5: fix non full word sample fields in flex item + 39c058f69c net/mlx5: fix next protocol validation after flex item + 07b84c0ed3 app/testpmd: remove flex item init command leftover + be6e70f4b0 net/mlx5: fix number of supported flex parsers + 3f7bd80b7e net/mlx5: fix flex item tunnel mode + 01d6deac1d net/mlx5: add flex item query for tunnel mode + 0a64d025f6 net/mlx5/hws: fix flex item as tunnel header + 57d7ef4bd1 net/mlx5: workaround list management of Rx queue control + c5b4422ac6 net/iavf: preserve MAC address with i40e PF Linux driver + 55b382e481 net/ice/base: fix VLAN replay after reset + bf928774cc net/ice/base: add bounds check + 5325a17d8f net/pcap: fix blocking Rx + 23918a6a09 net/nfp: fix pause frame setting check + 732636cea5 net/nfp: fix link change return value + fe1c8a18d1 net/hns3: verify reset type from firmware + 1660095be0 ethdev: verify queue ID in Tx done cleanup + 80d2fcc130 net/tap: restrict maximum number of MP FDs + a228e595c1 net/memif: fix buffer overflow in zero copy Rx + 1766e0a61a net/gve: add IO memory barriers before reading descriptors + d4bf7fd4b3 net/gve: fix refill logic causing memory corruption + e6a3bb2869 net/nfp: fix representor port link status update + be3892c592 net/nfp: fix type declaration of some variables + b6bf4f5532 net/gve: always attempt Rx refill on DQ + 47d8a13a51 net/gve: fix mbuf allocation memory leak for DQ Rx + 07fb7df112 net/dpaa: fix reallocate mbuf handling + bc069f0864 bus/dpaa: fix the fman details status + c9c31ad42f bus/dpaa: fix VSP for 1G fm1-mac9 and 10 + bc23744d31 net/dpaa: fix typecasting channel ID + d8653104f8 bus/dpaa: fix PFDRs leaks due to FQRNIs + aaf4108fc3 ethdev: fix overflow in descriptor count + 07e7664545 net/hns3: fix dump counter of registers + 9691b735d0 net/hns3: remove some basic address dump + 423cac5f35 net/ena: revert redefining memcpy + a050ba7ffe net/mana: support rdma-core via pkg-config + 43526bd598 net/pcap: set live interface as non-blocking + d5a5ea31bb app/testpmd: remove unnecessary cast + d82a275bd9 net/tap: avoid memcpy with null argument + 661aa0b6e7 net/gve: fix Tx for chained mbuf + 8a558580d8 net/gve: fix queue setup and stop + e6295c5087 common/idpf: fix AVX-512 pointer copy on 32-bit + 19ba29791a net/iavf: fix AVX-512 pointer copy on 32-bit + 5bbf365cc9 net/ice: fix AVX-512 pointer copy on 32-bit + ea5a85fc65 net/i40e: fix AVX-512 pointer copy on 32-bit + ef1ec2ea1e net/iavf: delay VF reset command + 066aae50ee net/i40e/base: fix loop bounds + abd0416c81 net/i40e/base: fix unchecked return value + fc6a0dc44c net/i40e/base: fix repeated register dumps + 59d2c7c064 net/i40e/base: fix DDP loading with reserved track ID + e3abeff288 net/i40e/base: fix blinking X722 with X557 PHY + 190f37ff8e net/i40e/base: add missing X710TL device check + a9e11044c0 net/i40e/base: fix misleading debug logs and comments + 915830f032 net/i40e/base: fix setting flags in init function + fa2ad7b33c net/ixgbe/base: fix unchecked return value + ce72043138 net/ice/base: fix iteration of TLVs in Preserved Fields Area + 97b84ba5e9 net/ice/base: fix link speed for 200G + 49c494431a net/cpfl: fix parsing protocol ID mask field + 21d264149c net/iavf: fix crash when link is unstable + e4a7cb42dc net/cpfl: add checks for flow action types + c93b6d6b2d net/e1000: fix link status crash in secondary process + ac3339a7ce fib: fix AVX512 lookup + bc578c07f0 vhost: restrict set max queue pair API to VDUSE + 1048dc138b net/virtio-user: reset used index counter + 13406e37f9 vdpa/nfp: fix reconfiguration + fe6e2ce164 
vdpa/nfp: fix hardware initialization + 75cf78c0af vdpa: update used flags in used ring relay + 6e3cb080ec vhost: fix offset while mapping log base address + b9c33e6227 baseband/acc: fix soft output bypass RM + 015973002b baseband/acc: fix access to deallocated mem + 149d6cee55 common/cnxk: fix IRQ reconfiguration + 2151056d7a common/cnxk: fix base log level + 9193f9aaec event/cnxk: fix OOP handling in event mode + da82470018 net/cnxk: fix OOP handling for inbound packets + c79d3d3f9e common/cnxk: fix CPT HW word size for outbound SA + b4d9080615 common/cnxk: fix inline CTX write + 7ad2841fe8 common/cnxk: fix MAC address change with active VF + b07c1fbe16 event/cnxk: fix Rx timestamp handling + 47d7010639 net/cnxk: fix Rx offloads to handle timestamp + 44a397a33b net/cnxk: fix Rx timestamp handling for VF + 181e17a8ce ml/cnxk: fix handling of TVM model I/O + 0f68a40858 app/dumpcap: fix handling of jumbo frames + 92040c569e pcapng: fix handling of chained mbufs + 11ac84c58a fib6: add runtime checks in AVX512 lookup + f9cb3c9d11 power: enable CPPC + 2b19a7499a net/nfb: fix use after free + 037b235bde examples/vhost: fix free function mismatch + 0fc3555488 raw/ifpga: fix free function mismatch in interrupt config + 57d0cbadba raw/ifpga/base: fix use after free + 6ed76c1fb7 net/sfc: fix use after free in debug logs + 3e0f595521 net/nfp: fix double free in flow destroy + 9667b34853 net/e1000: fix use after free in filter flush + c24dc6acb5 net/cpfl: fix invalid free in JSON parser + e794faf1e5 net/cnxk: fix use after free in mempool create + d8502a35fa event/cnxk: fix free function mismatch in port config + 5fe3dbe571 dma/idxd: fix free function mismatch in device probe + c879fa42df crypto/bcmfs: fix free function mismatch + 02197bc242 common/idpf: fix use after free in mailbox init + 10f467409f common/qat: fix use after free in device probe + bd33ab9160 baseband/la12xx: fix use after free in modem config + c890f365b6 bpf: fix free function mismatch if convert fails + 77d2cb36a6 examples/ipsec-secgw: fix dequeue count from cryptodev + 023d3c4a2b crypto/scheduler: fix session size computation + 16c50a2885 dev: fix callback lookup when unregistering device + a5680bcdb4 common/dpaax/caamflib: fix PDCP SNOW-ZUC watchdog + 26befc8aee crypto/dpaa2_sec: fix memory leak + 97d5f9eedb examples/eventdev: fix queue crash with generic pipeline + 2d4b832135 test/event: fix target event queue + 0786b20a56 test/event: fix schedule type + 058c13be8d eal/x86: fix 32-bit write combining store + e56ba1eea6 drivers: remove redundant newline from logs + 1d5ac7180a bus/vdev: revert fix devargs in secondary process + + * Mon Feb 17 2025 Open vSwitch CI - 3.3.0-91 - Merging upstream branch-3.3 [RH git: b01c4cda51] Commit list: