From 644ec2c0815cfc5dc697ca803e619a3baad55121 Mon Sep 17 00:00:00 2001 From: Open vSwitch CI Date: Feb 18 2025 20:04:56 +0000 Subject: Import openvswitch3.1-3.1.0-148 from Fast DataPath --- diff --git a/SOURCES/openvswitch-3.1.0.patch b/SOURCES/openvswitch-3.1.0.patch index 0920b38..f086db3 100644 --- a/SOURCES/openvswitch-3.1.0.patch +++ b/SOURCES/openvswitch-3.1.0.patch @@ -20720,7 +20720,7 @@ index 82d83f4030..c08f6ae827 100644 name: ${{ steps.get_keys.outputs.logs }} path: | diff --git a/dpdk/.mailmap b/dpdk/.mailmap -index 75884b6fe2..b0d505e36d 100644 +index 75884b6fe2..bef76e5fd9 100644 --- a/dpdk/.mailmap +++ b/dpdk/.mailmap @@ -2,7 +2,7 @@ Aakash Sasidharan @@ -20786,7 +20786,15 @@ index 75884b6fe2..b0d505e36d 100644 Baruch Siach Bassam Zaid AlKilani Beilei Xing -@@ -166,7 +173,9 @@ Bin Huang +@@ -159,6 +166,7 @@ Bert van Leeuwen + Bhagyada Modali + Bharat Mota + Bill Hong ++Bill Xiang + Billy McFall + Billy O'Mahony + Bing Zhao +@@ -166,7 +174,9 @@ Bin Huang Bin Zheng Björn Töpel Bo Chen @@ -20796,7 +20804,7 @@ index 75884b6fe2..b0d505e36d 100644 Boris Pismenny Brandon Lo Brendan Ryan -@@ -195,6 +204,7 @@ Chaoyong He +@@ -195,6 +205,7 @@ Chaoyong He Chao Zhu Charles Brett Charles Myers @@ -20804,15 +20812,16 @@ index 75884b6fe2..b0d505e36d 100644 Chas Williams <3chas3@gmail.com> Chenbo Xia Chengchang Tang -@@ -206,6 +216,7 @@ Cheng Liu +@@ -206,6 +217,8 @@ Cheng Liu Cheng Peng Chengwen Feng Chenmin Sun +Chenming Chang ++Chenxingyu Wang Chenxu Di Cheryl Houser Chinh T Cao -@@ -295,6 +306,8 @@ Deepak Khandelwal +@@ -295,6 +308,8 @@ Deepak Khandelwal Deepak Kumar Jain Deirdre O'Connor Dekel Peled @@ -20821,7 +20830,7 @@ index 75884b6fe2..b0d505e36d 100644 Dennis Marinus Derek Chickles Des O Dea -@@ -338,12 +351,14 @@ Dzmitry Sautsa +@@ -338,12 +353,14 @@ Dzmitry Sautsa Ed Czeck Eduard Serra Edward Makarov @@ -20836,7 +20845,7 @@ index 75884b6fe2..b0d505e36d 100644 Emma Finn Emma Kenny Emmanuel Roullit -@@ -371,13 +386,17 @@ Farah Smith +@@ -371,13 +388,17 @@ Farah Smith Fei Chen Feifei Wang Fei Qin @@ -20854,7 +20863,7 @@ index 75884b6fe2..b0d505e36d 100644 Forrest Shi Francesco Santoro Francis Kelly -@@ -455,6 +474,7 @@ Hanoch Haim +@@ -455,6 +476,7 @@ Hanoch Haim Hanumanth Pothula Hao Chen Hao Wu @@ -20862,7 +20871,7 @@ index 75884b6fe2..b0d505e36d 100644 Hari Kumar Vemula Harini Ramakrishnan Hariprasad Govindharajan -@@ -474,6 +494,7 @@ Helin Zhang +@@ -474,6 +496,7 @@ Helin Zhang Hemant Agrawal Heng Ding Hengjian Zhang @@ -20870,7 +20879,7 @@ index 75884b6fe2..b0d505e36d 100644 Heng Wang Henning Schild Henry Cai -@@ -488,6 +509,7 @@ Hideyuki Yamashita +@@ -488,6 +511,7 @@ Hideyuki Yamashita Hiroki Shirokura Hiroshi Shimamoto Hiroyuki Mikita @@ -20878,7 +20887,7 @@ index 75884b6fe2..b0d505e36d 100644 Hongbo Zheng Hongjun Ni Hongzhi Guo -@@ -524,6 +546,7 @@ Ilya Maximets +@@ -524,6 +548,7 @@ Ilya Maximets Ilya V. 
Matveychikov Ilyes Ben Hamouda Intiyaz Basha @@ -20886,7 +20895,7 @@ index 75884b6fe2..b0d505e36d 100644 Itsuro Oda Ivan Boule Ivan Dyukov -@@ -601,6 +624,7 @@ Jie Liu +@@ -601,6 +626,7 @@ Jie Liu Jie Pan Jie Wang Jie Zhou @@ -20894,7 +20903,7 @@ index 75884b6fe2..b0d505e36d 100644 Jijiang Liu Jilei Chen Jim Harris -@@ -615,7 +639,8 @@ Jin Liu +@@ -615,7 +641,8 @@ Jin Liu Jin Yu Jiri Slaby Job Abraham @@ -20904,7 +20913,7 @@ index 75884b6fe2..b0d505e36d 100644 Joey Xing Johan Faltstrom Johan Källström -@@ -634,9 +659,11 @@ John McNamara +@@ -634,9 +661,11 @@ John McNamara John Miller John OLoughlin John Ousterhout @@ -20917,7 +20926,7 @@ index 75884b6fe2..b0d505e36d 100644 Jon DeVree Jon Loeliger Joongi Kim -@@ -663,13 +690,17 @@ Junjie Wan +@@ -663,13 +692,17 @@ Junjie Wan Jun Qiu Jun W Zhou Junxiao Shi @@ -20935,7 +20944,12 @@ index 75884b6fe2..b0d505e36d 100644 Kamil Bednarczyk Kamil Chalupnik Kamil Rytarowski -@@ -708,7 +739,9 @@ Konstantin Ananyev + Kishore Padmanabha + Klaus Degner + Kommula Shiva Shankar +-Konstantin Ananyev ++Konstantin Ananyev Krishna Murthy Krzysztof Galazka Krzysztof Kanas @@ -20945,7 +20959,7 @@ index 75884b6fe2..b0d505e36d 100644 Kuba Kozak Kumar Amber Kumara Parameshwaran -@@ -732,6 +765,7 @@ Leszek Zygo +@@ -732,6 +767,7 @@ Leszek Zygo Levend Sayar Lev Faerman Lewei Yang @@ -20953,7 +20967,7 @@ index 75884b6fe2..b0d505e36d 100644 Leyi Rong Liang Ma Liang-Min Larry Wang -@@ -747,7 +781,7 @@ Liming Sun +@@ -747,7 +783,7 @@ Liming Sun Linfan Hu Lingli Chen Lingyu Liu @@ -20962,7 +20976,7 @@ index 75884b6fe2..b0d505e36d 100644 Linsi Yuan Lior Margalit Li Qiang -@@ -784,6 +818,7 @@ Maciej Paczkowski +@@ -784,9 +820,11 @@ Maciej Paczkowski Maciej Rabeda Maciej Szwed Madhuker Mythri @@ -20970,7 +20984,11 @@ index 75884b6fe2..b0d505e36d 100644 Mahipal Challa Mah Yock Gen Mairtin o Loingsigh -@@ -795,7 +830,6 @@ Mandeep Rohilla ++Malcolm Bumgardner + Mallesham Jatharakonda + Mallesh Koujalagi + Malvika Gupta +@@ -795,7 +833,6 @@ Mandeep Rohilla Manish Chopra Manish Tomar Mao Jiang @@ -20978,7 +20996,7 @@ index 75884b6fe2..b0d505e36d 100644 Marcel Apfelbaum Marcel Cornu Marcelo Ricardo Leitner -@@ -812,6 +846,7 @@ Marcin Wojtas +@@ -812,6 +849,7 @@ Marcin Wojtas Marcin Zapolski Marco Varlese Marc Sune @@ -20986,7 +21004,7 @@ index 75884b6fe2..b0d505e36d 100644 Maria Lingemark Mario Carrillo Mário Kuka -@@ -835,6 +870,7 @@ Martin Weiser +@@ -835,6 +873,7 @@ Martin Weiser Martyna Szapar Maryam Tahhan Masoud Hasanifard @@ -20994,7 +21012,7 @@ index 75884b6fe2..b0d505e36d 100644 Matan Azrad Matej Vido Mateusz Kowalski -@@ -843,6 +879,7 @@ Mateusz Rusinski +@@ -843,6 +882,7 @@ Mateusz Rusinski Matias Elo Mats Liljegren Matteo Croce @@ -21002,7 +21020,7 @@ index 75884b6fe2..b0d505e36d 100644 Matthew Hall Matthew Smith Matthew Vick -@@ -877,6 +914,7 @@ Michael Santana +@@ -877,6 +917,7 @@ Michael Santana Michael Savisko Michael Shamis Michael S. 
Tsirkin @@ -21010,7 +21028,7 @@ index 75884b6fe2..b0d505e36d 100644 Michael Wildt Michal Berger Michal Jastrzebski -@@ -886,6 +924,7 @@ Michal Litwicki +@@ -886,10 +927,12 @@ Michal Litwicki Michal Mazurek Michal Michalik Michał Mirosław @@ -21018,7 +21036,12 @@ index 75884b6fe2..b0d505e36d 100644 Michal Swiatkowski Michal Wilczynski Michel Machado -@@ -911,6 +950,7 @@ Mitch Williams + Miguel Bernal Marin ++Mihai Brodschi + Mihai Pogonaru + Mike Baucom + Mike Pattrick +@@ -911,6 +954,7 @@ Mitch Williams Mit Matelske Mohamad Noor Alim Hussin Mohammad Abdul Awal @@ -21026,16 +21049,26 @@ index 75884b6fe2..b0d505e36d 100644 Mohammed Gamal Mohsin Kazmi Mohsin Mazhar Shaikh -@@ -947,7 +987,7 @@ Nemanja Marjanovic +@@ -946,8 +990,9 @@ Nelson Escobar + Nemanja Marjanovic Netanel Belgazal Netanel Gonen ++Niall Meade Niall Power -Nick Connolly +Nick Connolly Nick Nunley Niclas Storm Nicolas Chautru -@@ -1024,6 +1064,7 @@ Pawel Rutkowski +@@ -972,6 +1017,7 @@ Noa Ezra + Nobuhiro Miki + Norbert Ciosek + Odi Assli ++Ofer Dagan + Ognjen Joldzic + Ola Liljedahl + Oleg Polyakov +@@ -1024,6 +1070,7 @@ Pawel Rutkowski Pawel Wodkowski Pei Chao Pei Zhang @@ -21043,7 +21076,12 @@ index 75884b6fe2..b0d505e36d 100644 Peng He Peng Huang Peng Sun -@@ -1035,6 +1076,7 @@ Peter Spreadborough +@@ -1031,10 +1078,12 @@ Peng Yu + Peng Zhang + Pengzhen Liu + Peter Mccarthy ++Peter Morrow + Peter Spreadborough Petr Houska Phanendra Vukkisala Phil Yang @@ -21051,7 +21089,7 @@ index 75884b6fe2..b0d505e36d 100644 Pierre Pfister Piotr Azarewicz Piotr Bartosiewicz -@@ -1050,12 +1092,14 @@ Prashant Upadhyaya +@@ -1050,12 +1099,14 @@ Prashant Upadhyaya Prateek Agarwal Praveen Shetty Pravin Pathak @@ -21066,7 +21104,7 @@ index 75884b6fe2..b0d505e36d 100644 Qian Xu Qiao Liu Qi Fu -@@ -1070,6 +1114,7 @@ Quentin Armitage +@@ -1070,6 +1121,7 @@ Quentin Armitage Qun Wan Radha Mohan Chintakuntla Radoslaw Biernacki @@ -21074,7 +21112,7 @@ index 75884b6fe2..b0d505e36d 100644 Radu Bulie Radu Nicolau Rafael Ávila de Espíndola -@@ -1143,6 +1188,7 @@ Roy Franz +@@ -1143,6 +1195,7 @@ Roy Franz Roy Pledge Roy Shterman Ruifeng Wang @@ -21082,7 +21120,7 @@ index 75884b6fe2..b0d505e36d 100644 Ryan E Hall Sabyasachi Sengupta Sachin Saxena -@@ -1159,6 +1205,7 @@ Sangjin Han +@@ -1159,6 +1212,7 @@ Sangjin Han Sankar Chokkalingam Santoshkumar Karanappa Rastapur Santosh Shukla @@ -21090,7 +21128,7 @@ index 75884b6fe2..b0d505e36d 100644 Saori Usami Sarath Somasekharan Sarosh Arif -@@ -1167,6 +1214,7 @@ Satananda Burla +@@ -1167,6 +1221,7 @@ Satananda Burla Satha Rao Satheesh Paul Sathesh Edara @@ -21098,7 +21136,7 @@ index 75884b6fe2..b0d505e36d 100644 Savinay Dharmappa Scott Branden Scott Daniels -@@ -1196,6 +1244,7 @@ Shally Verma +@@ -1196,6 +1251,7 @@ Shally Verma Shannon Nelson Shannon Zhao Shaopeng He @@ -21106,7 +21144,7 @@ index 75884b6fe2..b0d505e36d 100644 Sharmila Podury Sharon Haroni Shay Agroskin -@@ -1210,6 +1259,7 @@ Shiqi Liu <835703180@qq.com> +@@ -1210,6 +1266,7 @@ Shiqi Liu <835703180@qq.com> Shiri Kuzin Shivanshu Shukla Shiweixian @@ -21114,7 +21152,7 @@ index 75884b6fe2..b0d505e36d 100644 Shlomi Gridish Shougang Wang Shraddha Joshi -@@ -1220,6 +1270,7 @@ Shuanglin Wang +@@ -1220,6 +1277,7 @@ Shuanglin Wang Shuki Katzenelson Shun Hao Shu Shen @@ -21122,7 +21160,7 @@ index 75884b6fe2..b0d505e36d 100644 Shweta Choudaha Shyam Kumar Shrivastav Shy Shyman -@@ -1232,6 +1283,7 @@ Simon Kuenzer +@@ -1232,6 +1290,7 @@ Simon Kuenzer Siobhan Butler Sirshak Das Sivaprasad Tummala @@ -21130,7 +21168,7 @@ index 75884b6fe2..b0d505e36d 100644 Siwar 
Zitouni Slawomir Mrozowicz Slawomir Rosek -@@ -1239,6 +1291,7 @@ Smadar Fuks +@@ -1239,6 +1298,7 @@ Smadar Fuks Solal Pirelli Solganik Alexander Somnath Kotur @@ -21138,18 +21176,19 @@ index 75884b6fe2..b0d505e36d 100644 Song Zhu Sony Chacko Sotiris Salloumis -@@ -1331,8 +1384,10 @@ Tianli Lai +@@ -1331,8 +1391,11 @@ Tianli Lai Tianyu Li Timmons C. Player Timothy McDaniel +Timothy Miskell Timothy Redaelli ++Tim Martin Tim Shearer +Ting-Kai Ku Ting Xu Tiwei Bie Todd Fujinaka -@@ -1346,6 +1401,7 @@ Tomasz Kulasek +@@ -1346,6 +1409,7 @@ Tomasz Kulasek Tomasz Zawadzki Tom Barbette Tom Crugnale @@ -21157,7 +21196,7 @@ index 75884b6fe2..b0d505e36d 100644 Tom Millington Tom Rix Tone Zhang -@@ -1364,6 +1420,7 @@ Vadim Suraev +@@ -1364,6 +1428,7 @@ Vadim Suraev Vakul Garg Vamsi Attunuru Vanshika Shukla @@ -21165,7 +21204,7 @@ index 75884b6fe2..b0d505e36d 100644 Vasily Philipov Veerasenareddy Burru Venkata Suresh Kumar P -@@ -1386,13 +1443,17 @@ Vijay Kumar Srivastava +@@ -1386,13 +1451,17 @@ Vijay Kumar Srivastava Vijay Srivastava Vikas Aggarwal Vikas Gupta @@ -21183,7 +21222,7 @@ index 75884b6fe2..b0d505e36d 100644 Vishal Kulkarni Vishwas Danivas Vitaliy Mysak -@@ -1410,11 +1471,13 @@ Walter Heymans +@@ -1410,11 +1479,13 @@ Walter Heymans Wang Sheng-Hui Wangyu (Eric) Waterman Cao @@ -21198,7 +21237,7 @@ index 75884b6fe2..b0d505e36d 100644 Wei Hu (Xavier) WeiJie Zhuang Weiliang Luo -@@ -1504,6 +1567,7 @@ Yi Lu +@@ -1504,6 +1575,7 @@ Yi Lu Yilun Xu Yinan Wang Ying A Wang @@ -21206,7 +21245,7 @@ index 75884b6fe2..b0d505e36d 100644 Yingya Han Yinjun Zhang Yipeng Wang -@@ -1562,6 +1626,7 @@ Zhipeng Lu +@@ -1562,6 +1634,7 @@ Zhipeng Lu Zhirun Yan Zhiwei He Zhiyong Yang @@ -21242,14 +21281,14 @@ index 22ef2ea4b9..1338ca00ba 100644 F: drivers/net/mana/ F: doc/guides/nics/mana.rst diff --git a/dpdk/VERSION b/dpdk/VERSION -index 7378dd9f9e..4e2024f345 100644 +index 7378dd9f9e..955e8424ce 100644 --- a/dpdk/VERSION +++ b/dpdk/VERSION @@ -1 +1 @@ -22.11.1 -+22.11.6 ++22.11.7 diff --git a/dpdk/app/dumpcap/main.c b/dpdk/app/dumpcap/main.c -index 2eb8414efa..81c9d7d2f1 100644 +index 2eb8414efa..0b6432ad4d 100644 --- a/dpdk/app/dumpcap/main.c +++ b/dpdk/app/dumpcap/main.c @@ -44,7 +44,6 @@ @@ -21260,7 +21299,16 @@ index 2eb8414efa..81c9d7d2f1 100644 #define MONITOR_INTERVAL (500 * 1000) #define MBUF_POOL_CACHE_SIZE 32 #define BURST_SIZE 32 -@@ -202,6 +201,7 @@ static void add_interface(uint16_t port, const char *name) +@@ -84,8 +83,6 @@ struct interface { + TAILQ_ENTRY(interface) next; + uint16_t port; + char name[RTE_ETH_NAME_MAX_LEN]; +- +- struct rte_rxtx_callback *rx_cb[RTE_MAX_QUEUES_PER_PORT]; + }; + + TAILQ_HEAD(interface_list, interface); +@@ -202,6 +199,7 @@ static void add_interface(uint16_t port, const char *name) rte_exit(EXIT_FAILURE, "no memory for interface\n"); memset(intf, 0, sizeof(*intf)); @@ -21268,7 +21316,7 @@ index 2eb8414efa..81c9d7d2f1 100644 rte_strscpy(intf->name, name, sizeof(intf->name)); printf("Capturing on '%s'\n", name); -@@ -546,6 +546,11 @@ static void dpdk_init(void) +@@ -546,6 +544,11 @@ static void dpdk_init(void) eal_argv[i++] = strdup(file_prefix); } @@ -21280,7 +21328,7 @@ index 2eb8414efa..81c9d7d2f1 100644 if (rte_eal_init(eal_argc, eal_argv) < 0) rte_exit(EXIT_FAILURE, "EAL init failed: is primary process running?\n"); } -@@ -554,6 +559,7 @@ static void dpdk_init(void) +@@ -554,6 +557,7 @@ static void dpdk_init(void) static struct rte_ring *create_ring(void) { struct rte_ring *ring; @@ -21288,7 +21336,7 @@ index 2eb8414efa..81c9d7d2f1 100644 size_t size, log2; /* Find 
next power of 2 >= size. */ -@@ -567,31 +573,31 @@ static struct rte_ring *create_ring(void) +@@ -567,31 +571,31 @@ static struct rte_ring *create_ring(void) ring_size = size; } @@ -21333,7 +21381,40 @@ index 2eb8414efa..81c9d7d2f1 100644 if (mp == NULL) rte_exit(EXIT_FAILURE, "Mempool (%s) creation failed: %s\n", pool_name, -@@ -799,6 +805,11 @@ int main(int argc, char **argv) +@@ -734,7 +738,7 @@ static ssize_t + pcap_write_packets(pcap_dumper_t *dumper, + struct rte_mbuf *pkts[], uint16_t n) + { +- uint8_t temp_data[snaplen]; ++ uint8_t temp_data[RTE_ETHER_MAX_JUMBO_FRAME_LEN]; + struct pcap_pkthdr header; + uint16_t i; + size_t total = 0; +@@ -743,14 +747,19 @@ pcap_write_packets(pcap_dumper_t *dumper, + + for (i = 0; i < n; i++) { + struct rte_mbuf *m = pkts[i]; ++ size_t len, caplen; ++ ++ len = caplen = rte_pktmbuf_pkt_len(m); ++ if (unlikely(!rte_pktmbuf_is_contiguous(m) && len > snaplen)) ++ caplen = snaplen; + +- header.len = rte_pktmbuf_pkt_len(m); +- header.caplen = RTE_MIN(header.len, snaplen); ++ header.len = len; ++ header.caplen = caplen; + + pcap_dump((u_char *)dumper, &header, +- rte_pktmbuf_read(m, 0, header.caplen, temp_data)); ++ rte_pktmbuf_read(m, 0, caplen, temp_data)); + +- total += sizeof(header) + header.len; ++ total += sizeof(header) + caplen; + } + + return total; +@@ -799,6 +808,11 @@ int main(int argc, char **argv) { struct rte_ring *r; struct rte_mempool *mp; @@ -21345,7 +21426,7 @@ index 2eb8414efa..81c9d7d2f1 100644 dumpcap_out_t out; char *p; -@@ -826,6 +837,14 @@ int main(int argc, char **argv) +@@ -826,6 +840,14 @@ int main(int argc, char **argv) if (TAILQ_EMPTY(&interfaces)) set_default_interface(); @@ -21433,7 +21514,7 @@ index c6cf9d9c87..6216d5454c 100644 argp[0] = argv[0]; argp[1] = n_flag; diff --git a/dpdk/app/proc-info/main.c b/dpdk/app/proc-info/main.c -index 53e852a07c..9104f9e6b9 100644 +index 53e852a07c..8fd7ac945c 100644 --- a/dpdk/app/proc-info/main.c +++ b/dpdk/app/proc-info/main.c @@ -19,7 +19,6 @@ @@ -21541,6 +21622,23 @@ index 53e852a07c..9104f9e6b9 100644 } #ifdef RTE_LIB_SECURITY +@@ -1791,7 +1793,7 @@ main(int argc, char **argv) + + if (mem_info) { + meminfo_display(); +- return 0; ++ goto cleanup; + } + + nb_ports = rte_eth_dev_count_avail(); +@@ -1873,6 +1875,7 @@ main(int argc, char **argv) + RTE_ETH_FOREACH_DEV(i) + rte_eth_dev_close(i); + ++cleanup: + ret = rte_eal_cleanup(); + if (ret) + printf("Error from rte_eal_cleanup(), %d\n", ret); diff --git a/dpdk/app/test-bbdev/meson.build b/dpdk/app/test-bbdev/meson.build index cd6a5089d5..926e0a5271 100644 --- a/dpdk/app/test-bbdev/meson.build @@ -23641,7 +23739,7 @@ index 46f6b7d6d2..24d34f983e 100644 (void *)&cmd_load_bpf_start, (void *)&cmd_load_bpf_dir, diff --git a/dpdk/app/test-pmd/cmdline.c b/dpdk/app/test-pmd/cmdline.c -index b32dc8bfd4..820332df50 100644 +index b32dc8bfd4..5336df528b 100644 --- a/dpdk/app/test-pmd/cmdline.c +++ b/dpdk/app/test-pmd/cmdline.c @@ -468,6 +468,12 @@ static void cmd_help_long_parsed(void *parsed_result, @@ -23821,6 +23919,459 @@ index b32dc8bfd4..820332df50 100644 } cmd_config_queue_tx_offloads(&ports[res->port_id]); +@@ -12625,232 +12615,232 @@ static cmdline_parse_inst_t cmd_show_port_flow_transfer_proxy = { + + /* list of instructions */ + static cmdline_parse_ctx_t builtin_ctx[] = { +- (cmdline_parse_inst_t *)&cmd_help_brief, +- (cmdline_parse_inst_t *)&cmd_help_long, +- (cmdline_parse_inst_t *)&cmd_quit, +- (cmdline_parse_inst_t *)&cmd_load_from_file, +- (cmdline_parse_inst_t *)&cmd_showport, +- (cmdline_parse_inst_t *)&cmd_showqueue, +- 
(cmdline_parse_inst_t *)&cmd_showeeprom, +- (cmdline_parse_inst_t *)&cmd_showportall, +- (cmdline_parse_inst_t *)&cmd_representor_info, +- (cmdline_parse_inst_t *)&cmd_showdevice, +- (cmdline_parse_inst_t *)&cmd_showcfg, +- (cmdline_parse_inst_t *)&cmd_showfwdall, +- (cmdline_parse_inst_t *)&cmd_start, +- (cmdline_parse_inst_t *)&cmd_start_tx_first, +- (cmdline_parse_inst_t *)&cmd_start_tx_first_n, +- (cmdline_parse_inst_t *)&cmd_set_link_up, +- (cmdline_parse_inst_t *)&cmd_set_link_down, +- (cmdline_parse_inst_t *)&cmd_reset, +- (cmdline_parse_inst_t *)&cmd_set_numbers, +- (cmdline_parse_inst_t *)&cmd_set_log, +- (cmdline_parse_inst_t *)&cmd_set_rxoffs, +- (cmdline_parse_inst_t *)&cmd_set_rxpkts, +- (cmdline_parse_inst_t *)&cmd_set_rxhdrs, +- (cmdline_parse_inst_t *)&cmd_set_txpkts, +- (cmdline_parse_inst_t *)&cmd_set_txsplit, +- (cmdline_parse_inst_t *)&cmd_set_txtimes, +- (cmdline_parse_inst_t *)&cmd_set_fwd_list, +- (cmdline_parse_inst_t *)&cmd_set_fwd_mask, +- (cmdline_parse_inst_t *)&cmd_set_fwd_mode, +- (cmdline_parse_inst_t *)&cmd_set_fwd_retry_mode, +- (cmdline_parse_inst_t *)&cmd_set_burst_tx_retry, +- (cmdline_parse_inst_t *)&cmd_set_promisc_mode_one, +- (cmdline_parse_inst_t *)&cmd_set_promisc_mode_all, +- (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one, +- (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all, +- (cmdline_parse_inst_t *)&cmd_set_flush_rx, +- (cmdline_parse_inst_t *)&cmd_set_link_check, +- (cmdline_parse_inst_t *)&cmd_vlan_offload, +- (cmdline_parse_inst_t *)&cmd_vlan_tpid, +- (cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all, +- (cmdline_parse_inst_t *)&cmd_rx_vlan_filter, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set_qinq, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_reset, +- (cmdline_parse_inst_t *)&cmd_tx_vlan_set_pvid, +- (cmdline_parse_inst_t *)&cmd_csum_set, +- (cmdline_parse_inst_t *)&cmd_csum_show, +- (cmdline_parse_inst_t *)&cmd_csum_tunnel, +- (cmdline_parse_inst_t *)&cmd_csum_mac_swap, +- (cmdline_parse_inst_t *)&cmd_tso_set, +- (cmdline_parse_inst_t *)&cmd_tso_show, +- (cmdline_parse_inst_t *)&cmd_tunnel_tso_set, +- (cmdline_parse_inst_t *)&cmd_tunnel_tso_show, ++ &cmd_help_brief, ++ &cmd_help_long, ++ &cmd_quit, ++ &cmd_load_from_file, ++ &cmd_showport, ++ &cmd_showqueue, ++ &cmd_showeeprom, ++ &cmd_showportall, ++ &cmd_representor_info, ++ &cmd_showdevice, ++ &cmd_showcfg, ++ &cmd_showfwdall, ++ &cmd_start, ++ &cmd_start_tx_first, ++ &cmd_start_tx_first_n, ++ &cmd_set_link_up, ++ &cmd_set_link_down, ++ &cmd_reset, ++ &cmd_set_numbers, ++ &cmd_set_log, ++ &cmd_set_rxoffs, ++ &cmd_set_rxpkts, ++ &cmd_set_rxhdrs, ++ &cmd_set_txpkts, ++ &cmd_set_txsplit, ++ &cmd_set_txtimes, ++ &cmd_set_fwd_list, ++ &cmd_set_fwd_mask, ++ &cmd_set_fwd_mode, ++ &cmd_set_fwd_retry_mode, ++ &cmd_set_burst_tx_retry, ++ &cmd_set_promisc_mode_one, ++ &cmd_set_promisc_mode_all, ++ &cmd_set_allmulti_mode_one, ++ &cmd_set_allmulti_mode_all, ++ &cmd_set_flush_rx, ++ &cmd_set_link_check, ++ &cmd_vlan_offload, ++ &cmd_vlan_tpid, ++ &cmd_rx_vlan_filter_all, ++ &cmd_rx_vlan_filter, ++ &cmd_tx_vlan_set, ++ &cmd_tx_vlan_set_qinq, ++ &cmd_tx_vlan_reset, ++ &cmd_tx_vlan_set_pvid, ++ &cmd_csum_set, ++ &cmd_csum_show, ++ &cmd_csum_tunnel, ++ &cmd_csum_mac_swap, ++ &cmd_tso_set, ++ &cmd_tso_show, ++ &cmd_tunnel_tso_set, ++ &cmd_tunnel_tso_show, + #ifdef RTE_LIB_GRO +- (cmdline_parse_inst_t *)&cmd_gro_enable, +- (cmdline_parse_inst_t *)&cmd_gro_flush, +- (cmdline_parse_inst_t *)&cmd_gro_show, ++ &cmd_gro_enable, ++ &cmd_gro_flush, ++ &cmd_gro_show, + 
#endif + #ifdef RTE_LIB_GSO +- (cmdline_parse_inst_t *)&cmd_gso_enable, +- (cmdline_parse_inst_t *)&cmd_gso_size, +- (cmdline_parse_inst_t *)&cmd_gso_show, ++ &cmd_gso_enable, ++ &cmd_gso_size, ++ &cmd_gso_show, + #endif +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_hw, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_lw, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_pt, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_xon, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_macfwd, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_set_autoneg, +- (cmdline_parse_inst_t *)&cmd_link_flow_control_show, +- (cmdline_parse_inst_t *)&cmd_priority_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_queue_priority_flow_control_set, +- (cmdline_parse_inst_t *)&cmd_config_dcb, +- (cmdline_parse_inst_t *)&cmd_read_rxd_txd, +- (cmdline_parse_inst_t *)&cmd_stop, +- (cmdline_parse_inst_t *)&cmd_mac_addr, +- (cmdline_parse_inst_t *)&cmd_set_fwd_eth_peer, +- (cmdline_parse_inst_t *)&cmd_set_qmap, +- (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero, +- (cmdline_parse_inst_t *)&cmd_set_record_core_cycles, +- (cmdline_parse_inst_t *)&cmd_set_record_burst_stats, +- (cmdline_parse_inst_t *)&cmd_operate_port, +- (cmdline_parse_inst_t *)&cmd_operate_specific_port, +- (cmdline_parse_inst_t *)&cmd_operate_attach_port, +- (cmdline_parse_inst_t *)&cmd_operate_detach_port, +- (cmdline_parse_inst_t *)&cmd_operate_detach_device, +- (cmdline_parse_inst_t *)&cmd_set_port_setup_on, +- (cmdline_parse_inst_t *)&cmd_config_speed_all, +- (cmdline_parse_inst_t *)&cmd_config_speed_specific, +- (cmdline_parse_inst_t *)&cmd_config_loopback_all, +- (cmdline_parse_inst_t *)&cmd_config_loopback_specific, +- (cmdline_parse_inst_t *)&cmd_config_rx_tx, +- (cmdline_parse_inst_t *)&cmd_config_mtu, +- (cmdline_parse_inst_t *)&cmd_config_max_pkt_len, +- (cmdline_parse_inst_t *)&cmd_config_max_lro_pkt_size, +- (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, +- (cmdline_parse_inst_t *)&cmd_config_rss, +- (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size, +- (cmdline_parse_inst_t *)&cmd_config_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_config_deferred_start_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue, +- (cmdline_parse_inst_t *)&cmd_config_rss_reta, +- (cmdline_parse_inst_t *)&cmd_showport_reta, +- (cmdline_parse_inst_t *)&cmd_showport_macs, +- (cmdline_parse_inst_t *)&cmd_show_port_flow_transfer_proxy, +- (cmdline_parse_inst_t *)&cmd_config_burst, +- (cmdline_parse_inst_t *)&cmd_config_thresh, +- (cmdline_parse_inst_t *)&cmd_config_threshold, +- (cmdline_parse_inst_t *)&cmd_set_uc_hash_filter, +- (cmdline_parse_inst_t *)&cmd_set_uc_all_hash_filter, +- (cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter, +- (cmdline_parse_inst_t *)&cmd_queue_rate_limit, +- (cmdline_parse_inst_t *)&cmd_tunnel_udp_config, +- (cmdline_parse_inst_t *)&cmd_showport_rss_hash, +- (cmdline_parse_inst_t *)&cmd_showport_rss_hash_key, +- (cmdline_parse_inst_t *)&cmd_config_rss_hash_key, +- (cmdline_parse_inst_t *)&cmd_cleanup_txq_mbufs, +- (cmdline_parse_inst_t *)&cmd_dump, +- (cmdline_parse_inst_t *)&cmd_dump_one, +- (cmdline_parse_inst_t *)&cmd_flow, +- (cmdline_parse_inst_t *)&cmd_show_port_meter_cap, +- (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm, +- (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm, +- (cmdline_parse_inst_t 
*)&cmd_add_port_meter_profile_trtcm_rfc4115, +- (cmdline_parse_inst_t *)&cmd_del_port_meter_profile, +- (cmdline_parse_inst_t *)&cmd_create_port_meter, +- (cmdline_parse_inst_t *)&cmd_enable_port_meter, +- (cmdline_parse_inst_t *)&cmd_disable_port_meter, +- (cmdline_parse_inst_t *)&cmd_del_port_meter, +- (cmdline_parse_inst_t *)&cmd_del_port_meter_policy, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_profile, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_dscp_table, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_vlan_table, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_in_proto, +- (cmdline_parse_inst_t *)&cmd_get_port_meter_in_proto, +- (cmdline_parse_inst_t *)&cmd_get_port_meter_in_proto_prio, +- (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask, +- (cmdline_parse_inst_t *)&cmd_show_port_meter_stats, +- (cmdline_parse_inst_t *)&cmd_mcast_addr, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_anti_spoof, +- (cmdline_parse_inst_t *)&cmd_set_vf_mac_anti_spoof, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq, +- (cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert, +- (cmdline_parse_inst_t *)&cmd_set_tx_loopback, +- (cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en, +- (cmdline_parse_inst_t *)&cmd_set_vf_traffic, +- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode, +- (cmdline_parse_inst_t *)&cmd_vf_rate_limit, +- (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter, +- (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr, +- (cmdline_parse_inst_t *)&cmd_set_vxlan, +- (cmdline_parse_inst_t *)&cmd_set_vxlan_tos_ttl, +- (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_nvgre, +- (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_l2_encap, +- (cmdline_parse_inst_t *)&cmd_set_l2_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_l2_decap, +- (cmdline_parse_inst_t *)&cmd_set_l2_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap, +- (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap, +- (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap_with_vlan, +- (cmdline_parse_inst_t *)&cmd_set_conntrack_common, +- (cmdline_parse_inst_t *)&cmd_set_conntrack_dir, +- (cmdline_parse_inst_t *)&cmd_show_vf_stats, +- (cmdline_parse_inst_t *)&cmd_clear_vf_stats, +- (cmdline_parse_inst_t *)&cmd_show_port_supported_ptypes, +- (cmdline_parse_inst_t *)&cmd_set_port_ptypes, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_level_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_cap, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_type, +- (cmdline_parse_inst_t *)&cmd_show_port_tm_node_stats, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shared_shaper, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shared_shaper, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_node_wred_profile, +- (cmdline_parse_inst_t *)&cmd_del_port_tm_node_wred_profile, +- (cmdline_parse_inst_t *)&cmd_set_port_tm_node_shaper_profile, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node_pmode, +- (cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node, +- 
(cmdline_parse_inst_t *)&cmd_del_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent, +- (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_resume_port_tm_node, +- (cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_ecn, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_dscp, +- (cmdline_parse_inst_t *)&cmd_port_tm_mark_vlan_dei, +- (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port, +- (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa, +- (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration, +- (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload, +- (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload, +- (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa, +- (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration, +- (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload, +- (cmdline_parse_inst_t *)&cmd_config_per_queue_tx_offload, ++ &cmd_link_flow_control_set, ++ &cmd_link_flow_control_set_rx, ++ &cmd_link_flow_control_set_tx, ++ &cmd_link_flow_control_set_hw, ++ &cmd_link_flow_control_set_lw, ++ &cmd_link_flow_control_set_pt, ++ &cmd_link_flow_control_set_xon, ++ &cmd_link_flow_control_set_macfwd, ++ &cmd_link_flow_control_set_autoneg, ++ &cmd_link_flow_control_show, ++ &cmd_priority_flow_control_set, ++ &cmd_queue_priority_flow_control_set, ++ &cmd_config_dcb, ++ &cmd_read_rxd_txd, ++ &cmd_stop, ++ &cmd_mac_addr, ++ &cmd_set_fwd_eth_peer, ++ &cmd_set_qmap, ++ &cmd_set_xstats_hide_zero, ++ &cmd_set_record_core_cycles, ++ &cmd_set_record_burst_stats, ++ &cmd_operate_port, ++ &cmd_operate_specific_port, ++ &cmd_operate_attach_port, ++ &cmd_operate_detach_port, ++ &cmd_operate_detach_device, ++ &cmd_set_port_setup_on, ++ &cmd_config_speed_all, ++ &cmd_config_speed_specific, ++ &cmd_config_loopback_all, ++ &cmd_config_loopback_specific, ++ &cmd_config_rx_tx, ++ &cmd_config_mtu, ++ &cmd_config_max_pkt_len, ++ &cmd_config_max_lro_pkt_size, ++ &cmd_config_rx_mode_flag, ++ &cmd_config_rss, ++ &cmd_config_rxtx_ring_size, ++ &cmd_config_rxtx_queue, ++ &cmd_config_deferred_start_rxtx_queue, ++ &cmd_setup_rxtx_queue, ++ &cmd_config_rss_reta, ++ &cmd_showport_reta, ++ &cmd_showport_macs, ++ &cmd_show_port_flow_transfer_proxy, ++ &cmd_config_burst, ++ &cmd_config_thresh, ++ &cmd_config_threshold, ++ &cmd_set_uc_hash_filter, ++ &cmd_set_uc_all_hash_filter, ++ &cmd_vf_mac_addr_filter, ++ &cmd_queue_rate_limit, ++ &cmd_tunnel_udp_config, ++ &cmd_showport_rss_hash, ++ &cmd_showport_rss_hash_key, ++ &cmd_config_rss_hash_key, ++ &cmd_cleanup_txq_mbufs, ++ &cmd_dump, ++ &cmd_dump_one, ++ &cmd_flow, ++ &cmd_show_port_meter_cap, ++ &cmd_add_port_meter_profile_srtcm, ++ &cmd_add_port_meter_profile_trtcm, ++ &cmd_add_port_meter_profile_trtcm_rfc4115, ++ &cmd_del_port_meter_profile, ++ &cmd_create_port_meter, ++ &cmd_enable_port_meter, ++ &cmd_disable_port_meter, ++ &cmd_del_port_meter, ++ &cmd_del_port_meter_policy, ++ &cmd_set_port_meter_profile, ++ &cmd_set_port_meter_dscp_table, ++ &cmd_set_port_meter_vlan_table, ++ &cmd_set_port_meter_in_proto, ++ &cmd_get_port_meter_in_proto, ++ &cmd_get_port_meter_in_proto_prio, ++ &cmd_set_port_meter_stats_mask, ++ &cmd_show_port_meter_stats, ++ &cmd_mcast_addr, ++ &cmd_set_vf_vlan_anti_spoof, ++ &cmd_set_vf_mac_anti_spoof, ++ &cmd_set_vf_vlan_stripq, ++ &cmd_set_vf_vlan_insert, ++ &cmd_set_tx_loopback, ++ &cmd_set_all_queues_drop_en, ++ &cmd_set_vf_traffic, ++ &cmd_set_vf_rxmode, ++ &cmd_vf_rate_limit, ++ &cmd_vf_rxvlan_filter, ++ &cmd_set_vf_mac_addr, ++ 
&cmd_set_vxlan, ++ &cmd_set_vxlan_tos_ttl, ++ &cmd_set_vxlan_with_vlan, ++ &cmd_set_nvgre, ++ &cmd_set_nvgre_with_vlan, ++ &cmd_set_l2_encap, ++ &cmd_set_l2_encap_with_vlan, ++ &cmd_set_l2_decap, ++ &cmd_set_l2_decap_with_vlan, ++ &cmd_set_mplsogre_encap, ++ &cmd_set_mplsogre_encap_with_vlan, ++ &cmd_set_mplsogre_decap, ++ &cmd_set_mplsogre_decap_with_vlan, ++ &cmd_set_mplsoudp_encap, ++ &cmd_set_mplsoudp_encap_with_vlan, ++ &cmd_set_mplsoudp_decap, ++ &cmd_set_mplsoudp_decap_with_vlan, ++ &cmd_set_conntrack_common, ++ &cmd_set_conntrack_dir, ++ &cmd_show_vf_stats, ++ &cmd_clear_vf_stats, ++ &cmd_show_port_supported_ptypes, ++ &cmd_set_port_ptypes, ++ &cmd_show_port_tm_cap, ++ &cmd_show_port_tm_level_cap, ++ &cmd_show_port_tm_node_cap, ++ &cmd_show_port_tm_node_type, ++ &cmd_show_port_tm_node_stats, ++ &cmd_add_port_tm_node_shaper_profile, ++ &cmd_del_port_tm_node_shaper_profile, ++ &cmd_add_port_tm_node_shared_shaper, ++ &cmd_del_port_tm_node_shared_shaper, ++ &cmd_add_port_tm_node_wred_profile, ++ &cmd_del_port_tm_node_wred_profile, ++ &cmd_set_port_tm_node_shaper_profile, ++ &cmd_add_port_tm_nonleaf_node, ++ &cmd_add_port_tm_nonleaf_node_pmode, ++ &cmd_add_port_tm_leaf_node, ++ &cmd_del_port_tm_node, ++ &cmd_set_port_tm_node_parent, ++ &cmd_suspend_port_tm_node, ++ &cmd_resume_port_tm_node, ++ &cmd_port_tm_hierarchy_commit, ++ &cmd_port_tm_mark_ip_ecn, ++ &cmd_port_tm_mark_ip_dscp, ++ &cmd_port_tm_mark_vlan_dei, ++ &cmd_cfg_tunnel_udp_port, ++ &cmd_rx_offload_get_capa, ++ &cmd_rx_offload_get_configuration, ++ &cmd_config_per_port_rx_offload, ++ &cmd_config_per_queue_rx_offload, ++ &cmd_tx_offload_get_capa, ++ &cmd_tx_offload_get_configuration, ++ &cmd_config_per_port_tx_offload, ++ &cmd_config_per_queue_tx_offload, + #ifdef RTE_LIB_BPF +- (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse, +- (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse, ++ &cmd_operate_bpf_ld_parse, ++ &cmd_operate_bpf_unld_parse, + #endif +- (cmdline_parse_inst_t *)&cmd_config_tx_metadata_specific, +- (cmdline_parse_inst_t *)&cmd_show_tx_metadata, +- (cmdline_parse_inst_t *)&cmd_show_rx_tx_desc_status, +- (cmdline_parse_inst_t *)&cmd_show_rx_queue_desc_used_count, +- (cmdline_parse_inst_t *)&cmd_set_raw, +- (cmdline_parse_inst_t *)&cmd_show_set_raw, +- (cmdline_parse_inst_t *)&cmd_show_set_raw_all, +- (cmdline_parse_inst_t *)&cmd_config_tx_dynf_specific, +- (cmdline_parse_inst_t *)&cmd_show_fec_mode, +- (cmdline_parse_inst_t *)&cmd_set_fec_mode, +- (cmdline_parse_inst_t *)&cmd_set_rxq_avail_thresh, +- (cmdline_parse_inst_t *)&cmd_show_capability, +- (cmdline_parse_inst_t *)&cmd_set_flex_is_pattern, +- (cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern, ++ &cmd_config_tx_metadata_specific, ++ &cmd_show_tx_metadata, ++ &cmd_show_rx_tx_desc_status, ++ &cmd_show_rx_queue_desc_used_count, ++ &cmd_set_raw, ++ &cmd_show_set_raw, ++ &cmd_show_set_raw_all, ++ &cmd_config_tx_dynf_specific, ++ &cmd_show_fec_mode, ++ &cmd_set_fec_mode, ++ &cmd_set_rxq_avail_thresh, ++ &cmd_show_capability, ++ &cmd_set_flex_is_pattern, ++ &cmd_set_flex_spec_pattern, + NULL, + }; + @@ -12917,32 +12907,25 @@ cmdline_read_from_file(const char *filename) printf("Read CLI commands from %s\n", filename); } @@ -23866,10 +24417,26 @@ index b32dc8bfd4..820332df50 100644 void diff --git a/dpdk/app/test-pmd/cmdline_flow.c b/dpdk/app/test-pmd/cmdline_flow.c -index 88108498e0..698b4d9601 100644 +index 88108498e0..a0a7d3da14 100644 --- a/dpdk/app/test-pmd/cmdline_flow.c +++ b/dpdk/app/test-pmd/cmdline_flow.c -@@ -2940,6 +2940,7 @@ static const struct 
token token_list[] = { +@@ -100,7 +100,6 @@ enum index { + PULL, + + /* Flex arguments */ +- FLEX_ITEM_INIT, + FLEX_ITEM_CREATE, + FLEX_ITEM_DESTROY, + +@@ -1084,7 +1083,6 @@ struct parse_action_priv { + }) + + static const enum index next_flex_item[] = { +- FLEX_ITEM_INIT, + FLEX_ITEM_CREATE, + FLEX_ITEM_DESTROY, + ZERO, +@@ -2940,6 +2938,7 @@ static const struct token token_list[] = { NEXT_ENTRY(COMMON_UNSIGNED)), .args = ARGS(ARGS_ENTRY(struct buffer, args.table.attr.nb_flows)), @@ -23877,7 +24444,7 @@ index 88108498e0..698b4d9601 100644 }, [TABLE_PATTERN_TEMPLATE] = { .name = "pattern_template", -@@ -2979,7 +2980,7 @@ static const struct token token_list[] = { +@@ -2979,7 +2978,7 @@ static const struct token token_list[] = { [QUEUE_DESTROY] = { .name = "destroy", .help = "destroy a flow rule", @@ -23886,7 +24453,23 @@ index 88108498e0..698b4d9601 100644 NEXT_ENTRY(COMMON_QUEUE_ID)), .args = ARGS(ARGS_ENTRY(struct buffer, queue)), .call = parse_qo_destroy, -@@ -4771,9 +4772,12 @@ static const struct token token_list[] = { +@@ -3296,15 +3295,6 @@ static const struct token token_list[] = { + .next = NEXT(next_flex_item), + .call = parse_flex, + }, +- [FLEX_ITEM_INIT] = { +- .name = "init", +- .help = "flex item init", +- .args = ARGS(ARGS_ENTRY(struct buffer, args.flex.token), +- ARGS_ENTRY(struct buffer, port)), +- .next = NEXT(NEXT_ENTRY(COMMON_FLEX_TOKEN), +- NEXT_ENTRY(COMMON_PORT_ID)), +- .call = parse_flex +- }, + [FLEX_ITEM_CREATE] = { + .name = "create", + .help = "flex item create", +@@ -4771,9 +4761,12 @@ static const struct token token_list[] = { [ITEM_CONNTRACK] = { .name = "conntrack", .help = "conntrack state", @@ -23899,7 +24482,7 @@ index 88108498e0..698b4d9601 100644 }, [ITEM_PORT_REPRESENTOR] = { .name = "port_representor", -@@ -7737,15 +7741,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token, +@@ -7737,15 +7730,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token, l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -23919,7 +24502,7 @@ index 88108498e0..698b4d9601 100644 } action_encap_data->conf.size = header - action_encap_data->data; -@@ -7793,11 +7797,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token, +@@ -7793,11 +7786,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token, header = action_decap_data->data; if (l2_decap_conf.select_vlan) eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); @@ -23935,7 +24518,7 @@ index 88108498e0..698b4d9601 100644 } action_decap_data->conf.size = header - action_decap_data->data; -@@ -7877,15 +7881,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, +@@ -7877,15 +7870,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -23955,7 +24538,7 @@ index 88108498e0..698b4d9601 100644 } if (mplsogre_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -7972,15 +7976,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, +@@ -7972,15 +7965,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -23975,7 +24558,7 @@ index 88108498e0..698b4d9601 100644 } if 
(mplsogre_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8071,15 +8075,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, +@@ -8071,15 +8064,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -23995,7 +24578,7 @@ index 88108498e0..698b4d9601 100644 } if (mplsoudp_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8168,15 +8172,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, +@@ -8168,15 +8161,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token, mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); memcpy(eth.src.addr_bytes, mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); @@ -24015,7 +24598,7 @@ index 88108498e0..698b4d9601 100644 } if (mplsoudp_encap_conf.select_ipv4) { memcpy(header, &ipv4, sizeof(ipv4)); -@@ -8993,6 +8997,11 @@ parse_table(struct context *ctx, const struct token *token, +@@ -8993,6 +8986,11 @@ parse_table(struct context *ctx, const struct token *token, case TABLE_TRANSFER: out->args.table.attr.flow_attr.transfer = 1; return len; @@ -24027,8 +24610,16 @@ index 88108498e0..698b4d9601 100644 default: return -1; } +@@ -9224,7 +9222,6 @@ parse_flex(struct context *ctx, const struct token *token, + switch (ctx->curr) { + default: + break; +- case FLEX_ITEM_INIT: + case FLEX_ITEM_CREATE: + case FLEX_ITEM_DESTROY: + out->command = ctx->curr; diff --git a/dpdk/app/test-pmd/config.c b/dpdk/app/test-pmd/config.c -index acccb6b035..4ff0e72115 100644 +index acccb6b035..189bf68223 100644 --- a/dpdk/app/test-pmd/config.c +++ b/dpdk/app/test-pmd/config.c @@ -1875,6 +1875,7 @@ port_action_handle_update(portid_t port_id, uint32_t id, @@ -24057,6 +24648,15 @@ index acccb6b035..4ff0e72115 100644 default: update = action; break; +@@ -2125,7 +2137,7 @@ port_meter_policy_add(portid_t port_id, uint32_t policy_id, + for (act_n = 0, start = act; + act->type != RTE_FLOW_ACTION_TYPE_END; act++) + act_n++; +- if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END) ++ if (act_n > 0) + policy.actions[i] = start; + else + policy.actions[i] = NULL; @@ -2688,8 +2700,7 @@ port_queue_flow_create(portid_t port_id, queueid_t queue_id, flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table, pattern, pattern_idx, actions, actions_idx, job, &error); @@ -24080,7 +24680,31 @@ index acccb6b035..4ff0e72115 100644 mtr_update.color_mode_valid = 1; mtr_update.init_color_valid = 1; mtr_update.state_valid = 1; -@@ -4231,9 +4244,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, +@@ -3548,8 +3561,10 @@ port_flow_aged(portid_t port_id, uint8_t destroy) + } + type = (enum age_action_context_type *)contexts[idx]; + switch (*type) { +- case ACTION_AGE_CONTEXT_TYPE_FLOW: ++ case ACTION_AGE_CONTEXT_TYPE_FLOW: { ++ uint32_t flow_id; + ctx.pf = container_of(type, struct port_flow, age_type); ++ flow_id = ctx.pf->id; + printf("%-20s\t%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 + "\t%c%c%c\t\n", + "Flow", +@@ -3560,9 +3575,10 @@ port_flow_aged(portid_t port_id, uint8_t destroy) + ctx.pf->rule.attr->egress ? 'e' : '-', + ctx.pf->rule.attr->transfer ? 
't' : '-'); + if (destroy && !port_flow_destroy(port_id, 1, +- &ctx.pf->id)) ++ &flow_id)) + total++; + break; ++ } + case ACTION_AGE_CONTEXT_TYPE_INDIRECT_ACTION: + ctx.pia = container_of(type, + struct port_indirect_action, age_type); +@@ -4231,9 +4247,9 @@ fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc, continue; printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n", share_group, share_rxq); @@ -24092,7 +24716,7 @@ index acccb6b035..4ff0e72115 100644 lc_id, fs->rx_port, fs->rx_queue); printf("Please use --nb-cores=%hu to limit number of forwarding cores\n", nb_rxq); -@@ -4414,7 +4427,6 @@ rss_fwd_config_setup(void) +@@ -4414,7 +4430,6 @@ rss_fwd_config_setup(void) queueid_t nb_q; streamid_t sm_id; int start; @@ -24100,7 +24724,7 @@ index acccb6b035..4ff0e72115 100644 nb_q = nb_rxq; if (nb_q > nb_txq) -@@ -4422,7 +4434,7 @@ rss_fwd_config_setup(void) +@@ -4422,7 +4437,7 @@ rss_fwd_config_setup(void) cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; cur_fwd_config.nb_fwd_ports = nb_fwd_ports; cur_fwd_config.nb_fwd_streams = @@ -24109,7 +24733,7 @@ index acccb6b035..4ff0e72115 100644 if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) cur_fwd_config.nb_fwd_lcores = -@@ -4444,7 +4456,6 @@ rss_fwd_config_setup(void) +@@ -4444,7 +4459,6 @@ rss_fwd_config_setup(void) * the 2~3 queue for secondary process. */ start = proc_id * nb_q / num_procs; @@ -24117,7 +24741,7 @@ index acccb6b035..4ff0e72115 100644 rxp = 0; rxq = start; for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { -@@ -4463,8 +4474,6 @@ rss_fwd_config_setup(void) +@@ -4463,8 +4477,6 @@ rss_fwd_config_setup(void) continue; rxp = 0; rxq++; @@ -24126,7 +24750,7 @@ index acccb6b035..4ff0e72115 100644 } } -@@ -4609,7 +4618,7 @@ icmp_echo_config_setup(void) +@@ -4609,7 +4621,7 @@ icmp_echo_config_setup(void) lcoreid_t lc_id; uint16_t sm_id; @@ -25131,8 +25755,61 @@ index 2f596affee..a5e3d8699c 100644 return 0; } +diff --git a/dpdk/app/test/test_common.c b/dpdk/app/test/test_common.c +index f89e1eb7ee..372bb8f6ba 100644 +--- a/dpdk/app/test/test_common.c ++++ b/dpdk/app/test/test_common.c +@@ -8,11 +8,12 @@ + #include + #include + #include ++#include + #include + + #include "test.h" + +-#define MAX_NUM 1 << 20 ++#define MAX_NUM (1 << 20) + + #define FAIL(x)\ + {printf(x "() test failed!\n");\ +@@ -217,19 +218,21 @@ test_align(void) + } + } + +- for (p = 1; p <= MAX_NUM / 2; p++) { +- for (i = 1; i <= MAX_NUM / 2; i++) { +- val = RTE_ALIGN_MUL_CEIL(i, p); +- if (val % p != 0 || val < i) +- FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p); +- val = RTE_ALIGN_MUL_FLOOR(i, p); +- if (val % p != 0 || val > i) +- FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p); +- val = RTE_ALIGN_MUL_NEAR(i, p); +- if (val % p != 0 || ((val != RTE_ALIGN_MUL_CEIL(i, p)) +- & (val != RTE_ALIGN_MUL_FLOOR(i, p)))) +- FAIL_ALIGN("RTE_ALIGN_MUL_NEAR", i, p); +- } ++ /* testing the whole space of 2^20^2 takes too long. 
*/ ++ for (j = 1; j <= MAX_NUM ; j++) { ++ i = rte_rand_max(MAX_NUM - 1) + 1; ++ p = rte_rand_max(MAX_NUM - 1) + 1; ++ ++ val = RTE_ALIGN_MUL_CEIL(i, p); ++ if (val % p != 0 || val < i) ++ FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p); ++ val = RTE_ALIGN_MUL_FLOOR(i, p); ++ if (val % p != 0 || val > i) ++ FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p); ++ val = RTE_ALIGN_MUL_NEAR(i, p); ++ if (val % p != 0 || ((val != RTE_ALIGN_MUL_CEIL(i, p)) ++ & (val != RTE_ALIGN_MUL_FLOOR(i, p)))) ++ FAIL_ALIGN("RTE_ALIGN_MUL_NEAR", i, p); + } + + return 0; diff --git a/dpdk/app/test/test_cryptodev.c b/dpdk/app/test/test_cryptodev.c -index d6ae762df9..2069ccad3b 100644 +index d6ae762df9..b093fd4ae9 100644 --- a/dpdk/app/test/test_cryptodev.c +++ b/dpdk/app/test/test_cryptodev.c @@ -136,6 +136,17 @@ security_proto_supported(enum rte_security_session_action_type action, @@ -25760,12 +26437,16 @@ index d6ae762df9..2069ccad3b 100644 struct rte_cryptodev_info dev_info; struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = MAX_NUM_OPS_INFLIGHT -@@ -12630,6 +12690,19 @@ test_enq_callback_setup(void) +@@ -12630,6 +12690,23 @@ test_enq_callback_setup(void) struct rte_cryptodev_cb *cb; uint16_t qp_id = 0; + int j = 0; + ++ /* Skip test if synchronous API is used */ ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + /* Verify the crypto capabilities for which enqueue/dequeue is done. */ + cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; + cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; @@ -25780,7 +26461,7 @@ index d6ae762df9..2069ccad3b 100644 /* Stop the device in case it's started so it can be configured */ rte_cryptodev_stop(ts_params->valid_devs[0]); -@@ -12653,9 +12726,16 @@ test_enq_callback_setup(void) +@@ -12653,9 +12730,16 @@ test_enq_callback_setup(void) qp_conf.nb_descriptors, qp_id, ts_params->valid_devs[0]); @@ -25797,7 +26478,7 @@ index d6ae762df9..2069ccad3b 100644 TEST_ASSERT_NULL(cb, "Add callback on qp %u on " "cryptodev %u did not fail", qp_id, RTE_CRYPTO_MAX_DEVS); -@@ -12685,12 +12765,11 @@ test_enq_callback_setup(void) +@@ -12685,12 +12769,11 @@ test_enq_callback_setup(void) rte_cryptodev_start(ts_params->valid_devs[0]); @@ -25814,7 +26495,7 @@ index d6ae762df9..2069ccad3b 100644 /* Test with invalid crypto device */ TEST_ASSERT_FAIL(rte_cryptodev_remove_enq_callback( -@@ -12715,6 +12794,8 @@ test_enq_callback_setup(void) +@@ -12715,6 +12798,8 @@ test_enq_callback_setup(void) "qp %u on cryptodev %u", qp_id, ts_params->valid_devs[0]); @@ -25823,7 +26504,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SUCCESS; } -@@ -12722,6 +12803,7 @@ static int +@@ -12722,6 +12807,7 @@ static int test_deq_callback_setup(void) { struct crypto_testsuite_params *ts_params = &testsuite_params; @@ -25831,12 +26512,16 @@ index d6ae762df9..2069ccad3b 100644 struct rte_cryptodev_info dev_info; struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = MAX_NUM_OPS_INFLIGHT -@@ -12729,6 +12811,19 @@ test_deq_callback_setup(void) +@@ -12729,6 +12815,23 @@ test_deq_callback_setup(void) struct rte_cryptodev_cb *cb; uint16_t qp_id = 0; + int j = 0; + ++ /* Skip test if synchronous API is used */ ++ if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) ++ return TEST_SKIPPED; ++ + /* Verify the crypto capabilities for which enqueue/dequeue is done. 
*/ + cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; + cap_idx.algo.auth = RTE_CRYPTO_AUTH_NULL; @@ -25851,7 +26536,7 @@ index d6ae762df9..2069ccad3b 100644 /* Stop the device in case it's started so it can be configured */ rte_cryptodev_stop(ts_params->valid_devs[0]); -@@ -12752,9 +12847,16 @@ test_deq_callback_setup(void) +@@ -12752,9 +12855,16 @@ test_deq_callback_setup(void) qp_conf.nb_descriptors, qp_id, ts_params->valid_devs[0]); @@ -25868,7 +26553,7 @@ index d6ae762df9..2069ccad3b 100644 TEST_ASSERT_NULL(cb, "Add callback on qp %u on " "cryptodev %u did not fail", qp_id, RTE_CRYPTO_MAX_DEVS); -@@ -12784,12 +12886,11 @@ test_deq_callback_setup(void) +@@ -12784,12 +12894,11 @@ test_deq_callback_setup(void) rte_cryptodev_start(ts_params->valid_devs[0]); @@ -25885,7 +26570,7 @@ index d6ae762df9..2069ccad3b 100644 /* Test with invalid crypto device */ TEST_ASSERT_FAIL(rte_cryptodev_remove_deq_callback( -@@ -12814,6 +12915,8 @@ test_deq_callback_setup(void) +@@ -12814,6 +12923,8 @@ test_deq_callback_setup(void) "qp %u on cryptodev %u", qp_id, ts_params->valid_devs[0]); @@ -25894,7 +26579,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SUCCESS; } -@@ -12990,7 +13093,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) +@@ -12990,7 +13101,7 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); @@ -25903,7 +26588,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13121,7 +13224,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) +@@ -13121,7 +13232,7 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata) retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_VERIFY); @@ -25912,7 +26597,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13250,7 +13353,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, +@@ -13250,7 +13361,7 @@ test_AES_GMAC_authentication_SGL(const struct gmac_test_data *tdata, retval = create_gmac_session(ts_params->valid_devs[0], tdata, RTE_CRYPTO_AUTH_OP_GENERATE); @@ -25921,7 +26606,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13867,7 +13970,7 @@ test_authentication_verify_fail_when_data_corruption( +@@ -13867,7 +13978,7 @@ test_authentication_verify_fail_when_data_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY); @@ -25930,7 +26615,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -13954,6 +14057,8 @@ test_authentication_verify_GMAC_fail_when_corruption( +@@ -13954,6 +14065,8 @@ test_authentication_verify_GMAC_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); @@ -25939,7 +26624,7 @@ index d6ae762df9..2069ccad3b 100644 if (retval < 0) return retval; -@@ -14044,8 +14149,7 @@ test_authenticated_decryption_fail_when_corruption( +@@ -14044,8 +14157,7 @@ test_authenticated_decryption_fail_when_corruption( reference, RTE_CRYPTO_AUTH_OP_VERIFY, RTE_CRYPTO_CIPHER_OP_DECRYPT); @@ -25949,7 +26634,7 @@ index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; if (retval < 0) return retval; -@@ -14450,8 +14554,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -14450,8 +14562,13 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, &cap_idx) == NULL) return TEST_SKIPPED; @@ -25965,7 +26650,7 @@ 
index d6ae762df9..2069ccad3b 100644 return TEST_SKIPPED; /* Detailed check for the particular SGL support flag */ -@@ -14514,7 +14623,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, +@@ -14514,7 +14631,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata, } /* @@ -27283,10 +27968,10 @@ index 6fdc4cd9e3..56d4884529 100644 (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, diff --git a/dpdk/app/test/test_eal_flags.c b/dpdk/app/test/test_eal_flags.c -index d2b91e2075..f4740ee6e5 100644 +index d2b91e2075..767a9f3134 100644 --- a/dpdk/app/test/test_eal_flags.c +++ b/dpdk/app/test/test_eal_flags.c -@@ -671,7 +671,7 @@ test_missing_c_flag(void) +@@ -671,14 +671,14 @@ test_missing_c_flag(void) launch_proc(argv26) == 0 || launch_proc(argv27) == 0 || launch_proc(argv28) == 0 || launch_proc(argv30) == 0) { printf("Error - " @@ -27295,8 +27980,17 @@ index d2b91e2075..f4740ee6e5 100644 return -1; } + if (rte_lcore_is_enabled(0) && rte_lcore_is_enabled(1) && + rte_lcore_is_enabled(2) && rte_lcore_is_enabled(3) && +- rte_lcore_is_enabled(3) && rte_lcore_is_enabled(5) && +- rte_lcore_is_enabled(4) && rte_lcore_is_enabled(7) && ++ rte_lcore_is_enabled(4) && rte_lcore_is_enabled(5) && ++ rte_lcore_is_enabled(6) && rte_lcore_is_enabled(7) && + launch_proc(argv29) != 0) { + printf("Error - " + "process did not run ok with valid corelist value\n"); diff --git a/dpdk/app/test/test_event_crypto_adapter.c b/dpdk/app/test/test_event_crypto_adapter.c -index a38e389abd..3d720fe68b 100644 +index a38e389abd..7e38f8bfb9 100644 --- a/dpdk/app/test/test_event_crypto_adapter.c +++ b/dpdk/app/test/test_event_crypto_adapter.c @@ -958,11 +958,10 @@ configure_cryptodev(void) @@ -27314,6 +28008,38 @@ index a38e389abd..3d720fe68b 100644 ret = rte_vdev_init( RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); +@@ -1041,21 +1040,17 @@ configure_cryptodev(void) + + static inline void + evdev_set_conf_values(struct rte_event_dev_config *dev_conf, +- struct rte_event_dev_info *info) ++ const struct rte_event_dev_info *info) + { +- memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); +- dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; +- dev_conf->nb_event_ports = NB_TEST_PORTS; +- dev_conf->nb_event_queues = NB_TEST_QUEUES; +- dev_conf->nb_event_queue_flows = info->max_event_queue_flows; +- dev_conf->nb_event_port_dequeue_depth = +- info->max_event_port_dequeue_depth; +- dev_conf->nb_event_port_enqueue_depth = +- info->max_event_port_enqueue_depth; +- dev_conf->nb_event_port_enqueue_depth = +- info->max_event_port_enqueue_depth; +- dev_conf->nb_events_limit = +- info->max_num_events; ++ *dev_conf = (struct rte_event_dev_config) { ++ .dequeue_timeout_ns = info->min_dequeue_timeout_ns, ++ .nb_event_ports = NB_TEST_PORTS, ++ .nb_event_queues = NB_TEST_QUEUES, ++ .nb_event_queue_flows = info->max_event_queue_flows, ++ .nb_event_port_dequeue_depth = info->max_event_port_dequeue_depth, ++ .nb_event_port_enqueue_depth = info->max_event_port_enqueue_depth, ++ .nb_events_limit = info->max_num_events, ++ }; + } + + static int diff --git a/dpdk/app/test/test_event_eth_tx_adapter.c b/dpdk/app/test/test_event_eth_tx_adapter.c index c19a87a86a..2aeb28d8e9 100644 --- a/dpdk/app/test/test_event_eth_tx_adapter.c @@ -28076,7 +28802,7 @@ index 6373e62d33..9cc5f3487c 100644 } diff --git a/dpdk/app/test/test_link_bonding.c b/dpdk/app/test/test_link_bonding.c -index 5c496352c2..53f5c13a24 100644 +index 
5c496352c2..3307f590f0 100644 --- a/dpdk/app/test/test_link_bonding.c +++ b/dpdk/app/test/test_link_bonding.c @@ -2,7 +2,7 @@ @@ -28098,6 +28824,15 @@ index 5c496352c2..53f5c13a24 100644 current_slave_count = rte_eth_bond_slaves_get(test_params->bonded_port_id, slaves, RTE_MAX_ETHPORTS); +@@ -785,7 +786,7 @@ test_set_primary_slave(void) + TEST_ASSERT_SUCCESS(rte_eth_macaddr_get(test_params->bonded_port_id, &read_mac_addr), + "Failed to get mac address (port %d)", + test_params->bonded_port_id); +- TEST_ASSERT_SUCCESS(memcmp(&read_mac_addr, &read_mac_addr, ++ TEST_ASSERT_SUCCESS(memcmp(expected_mac_addr, &read_mac_addr, + sizeof(read_mac_addr)), + "bonded port mac address not set to that of primary port\n"); + @@ -4261,7 +4262,7 @@ test_tlb_tx_burst(void) burst_size); TEST_ASSERT_EQUAL(nb_tx, 0, " bad number of packet in burst"); @@ -28122,7 +28857,7 @@ index 21c512c94b..7410f99617 100644 #define TEST_LACP_SLAVE_COUT RTE_DIM(test_params.slave_ports) diff --git a/dpdk/app/test/test_link_bonding_rssconf.c b/dpdk/app/test/test_link_bonding_rssconf.c -index 464fb2dbd0..7aecee9117 100644 +index 464fb2dbd0..95b2a2bda2 100644 --- a/dpdk/app/test/test_link_bonding_rssconf.c +++ b/dpdk/app/test/test_link_bonding_rssconf.c @@ -324,7 +324,7 @@ test_propagate(void) @@ -28134,6 +28869,14 @@ index 464fb2dbd0..7aecee9117 100644 int retval = 0; uint64_t rss_hf = 0; +@@ -616,7 +616,6 @@ test_setup(void) + mac_addr.addr_bytes[5] = 0x10 + port->port_id; + rte_eth_dev_default_mac_addr_set(port->port_id, &mac_addr); + +- rte_eth_dev_info_get(port->port_id, &port->dev_info); + retval = rte_eth_dev_info_get(port->port_id, &port->dev_info); + TEST_ASSERT((retval == 0), + "Error during getting device (port %u) info: %s\n", diff --git a/dpdk/app/test/test_malloc.c b/dpdk/app/test/test_malloc.c index de40e50611..ff081dd931 100644 --- a/dpdk/app/test/test_malloc.c @@ -28321,6 +29064,34 @@ index 53fe898a38..7444be9399 100644 return 0; fail: +diff --git a/dpdk/app/test/test_pcapng.c b/dpdk/app/test/test_pcapng.c +index a7acbdc058..e9c85f2e02 100644 +--- a/dpdk/app/test/test_pcapng.c ++++ b/dpdk/app/test/test_pcapng.c +@@ -81,6 +81,14 @@ mbuf1_prepare(struct dummy_mbuf *dm, uint32_t plen) + + rte_eth_random_addr(pkt.eth.src_addr.addr_bytes); + memcpy(rte_pktmbuf_mtod(dm->mb, void *), &pkt, RTE_MIN(sizeof(pkt), plen)); ++ ++ /* Idea here is to create mbuf chain big enough that after mbuf deep copy they won't be ++ * compressed into single mbuf to properly test store of chained mbufs ++ */ ++ dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[1]), pkt_len); ++ dummy_mbuf_prep(&dm->mb[2], dm->buf[2], sizeof(dm->buf[2]), pkt_len); ++ rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]); ++ rte_pktmbuf_chain(&dm->mb[0], &dm->mb[2]); + } + + static int +@@ -138,7 +146,7 @@ test_write_packets(void) + for (i = 0; i < NUM_PACKETS; i++) { + struct rte_mbuf *mc; + +- mc = rte_pcapng_copy(port_id, 0, orig, mp, pkt_len, ++ mc = rte_pcapng_copy(port_id, 0, orig, mp, rte_pktmbuf_pkt_len(orig), + rte_get_tsc_cycles(), 0); + if (mc == NULL) { + fprintf(stderr, "Cannot copy packet\n"); diff --git a/dpdk/app/test/test_power.c b/dpdk/app/test/test_power.c index b7b5561348..a1b32adf58 100644 --- a/dpdk/app/test/test_power.c @@ -28334,6 +29105,66 @@ index b7b5561348..a1b32adf58 100644 return -1; } +diff --git a/dpdk/app/test/test_power_cpufreq.c b/dpdk/app/test/test_power_cpufreq.c +index 4d013cd7bb..051b9036dc 100644 +--- a/dpdk/app/test/test_power_cpufreq.c ++++ b/dpdk/app/test/test_power_cpufreq.c +@@ -9,6 +9,7 @@ + #include + #include + #include 
++#include + + #include "test.h" + +@@ -46,9 +47,10 @@ test_power_caps(void) + + static uint32_t total_freq_num; + static uint32_t freqs[TEST_POWER_FREQS_NUM_MAX]; ++static uint32_t cpu_id; + + static int +-check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo) ++check_cur_freq(__rte_unused unsigned int lcore_id, uint32_t idx, bool turbo) + { + #define TEST_POWER_CONVERT_TO_DECIMAL 10 + #define MAX_LOOP 100 +@@ -62,13 +64,13 @@ check_cur_freq(unsigned int lcore_id, uint32_t idx, bool turbo) + int i; + + if (snprintf(fullpath, sizeof(fullpath), +- TEST_POWER_SYSFILE_CPUINFO_FREQ, lcore_id) < 0) { ++ TEST_POWER_SYSFILE_CPUINFO_FREQ, cpu_id) < 0) { + return 0; + } + f = fopen(fullpath, "r"); + if (f == NULL) { + if (snprintf(fullpath, sizeof(fullpath), +- TEST_POWER_SYSFILE_SCALING_FREQ, lcore_id) < 0) { ++ TEST_POWER_SYSFILE_SCALING_FREQ, cpu_id) < 0) { + return 0; + } + f = fopen(fullpath, "r"); +@@ -486,6 +488,19 @@ test_power_cpufreq(void) + { + int ret = -1; + enum power_management_env env; ++ rte_cpuset_t lcore_cpus; ++ ++ lcore_cpus = rte_lcore_cpuset(TEST_POWER_LCORE_ID); ++ if (CPU_COUNT(&lcore_cpus) != 1) { ++ printf("Power management doesn't support lcore %u mapping to %u CPUs\n", ++ TEST_POWER_LCORE_ID, ++ CPU_COUNT(&lcore_cpus)); ++ return TEST_SKIPPED; ++ } ++ for (cpu_id = 0; cpu_id < CPU_SETSIZE; cpu_id++) { ++ if (CPU_ISSET(cpu_id, &lcore_cpus)) ++ break; ++ } + + /* Test initialisation of a valid lcore */ + ret = rte_power_init(TEST_POWER_LCORE_ID); diff --git a/dpdk/app/test/test_power_intel_uncore.c b/dpdk/app/test/test_power_intel_uncore.c index 31163af84e..38c72fb371 100644 --- a/dpdk/app/test/test_power_intel_uncore.c @@ -29090,6 +29921,20 @@ index 003537e200..d0a4b948e4 100644 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +diff --git a/dpdk/buildtools/chkincs/meson.build b/dpdk/buildtools/chkincs/meson.build +index 378c2f19ef..0f14e02104 100644 +--- a/dpdk/buildtools/chkincs/meson.build ++++ b/dpdk/buildtools/chkincs/meson.build +@@ -21,6 +21,9 @@ sources += gen_c_files.process(dpdk_chkinc_headers) + # some driver SDK headers depend on these two buses, which are mandatory in build + # so we always include them in deps list + deps = [get_variable('shared_rte_bus_vdev'), get_variable('shared_rte_bus_pci')] ++if dpdk_conf.has('RTE_BUS_VMBUS') ++ deps += get_variable('shared_rte_bus_vmbus') ++endif + # add the rest of the libs to the dependencies + foreach l:enabled_libs + deps += get_variable('shared_rte_' + l) diff --git a/dpdk/buildtools/meson.build b/dpdk/buildtools/meson.build index e1c600e40f..c4f6fe9271 100644 --- a/dpdk/buildtools/meson.build @@ -29467,7 +30312,7 @@ index fce85fb0d8..24e6539a15 100644 [host_machine] diff --git a/dpdk/config/arm/meson.build b/dpdk/config/arm/meson.build -index 6442ec9596..5028c74613 100644 +index 6442ec9596..45806d701e 100644 --- a/dpdk/config/arm/meson.build +++ b/dpdk/config/arm/meson.build @@ -43,7 +43,9 @@ implementer_generic = { @@ -29481,7 +30326,19 @@ index 6442ec9596..5028c74613 100644 'flags': [ ['RTE_ARCH_ARM_NEON_MEMCPY', false], ['RTE_ARCH_STRICT_ALIGN', true], -@@ -613,21 +615,25 @@ if update_flags +@@ -542,9 +544,8 @@ else + # native build + # The script returns ['Implementer', 'Variant', 'Architecture', + # 'Primary Part number', 'Revision'] +- detect_vendor = find_program(join_paths(meson.current_source_dir(), +- 'armv8_machine.py')) +- cmd = run_command(detect_vendor.path(), check: false) ++ detect_vendor = py3 
+ files('armv8_machine.py') ++ cmd = run_command(detect_vendor, check: false) + if cmd.returncode() == 0 + cmd_output = cmd.stdout().to_lower().strip().split(' ') + implementer_id = cmd_output[0] +@@ -613,21 +614,25 @@ if update_flags # probe supported archs and their features candidate_march = '' if part_number_config.has_key('march') @@ -29522,7 +30379,7 @@ index 6442ec9596..5028c74613 100644 if candidate_march == '' error('No suitable armv8 march version found.') endif -@@ -659,7 +665,7 @@ if update_flags +@@ -659,7 +664,7 @@ if update_flags # apply supported compiler options if part_number_config.has_key('compiler_options') foreach flag: part_number_config['compiler_options'] @@ -29639,6 +30496,42 @@ index cddebda5b5..11597eaa26 100644 objdump = 'x86_64-w64-mingw32-objdump' [host_machine] +diff --git a/dpdk/devtools/check-forbidden-tokens.awk b/dpdk/devtools/check-forbidden-tokens.awk +index 026844141c..2419d3565a 100755 +--- a/dpdk/devtools/check-forbidden-tokens.awk ++++ b/dpdk/devtools/check-forbidden-tokens.awk +@@ -32,14 +32,11 @@ BEGIN { + for (i in deny_expr) { + forbidden_added = "^\\+.*" deny_expr[i]; + forbidden_removed="^-.*" deny_expr[i]; +- current = expressions[deny_expr[i]] + if ($0 ~ forbidden_added) { +- count = count + 1; +- expressions[deny_expr[i]] = current + 1 ++ count = count + 1 + } + if ($0 ~ forbidden_removed) { +- count = count - 1; +- expressions[deny_expr[i]] = current - 1 ++ count = count - 1 + } + } + } +@@ -49,12 +46,13 @@ BEGIN { + } + } + # switch to next file , check if the balance of add/remove +-# of previous filehad new additions ++# of previous file had new additions + ($0 ~ "^\\+\\+\\+ b/") { + in_file = 0; + if (count > 0) { + exit; + } ++ count = 0 + for (i in deny_folders) { + re = "^\\+\\+\\+ b/" deny_folders[i]; + if ($0 ~ re) { diff --git a/dpdk/devtools/check-git-log.sh b/dpdk/devtools/check-git-log.sh index 01d8aa0717..2ee7f2db64 100755 --- a/dpdk/devtools/check-git-log.sh @@ -29724,6 +30617,19 @@ index be1cb03ea7..a07bbc83cb 100755 if [ "$tmpinput" != "$1" ]; then rm -f "$tmpinput" trap - INT +diff --git a/dpdk/devtools/git-log-fixes.sh b/dpdk/devtools/git-log-fixes.sh +index 8a4a8470c2..4690dd4545 100755 +--- a/dpdk/devtools/git-log-fixes.sh ++++ b/dpdk/devtools/git-log-fixes.sh +@@ -68,7 +68,7 @@ origin_version () # ... + { + for origin in $* ; do + # check hash is valid +- git rev-parse -q --verify $1 >&- || continue ++ git rev-parse -q --verify $origin >&- || continue + # get version of this bug origin + local origver=$(commit_version $origin) + local roothashes="$(origin_filter $origin)" diff --git a/dpdk/doc/api/doxy-api-index.md b/dpdk/doc/api/doxy-api-index.md index de488c7abf..bbca14be3d 100644 --- a/dpdk/doc/api/doxy-api-index.md @@ -29993,7 +30899,7 @@ index 293eab8787..871d14142c 100644 Listed below are the rte_flow actions supported: diff --git a/dpdk/doc/guides/nics/features.rst b/dpdk/doc/guides/nics/features.rst -index 1a1dc16c1e..4332ff0a31 100644 +index 1a1dc16c1e..5e662ba42b 100644 --- a/dpdk/doc/guides/nics/features.rst +++ b/dpdk/doc/guides/nics/features.rst @@ -34,6 +34,17 @@ Supports getting the speed capabilities that the current device is capable of. @@ -30014,7 +30920,61 @@ index 1a1dc16c1e..4332ff0a31 100644 .. _nic_features_link_status: Link status -@@ -740,6 +751,19 @@ Supports congestion management. +@@ -694,14 +705,32 @@ Basic stats + Support basic statistics such as: ipackets, opackets, ibytes, obytes, + imissed, ierrors, oerrors, rx_nombuf. 
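As background for the stats documentation reworked in this hunk, here is a minimal sketch of reading these counters through the public ethdev API. It is illustrative only and not part of the hunk itself; it assumes EAL is initialized and the port is started.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative sketch: read the basic counters named above with
 * rte_eth_stats_get(). The per-queue q_ipackets[]/q_errors[] entries
 * are filled only for the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues,
 * which is the point the documentation change below makes explicit. */
static void
show_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("ipackets=%" PRIu64 " opackets=%" PRIu64
	       " ibytes=%" PRIu64 " obytes=%" PRIu64 "\n",
	       stats.ipackets, stats.opackets, stats.ibytes, stats.obytes);
	printf("imissed=%" PRIu64 " ierrors=%" PRIu64
	       " oerrors=%" PRIu64 " rx_nombuf=%" PRIu64 "\n",
	       stats.imissed, stats.ierrors, stats.oerrors, stats.rx_nombuf);
	/* Counters are cleared with rte_eth_stats_reset(port_id). */
}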
+ +-And per queue stats: q_ipackets, q_opackets, q_ibytes, q_obytes, q_errors. +- + These apply to all drivers. + + * **[implements] eth_dev_ops**: ``stats_get``, ``stats_reset``. + * **[related] API**: ``rte_eth_stats_get``, ``rte_eth_stats_reset()``. + + ++.. _nic_features_stats_per_queue: ++ ++Stats per queue ++--------------- ++ ++Supports per queue stats: q_ipackets, q_opackets, q_ibytes, q_obytes, q_errors. ++Statistics only supplied for first ``RTE_ETHDEV_QUEUE_STAT_CNTRS`` (16) queues. ++If driver does not support this feature the per queue stats will be zero. ++ ++* **[implements] eth_dev_ops**: ``stats_get``, ``stats_reset``. ++* **[related] API**: ``rte_eth_stats_get``, ``rte_eth_stats_reset()``. ++ ++May also support configuring per-queue stat counter mapping. ++Used by some drivers to workaround HW limitations. ++ ++* **[implements] eth_dev_ops**: ``queue_stats_mapping_set``. ++* **[related] API**: ``rte_eth_dev_set_rx_queue_stats_mapping()``, ++ ``rte_eth_dev_set_tx_queue_stats_mapping()``. ++ ++ + .. _nic_features_extended_stats: + + Extended stats +@@ -716,18 +745,6 @@ Supports Extended Statistics, changes from driver to driver. + ``rte_eth_xstats_get_names_by_id()``, ``rte_eth_xstats_get_id_by_name()``. + + +-.. _nic_features_stats_per_queue: +- +-Stats per queue +---------------- +- +-Supports configuring per-queue stat counter mapping. +- +-* **[implements] eth_dev_ops**: ``queue_stats_mapping_set``. +-* **[related] API**: ``rte_eth_dev_set_rx_queue_stats_mapping()``, +- ``rte_eth_dev_set_tx_queue_stats_mapping()``. +- +- + .. _nic_features_congestion_management: + + Congestion management +@@ -740,6 +757,19 @@ Supports congestion management. ``rte_eth_cman_config_set()``, ``rte_eth_cman_config_get()``. @@ -30672,7 +31632,7 @@ index 005c0b2ca7..341146c4e7 100644 ------------------------------ diff --git a/dpdk/doc/guides/nics/mlx5.rst b/dpdk/doc/guides/nics/mlx5.rst -index 51f51259e3..b047d7db58 100644 +index 51f51259e3..d2f741a472 100644 --- a/dpdk/doc/guides/nics/mlx5.rst +++ b/dpdk/doc/guides/nics/mlx5.rst @@ -455,8 +455,12 @@ Limitations @@ -30726,6 +31686,84 @@ index 51f51259e3..b047d7db58 100644 Notes for metadata ------------------ +@@ -1574,6 +1589,77 @@ directly but neither destroyed nor flushed. + The application should re-create the flows as required after the port restart. + + ++Notes for flow counters ++----------------------- ++ ++mlx5 PMD supports the ``COUNT`` flow action, ++which provides an ability to count packets (and bytes) ++matched against a given flow rule. ++This section describes the high level overview of ++how this support is implemented and limitations. ++ ++HW steering flow engine ++~~~~~~~~~~~~~~~~~~~~~~~ ++ ++Flow counters are allocated from HW in bulks. ++A set of bulks forms a flow counter pool managed by PMD. ++When flow counters are queried from HW, ++each counter is identified by an offset in a given bulk. ++Querying HW flow counter requires sending a request to HW, ++which will request a read of counter values for given offsets. ++HW will asynchronously provide these values through a DMA write. ++ ++In order to optimize HW to SW communication, ++these requests are handled in a separate counter service thread ++spawned by mlx5 PMD. ++This service thread will refresh the counter values stored in memory, ++in cycles, each spanning ``svc_cycle_time`` milliseconds. ++By default, ``svc_cycle_time`` is set to 500. ++When applications query the ``COUNT`` flow action, ++PMD returns the values stored in host memory. 
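To make the query path just described concrete, here is a hedged sketch of reading a COUNT action through the generic rte_flow API; the returned hits/bytes come from the host-memory copy refreshed by the counter service thread described above. Rule creation is elided, and `flow` is assumed to be an existing rule carrying a COUNT action.

#include <stdint.h>
#include <rte_flow.h>

/* Sketch only: query the COUNT action of an existing flow rule. */
static int
query_flow_count(uint16_t port_id, struct rte_flow *flow,
		 uint64_t *hits, uint64_t *bytes)
{
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count count = { .reset = 0 };
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, &action, &count, &error) != 0)
		return -1; /* e.g. the rule has no counter attached */
	*hits = count.hits_set ? count.hits : 0;
	*bytes = count.bytes_set ? count.bytes : 0;
	return 0;
}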
++ ++mlx5 PMD manages 3 global rings of allocated counter offsets: ++ ++- ``free`` ring - Counters which were not used at all. ++- ``wait_reset`` ring - Counters which were used in some flow rules, ++ but were recently freed (flow rule was destroyed ++ or an indirect action was destroyed). ++ Since the count value might have changed ++ between the last counter service thread cycle and the moment it was freed, ++ the value in host memory might be stale. ++ During the next service thread cycle, ++ such counters will be moved to ``reuse`` ring. ++- ``reuse`` ring - Counters which were used at least once ++ and can be reused in new flow rules. ++ ++When counters are assigned to a flow rule (or allocated to indirect action), ++the PMD first tries to fetch a counter from ``reuse`` ring. ++If it's empty, the PMD fetches a counter from ``free`` ring. ++ ++The counter service thread works as follows: ++ ++#. Record counters stored in ``wait_reset`` ring. ++#. Read values of all counters which were used at least once ++ or are currently in use. ++#. Move recorded counters from ``wait_reset`` to ``reuse`` ring. ++#. Sleep for ``(query time) - svc_cycle_time`` milliseconds ++#. Repeat. ++ ++Because freeing a counter (by destroying a flow rule or destroying indirect action) ++does not immediately make it available for the application, ++the PMD might return: ++ ++- ``ENOENT`` if no counter is available in ``free``, ``reuse`` ++ or ``wait_reset`` rings. ++ No counter will be available until the application releases some of them. ++- ``EAGAIN`` if no counter is available in ``free`` and ``reuse`` rings, ++ but there are counters in ``wait_reset`` ring. ++ This means that after the next service thread cycle new counters will be available. ++ ++The application has to be aware that flow rule create or indirect action create ++might need be retried. ++ ++ + Notes for hairpin + ----------------- + diff --git a/dpdk/doc/guides/nics/tap.rst b/dpdk/doc/guides/nics/tap.rst index 2f7417bddd..07df0d35a2 100644 --- a/dpdk/doc/guides/nics/tap.rst @@ -31964,10 +33002,10 @@ index 2bb115d13f..f8befc6594 100644 +- 3rd Generation Intel® Xeon® Scalable Processors. +- 2nd Generation Intel® Xeon® Scalable Processors. 
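Referring back to the mlx5 flow-counter notes added earlier in this patch, the sketch below models the three-ring recycling scheme they describe (free / wait_reset / reuse). This is a deliberately simplified, self-contained illustration, not the PMD's actual data structures.

#include <stdbool.h>
#include <stdint.h>

enum { CNT_POOL_SZ = 8 };

/* Three LIFO pools of counter offsets, mirroring the rings in the
 * mlx5 guide: never-used, recently-freed (value may be stale), and
 * ready-for-reuse. Bounds hold as long as at most CNT_POOL_SZ
 * counters circulate through the pools. */
struct cnt_rings {
	uint32_t free_r[CNT_POOL_SZ];     int n_free;
	uint32_t wait_reset[CNT_POOL_SZ]; int n_wait;
	uint32_t reuse[CNT_POOL_SZ];      int n_reuse;
};

/* Allocation order per the guide: try "reuse" first, then "free".
 * Failure corresponds to EAGAIN when counters are parked in
 * "wait_reset" (retry after a service cycle) and ENOENT otherwise. */
static bool
cnt_alloc(struct cnt_rings *r, uint32_t *off)
{
	if (r->n_reuse > 0) {
		*off = r->reuse[--r->n_reuse];
		return true;
	}
	if (r->n_free > 0) {
		*off = r->free_r[--r->n_free];
		return true;
	}
	return false;
}

/* Freeing parks the counter until the next service-thread cycle. */
static void
cnt_free(struct cnt_rings *r, uint32_t off)
{
	r->wait_reset[r->n_wait++] = off;
}

/* One service cycle: after refreshing counter values from HW
 * (elided here), recently freed counters become reusable. */
static void
cnt_service_cycle(struct cnt_rings *r)
{
	while (r->n_wait > 0)
		r->reuse[r->n_reuse++] = r->wait_reset[--r->n_wait];
}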
diff --git a/dpdk/doc/guides/rel_notes/release_22_11.rst b/dpdk/doc/guides/rel_notes/release_22_11.rst -index 26e0560725..db8d9405d2 100644 +index 26e0560725..b3795fb780 100644 --- a/dpdk/doc/guides/rel_notes/release_22_11.rst +++ b/dpdk/doc/guides/rel_notes/release_22_11.rst -@@ -805,3 +805,1738 @@ Tested Platforms +@@ -805,3 +805,2014 @@ Tested Platforms ~~~~~~~~~~~~~ * drivers: fix symbol exports when map is omitted @@ -33706,6 +34744,282 @@ index 26e0560725..db8d9405d2 100644 + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-24.07-0.6.1.0 + * fw 22.42.1000 ++ ++22.11.7 Release Notes ++--------------------- ++ ++ ++22.11.7 Fixes ++~~~~~~~~~~~~~ ++ ++* app/dumpcap: fix handling of jumbo frames ++* app/dumpcap: remove unused struct array ++* app/procinfo: fix leak on exit ++* app/testpmd: fix aged flow destroy ++* app/testpmd: remove flex item init command leftover ++* app/testpmd: remove redundant policy action condition ++* app/testpmd: remove unnecessary cast ++* baseband/acc: fix access to deallocated mem ++* baseband/acc: fix ring memory allocation ++* baseband/la12xx: fix use after free in modem config ++* bpf: fix free function mismatch if convert fails ++* build: remove version check on compiler links function ++* buildtools/chkincs: check driver specific headers ++* bus/dpaa: fix lock condition during error handling ++* bus/dpaa: fix PFDRs leaks due to FQRNIs ++* bus/dpaa: fix the fman details status ++* bus/dpaa: fix VSP for 1G fm1-mac9 and 10 ++* bus/fslmc: fix Coverity warnings in QBMAN ++* common/cnxk: fix base log level ++* common/cnxk: fix build on Ubuntu 24.04 ++* common/cnxk: fix CPT HW word size for outbound SA ++* common/dpaax/caamflib: enable fallthrough warnings ++* common/dpaax/caamflib: fix PDCP SNOW-ZUC watchdog ++* common/idpf: fix use after free in mailbox init ++* common/mlx5: fix error CQE handling for 128 bytes CQE ++* common/mlx5: fix misalignment ++* config/arm: fix warning for native build with meson >= 0.55 ++* crypto/bcmfs: fix free function mismatch ++* crypto/dpaa2_sec: fix memory leak ++* crypto/openssl: fix 3DES-CTR with big endian CPUs ++* crypto/openssl: fix potential string overflow ++* crypto/qat: fix modexp/inv length ++* dev: fix callback lookup when unregistering device ++* devtools: fix check of multiple commits fixed at once ++* devtools: fix forbidden token check with multiple files ++* dmadev: fix potential null pointer access ++* dma/idxd: fix free function mismatch in device probe ++* doc: correct definition of stats per queue feature ++* eal/unix: optimize thread creation ++* eal/x86: fix 32-bit write combining store ++* ethdev: fix overflow in descriptor count ++* ethdev: verify queue ID in Tx done cleanup ++* event/cnxk: fix free function mismatch in port config ++* event/cnxk: fix Rx timestamp handling ++* eventdev: fix possible array underflow/overflow ++* event/octeontx: fix possible integer overflow ++* examples/eventdev: fix queue crash with generic pipeline ++* examples/ipsec-secgw: fix dequeue count from cryptodev ++* examples/l2fwd-event: fix spinlock handling ++* examples/l3fwd: fix read beyond boundaries ++* examples/ntb: check info query return ++* examples/vhost: fix free function mismatch ++* fib6: add runtime checks in AVX512 lookup ++* fib: fix AVX512 lookup ++* hash: fix thash LFSR initialization ++* member: fix choice of bucket for displacement ++* net/bnx2x: fix always true expression ++* net/bnx2x: fix duplicate branch ++* net/bnx2x: fix possible infinite loop at startup ++* net/bnx2x: remove dead conditional ++* net/bnxt: 
fix bad action offset in Tx BD ++* net/bnxt: fix reading SFF-8436 SFP EEPROMs ++* net/bnxt: fix TCP and UDP checksum flags ++* net/bnxt/tf_core: fix Thor TF EM key size check ++* net/bnxt/tf_ulp: fix parent child DB counters ++* net/cnxk: fix build on Ubuntu 24.04 ++* net/cnxk: fix Rx offloads to handle timestamp ++* net/cnxk: fix Rx timestamp handling for VF ++* net/dpaa2: fix memory corruption in TM ++* net/dpaa2: remove unnecessary check for null before free ++* net/dpaa: fix reallocate mbuf handling ++* net/dpaa: fix typecasting channel ID ++* net/e1000: fix link status crash in secondary process ++* net/e1000: fix use after free in filter flush ++* net/ena: revert redefining memcpy ++* net/gve/base: fix build with Fedora Rawhide ++* net/hns3: fix crash for NEON and SVE ++* net/hns3: fix dump counter of registers ++* net/hns3: fix error code for repeatedly create counter ++* net/hns3: fix fully use hardware flow director table ++* net/hns3: remove ROH devices ++* net/hns3: remove some basic address dump ++* net/hns3: restrict tunnel flow rule to one header ++* net/hns3: verify reset type from firmware ++* net/i40e/base: fix blinking X722 with X557 PHY ++* net/i40e/base: fix DDP loading with reserved track ID ++* net/i40e/base: fix loop bounds ++* net/i40e/base: fix misleading debug logs and comments ++* net/i40e/base: fix repeated register dumps ++* net/i40e/base: fix setting flags in init function ++* net/i40e/base: fix unchecked return value ++* net/i40e: check register read for outer VLAN ++* net/i40e: fix AVX-512 pointer copy on 32-bit ++* net/iavf: add segment-length check to Tx prep ++* net/iavf: fix AVX-512 pointer copy on 32-bit ++* net/iavf: fix crash when link is unstable ++* net/iavf: preserve MAC address with i40e PF Linux driver ++* net/ice/base: add bounds check ++* net/ice/base: fix iteration of TLVs in Preserved Fields Area ++* net/ice/base: fix link speed for 200G ++* net/ice/base: fix VLAN replay after reset ++* net/ice: detect stopping a flow director queue twice ++* net/ice: fix AVX-512 pointer copy on 32-bit ++* net/idpf: fix AVX-512 pointer copy on 32-bit ++* net/ionic: fix build on Fedora Rawhide ++* net/ixgbe/base: fix unchecked return value ++* net/ixgbe: fix link status delay on FreeBSD ++* net/mana: support rdma-core via pkg-config ++* net/memif: fix buffer overflow in zero copy Rx ++* net/mlx5: fix counter query loop getting stuck ++* net/mlx5: fix default RSS flows creation order ++* net/mlx5: fix flex item header length field translation ++* net/mlx5: fix GRE flow item translation for root table ++* net/mlx5: fix memory leak in metering ++* net/mlx5: fix miniCQEs number calculation ++* net/mlx5: fix next protocol validation after flex item ++* net/mlx5: fix non full word sample fields in flex item ++* net/mlx5: fix number of supported flex parsers ++* net/mlx5: fix real time counter reading from PCI BAR ++* net/mlx5: fix reported Rx/Tx descriptor limits ++* net/mlx5: fix Rx queue reference count in flushing flows ++* net/mlx5: fix shared queue port number in vector Rx ++* net/mlx5: fix SQ flow item size ++* net/mlx5: fix SWS meter state initialization ++* net/mlx5/hws: fix allocation of STCs ++* net/mlx5: update flex parser arc types support ++* net/mlx5: workaround list management of Rx queue control ++* net/mvneta: fix possible out-of-bounds write ++* net/netvsc: fix using Tx queue higher than Rx queues ++* net/netvsc: force Tx VLAN offload on 801.2Q packet ++* net/nfb: fix use after free ++* net/nfp: fix double free in flow destroy ++* net/nfp: fix link 
change return value ++* net/ngbe: fix driver load bit to inform firmware ++* net/ngbe: fix interrupt lost in legacy or MSI mode ++* net/ngbe: reconfigure more MAC Rx registers ++* net/ngbe: restrict configuration of VLAN strip offload ++* net/pcap: fix blocking Rx ++* net/pcap: set live interface as non-blocking ++* net/sfc: fix use after free in debug logs ++* net/tap: avoid memcpy with null argument ++* net/tap: restrict maximum number of MP FDs ++* net/txgbe: fix driver load bit to inform firmware ++* net/txgbe: fix SWFW mbox ++* net/txgbe: fix VF-PF mbox interrupt ++* net/txgbe: remove outer UDP checksum capability ++* net/virtio: fix Rx checksum calculation ++* net/virtio-user: reset used index counter ++* net/vmxnet3: fix crash after configuration failure ++* net/vmxnet3: fix potential out of bounds stats access ++* net/vmxnet3: support larger MTU with version 6 ++* pcapng: fix handling of chained mbufs ++* power: enable CPPC ++* power: fix log message when checking lcore ID ++* power: fix mapped lcore ID ++* raw/ifpga/base: fix use after free ++* raw/ifpga: fix free function mismatch in interrupt config ++* Revert "test/bonding: fix loop on members" ++* test/bonding: fix loop on members ++* test/bonding: fix MAC address comparison ++* test/bonding: remove redundant info query ++* test/crypto: fix synchronous API calls ++* test/eal: fix lcore check ++* test/eal: fix loop coverage for alignment macros ++* test/event: avoid duplicate initialization ++* vdpa: update used flags in used ring relay ++* version: 22.11.7-rc1 ++* vhost: fix offset while mapping log base address ++ ++22.11.7 Validation ++~~~~~~~~~~~~~~~~~~ ++ ++* Red Hat(R) Testing ++ ++ * Platform ++ ++ * RHEL 9.2 ++ * Kernel 5.14 ++ * Qemu 7.2.0 ++ * libvirt 9.0 ++ * openvswitch 3.1 ++ * X540-AT2 NIC(ixgbe, 10G) ++ ++ * Functionality ++ ++ * Guest with device assignment(PF) throughput testing(1G hugepage size) ++ * Guest with device assignment(PF) throughput testing(2M hugepage size) ++ * Guest with device assignment(VF) throughput testing ++ * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing ++ * PVP vhost-user 2Q throughput testing ++ * PVP vhost-user 1Q cross numa node throughput testing ++ * Guest with vhost-user 2 queues throughput testing ++ * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect ++ * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect ++ * PVP reconnect with dpdk-client, qemu-server: PASS ++ * PVP 1Q live migration testing ++ * PVP 1Q cross numa node live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) ++ * Guest with ovs+dpdk+vhost-user 2Q live migration testing ++ * Guest with ovs+dpdk+vhost-user 4Q live migration testing ++ * Host PF + DPDK testing ++ * Host VF + DPDK testing ++ ++ ++* Nvidia(R) Testing ++ ++ * Basic functionality via testpmd/example applications ++ ++ * Tx/Rx ++ * xstats ++ * Timestamps ++ * Link status ++ * RTE flow and flow_director ++ * RSS ++ * VLAN filtering, stripping and insertion ++ * Checksum/TSO ++ * ptype ++ * link_status_interrupt example application ++ * l3fwd-power example application ++ * Multi-process example applications ++ * Hardware LRO tests ++ * Buffer Split tests ++ * Tx scheduling tests ++ ++ * Build tests ++ ++ * Debian 12 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 22.04 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 24.04 with MLNX_OFED_LINUX-24.10-1.1.4.0. ++ * Ubuntu 24.04 with rdma-core v50.0. 
++ * Fedora 40 with rdma-core v48.0. ++ * Fedora 42 (Rawhide) with rdma-core v51.0. ++ * OpenSUSE Leap 15.6 with rdma-core v49.1. ++ ++ * BlueField-2 ++ ++ * DOCA 2.9.1 ++ * fw 24.43.2026 ++ ++ * ConnectX-7 ++ ++ * Ubuntu 22.04 ++ * Driver MLNX_OFED_LINUX-24.10-1.1.4.0 ++ * fw 28.43.2026 ++ ++ * ConnectX-6 Dx ++ ++ * Ubuntu 22.04 ++ * Driver MLNX_OFED_LINUX-24.10-1.1.4.0 ++ * fw 22.43.2026 ++ ++ ++* Intel(R) Testing ++ ++ * Basic Intel(R) NIC testing ++ * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu24.04, Ubuntu24.10, Fedora40, RHEL8.10, RHEL9.4, FreeBSD14.1, SUSE15, Centos7.9, AzureLinux3.0, OpenAnolis8.9 etc. ++ * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. ++ * PPF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. ++ * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc. ++ * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc. ++ ++ * Basic cryptodev and virtio testing ++ * Virtio: both function and performance test are covered. Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMAWARE ESXI 8.0, etc. ++ * Cryptodev: ++ * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. ++ * Performance test: test scenarios including Thoughput Performance/Cryptodev Latency, etc. diff --git a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst b/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst index 3ada3575ba..51621b692f 100644 --- a/dpdk/doc/guides/sample_app_ug/l2_forward_cat.rst @@ -33981,7 +35295,7 @@ index 0000000000..f8d2b19570 + +RTE_LOG_REGISTER_SUFFIX(acc_common_logtype, common, INFO); diff --git a/dpdk/drivers/baseband/acc/acc_common.h b/dpdk/drivers/baseband/acc/acc_common.h -index c076dc72cc..7ea3cc9a02 100644 +index c076dc72cc..ddd7d63f8a 100644 --- a/dpdk/drivers/baseband/acc/acc_common.h +++ b/dpdk/drivers/baseband/acc/acc_common.h @@ -131,9 +131,11 @@ @@ -33997,6 +35311,15 @@ index c076dc72cc..7ea3cc9a02 100644 ##__VA_ARGS__) /* ACC100 DMA Descriptor triplet */ +@@ -727,7 +729,7 @@ alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc_device *d, + sw_rings_base, ACC_SIZE_64MBYTE); + next_64mb_align_addr_iova = sw_rings_base_iova + + next_64mb_align_offset; +- sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size; ++ sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size - 1; + + /* Check if the end of the sw ring memory block is before the + * start of next 64MB aligned mem address @@ -962,6 +964,9 @@ acc_dma_enqueue(struct acc_queue *q, uint16_t n, req_elem_addr, (void *)q->mmio_reg_enqueue); @@ -34029,7 +35352,7 @@ index 77c393b533..1cbb06d107 100644 headers = files('rte_acc_cfg.h') diff --git a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c -index ba8247d47e..955c0236e4 100644 +index ba8247d47e..a5b1bb66eb 100644 --- a/dpdk/drivers/baseband/acc/rte_acc100_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc100_pmd.c @@ -26,9 +26,9 @@ @@ -34061,7 +35384,59 @@ index ba8247d47e..955c0236e4 100644 /* Report the AQ Index */ return (group_idx << ACC100_GRP_ID_SHIFT) + aq_idx; } -@@ -1220,7 +1221,7 @@ 
acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, +@@ -836,51 +837,15 @@ free_q: + return ret; + } + +-static inline void +-acc100_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type, +- uint16_t index) +-{ +- if (op == NULL) +- return; +- if (op_type == RTE_BBDEV_OP_LDPC_DEC) +- rte_bbdev_log(DEBUG, +- " Op 5GUL %d %d %d %d %d %d %d %d %d %d %d %d", +- index, +- op->ldpc_dec.basegraph, op->ldpc_dec.z_c, +- op->ldpc_dec.n_cb, op->ldpc_dec.q_m, +- op->ldpc_dec.n_filler, op->ldpc_dec.cb_params.e, +- op->ldpc_dec.op_flags, op->ldpc_dec.rv_index, +- op->ldpc_dec.iter_max, op->ldpc_dec.iter_count, +- op->ldpc_dec.harq_combined_input.length +- ); +- else if (op_type == RTE_BBDEV_OP_LDPC_ENC) { +- struct rte_bbdev_enc_op *op_dl = (struct rte_bbdev_enc_op *) op; +- rte_bbdev_log(DEBUG, +- " Op 5GDL %d %d %d %d %d %d %d %d %d", +- index, +- op_dl->ldpc_enc.basegraph, op_dl->ldpc_enc.z_c, +- op_dl->ldpc_enc.n_cb, op_dl->ldpc_enc.q_m, +- op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e, +- op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index +- ); +- } +-} +- + static int + acc100_queue_stop(struct rte_bbdev *dev, uint16_t queue_id) + { + struct acc_queue *q; +- struct rte_bbdev_dec_op *op; +- uint16_t i; + + q = dev->data->queues[queue_id].queue_private; + rte_bbdev_log(INFO, "Queue Stop %d H/T/D %d %d %x OpType %d", + queue_id, q->sw_ring_head, q->sw_ring_tail, + q->sw_ring_depth, q->op_type); +- for (i = 0; i < q->sw_ring_depth; ++i) { +- op = (q->ring_addr + i)->req.op_addr; +- acc100_print_op(op, q->op_type, i); +- } + /* ignore all operations in flight and clear counters */ + q->sw_ring_tail = q->sw_ring_head; + q->aq_enqueued = 0; +@@ -1220,7 +1185,7 @@ acc100_fcw_ld_fill(struct rte_bbdev_dec_op *op, struct acc_fcw_ld *fcw, - op->ldpc_dec.n_filler); /* Alignment on next 64B - Already enforced from HC output */ @@ -34070,7 +35445,7 @@ index ba8247d47e..955c0236e4 100644 /* Stronger alignment requirement when in decompression mode */ if (fcw->hcin_decomp_mode > 0) -@@ -3422,9 +3423,9 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, +@@ -3422,9 +3387,9 @@ acc100_enqueue_ldpc_enc_cb(struct rte_bbdev_queue_data *q_data, } avail--; enq = RTE_MIN(left, ACC_MUX_5GDL_DESC); @@ -34083,7 +35458,7 @@ index ba8247d47e..955c0236e4 100644 if (ret < 0) { acc_enqueue_invalid(q_data); break; -@@ -4034,8 +4035,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, +@@ -4034,8 +3999,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, /* CRC invalid if error exists */ if (!op->status) op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; @@ -34098,7 +35473,7 @@ index ba8247d47e..955c0236e4 100644 /* Check if this is the last desc in batch (Atomic Queue) */ if (desc->req.last_desc_in_batch) { -@@ -4119,8 +4124,6 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, +@@ -4119,8 +4088,6 @@ acc100_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, struct rte_bbdev_enc_op *op; union acc_dma_desc *desc; @@ -34108,7 +35483,7 @@ index ba8247d47e..955c0236e4 100644 if (unlikely(ops == 0)) return 0; diff --git a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c -index c5123cfef0..8bda3a8e07 100644 +index c5123cfef0..f2b7172e71 100644 --- a/dpdk/drivers/baseband/acc/rte_acc200_pmd.c +++ b/dpdk/drivers/baseband/acc/rte_acc200_pmd.c @@ -24,9 +24,9 @@ @@ -34123,7 +35498,60 @@ index c5123cfef0..8bda3a8e07 100644 #endif /* Calculate the offset of the enqueue 
register. */ -@@ -1848,6 +1848,9 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, +@@ -924,51 +924,16 @@ free_q: + return ret; + } + +-static inline void +-acc200_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type, +- uint16_t index) +-{ +- if (op == NULL) +- return; +- if (op_type == RTE_BBDEV_OP_LDPC_DEC) +- rte_bbdev_log(INFO, +- " Op 5GUL %d %d %d %d %d %d %d %d %d %d %d %d", +- index, +- op->ldpc_dec.basegraph, op->ldpc_dec.z_c, +- op->ldpc_dec.n_cb, op->ldpc_dec.q_m, +- op->ldpc_dec.n_filler, op->ldpc_dec.cb_params.e, +- op->ldpc_dec.op_flags, op->ldpc_dec.rv_index, +- op->ldpc_dec.iter_max, op->ldpc_dec.iter_count, +- op->ldpc_dec.harq_combined_input.length +- ); +- else if (op_type == RTE_BBDEV_OP_LDPC_ENC) { +- struct rte_bbdev_enc_op *op_dl = (struct rte_bbdev_enc_op *) op; +- rte_bbdev_log(INFO, +- " Op 5GDL %d %d %d %d %d %d %d %d %d", +- index, +- op_dl->ldpc_enc.basegraph, op_dl->ldpc_enc.z_c, +- op_dl->ldpc_enc.n_cb, op_dl->ldpc_enc.q_m, +- op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e, +- op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index +- ); +- } +-} +- + /* Stop ACC200 queue and clear counters. */ + static int + acc200_queue_stop(struct rte_bbdev *dev, uint16_t queue_id) + { + struct acc_queue *q; +- struct rte_bbdev_dec_op *op; +- uint16_t i; ++ + q = dev->data->queues[queue_id].queue_private; + rte_bbdev_log(INFO, "Queue Stop %d H/T/D %d %d %x OpType %d", + queue_id, q->sw_ring_head, q->sw_ring_tail, + q->sw_ring_depth, q->op_type); +- for (i = 0; i < q->sw_ring_depth; ++i) { +- op = (q->ring_addr + i)->req.op_addr; +- acc200_print_op(op, q->op_type, i); +- } + /* ignore all operations in flight and clear counters */ + q->sw_ring_tail = q->sw_ring_head; + q->aq_enqueued = 0; +@@ -1848,6 +1813,9 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, r = op->turbo_enc.tb_params.r; while (mbuf_total_left > 0 && r < c) { @@ -34133,7 +35561,7 @@ index c5123cfef0..8bda3a8e07 100644 seg_total_left = rte_pktmbuf_data_len(input) - in_offset; /* Set up DMA descriptor */ desc = acc_desc(q, total_enqueued_cbs); -@@ -1882,6 +1885,10 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, +@@ -1882,6 +1850,10 @@ enqueue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, r++; } @@ -34144,7 +35572,7 @@ index c5123cfef0..8bda3a8e07 100644 /* Set SDone on last CB descriptor for TB mode. 
*/ desc->req.sdone_enable = 1; -@@ -1903,7 +1910,8 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, +@@ -1903,7 +1875,8 @@ enqueue_ldpc_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op *op, uint16_t init_enq_descs = enq_descs; uint32_t in_offset = 0, out_offset = 0; @@ -34154,7 +35582,7 @@ index c5123cfef0..8bda3a8e07 100644 if (check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH)) input_len_B -= 3; -@@ -2079,6 +2087,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2079,6 +2052,10 @@ enqueue_ldpc_dec_one_op_cb(struct acc_queue *q, struct rte_bbdev_dec_op *op, } } @@ -34165,7 +35593,7 @@ index c5123cfef0..8bda3a8e07 100644 #ifdef RTE_LIBRTE_BBDEV_DEBUG rte_memdump(stderr, "FCW", &desc->req.fcw_ld, sizeof(desc->req.fcw_ld) - 8); -@@ -2128,6 +2140,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2128,6 +2105,9 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, } while (mbuf_total_left > 0 && r < c) { @@ -34175,7 +35603,7 @@ index c5123cfef0..8bda3a8e07 100644 if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DEC_SCATTER_GATHER)) seg_total_left = rte_pktmbuf_data_len(input) - in_offset; else -@@ -2173,6 +2188,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2173,6 +2153,10 @@ enqueue_ldpc_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r++; } @@ -34186,7 +35614,7 @@ index c5123cfef0..8bda3a8e07 100644 #ifdef RTE_LIBRTE_BBDEV_DEBUG if (check_mbuf_total_left(mbuf_total_left) != 0) return -EINVAL; -@@ -2215,6 +2234,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2215,6 +2199,8 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r = op->turbo_dec.tb_params.r; while (mbuf_total_left > 0 && r < c) { @@ -34195,7 +35623,7 @@ index c5123cfef0..8bda3a8e07 100644 seg_total_left = rte_pktmbuf_data_len(input) - in_offset; -@@ -2265,6 +2286,10 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, +@@ -2265,6 +2251,10 @@ enqueue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op *op, r++; } @@ -34206,7 +35634,7 @@ index c5123cfef0..8bda3a8e07 100644 /* Set SDone on last CB descriptor for TB mode */ desc->req.sdone_enable = 1; -@@ -2636,7 +2661,8 @@ acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, +@@ -2636,7 +2626,8 @@ acc200_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data, /* Dequeue one encode operations from ACC200 device in CB mode. */ static inline int dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, @@ -34216,7 +35644,7 @@ index c5123cfef0..8bda3a8e07 100644 { union acc_dma_desc *desc, atom_desc; union acc_dma_rsp_desc rsp; -@@ -2649,6 +2675,9 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2649,6 +2640,9 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, desc = q->ring_addr + desc_idx; atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); @@ -34226,7 +35654,7 @@ index c5123cfef0..8bda3a8e07 100644 /* Check fdone bit. 
*/ if (!(atom_desc.rsp.val & ACC_FDONE)) return -1; -@@ -2690,7 +2719,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2690,7 +2684,7 @@ dequeue_enc_one_op_cb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, static inline int dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, uint16_t *dequeued_ops, uint32_t *aq_dequeued, @@ -34235,7 +35663,7 @@ index c5123cfef0..8bda3a8e07 100644 { union acc_dma_desc *desc, *last_desc, atom_desc; union acc_dma_rsp_desc rsp; -@@ -2701,6 +2730,9 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, +@@ -2701,6 +2695,9 @@ dequeue_enc_one_op_tb(struct acc_queue *q, struct rte_bbdev_enc_op **ref_op, desc = acc_desc_tail(q, *dequeued_descs); atom_desc.atom_hdr = __atomic_load_n((uint64_t *)desc, __ATOMIC_RELAXED); @@ -34245,7 +35673,7 @@ index c5123cfef0..8bda3a8e07 100644 /* Check fdone bit. */ if (!(atom_desc.rsp.val & ACC_FDONE)) return -1; -@@ -2864,7 +2896,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, +@@ -2864,7 +2861,7 @@ dequeue_ldpc_dec_one_op_cb(struct rte_bbdev_queue_data *q_data, return 1; } @@ -34254,7 +35682,7 @@ index c5123cfef0..8bda3a8e07 100644 static inline int dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, uint16_t dequeued_cbs, uint32_t *aq_dequeued) -@@ -2918,8 +2950,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, +@@ -2918,8 +2915,12 @@ dequeue_dec_one_op_tb(struct acc_queue *q, struct rte_bbdev_dec_op **ref_op, /* CRC invalid if error exists. */ if (!op->status) op->status |= rsp.crc_status << RTE_BBDEV_CRC_ERROR; @@ -34269,7 +35697,7 @@ index c5123cfef0..8bda3a8e07 100644 /* Check if this is the last desc in batch (Atomic Queue). 
*/ if (desc->req.last_desc_in_batch) { -@@ -2961,25 +2997,23 @@ acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data, +@@ -2961,25 +2962,23 @@ acc200_dequeue_enc(struct rte_bbdev_queue_data *q_data, cbm = op->turbo_enc.code_block_mode; @@ -34299,7 +35727,7 @@ index c5123cfef0..8bda3a8e07 100644 q_data->queue_stats.dequeued_count += dequeued_ops; return dequeued_ops; -@@ -3005,15 +3039,13 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, +@@ -3005,15 +3004,13 @@ acc200_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data, if (cbm == RTE_BBDEV_TRANSPORT_BLOCK) ret = dequeue_enc_one_op_tb(q, &ops[dequeued_ops], &dequeued_ops, &aq_dequeued, @@ -34379,10 +35807,18 @@ index d520d5238f..171aed4d86 100644 mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1; do { diff --git a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c -index bb754a5395..1a56e73abd 100644 +index bb754a5395..cad6f9490e 100644 --- a/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c +++ b/dpdk/drivers/baseband/la12xx/bbdev_la12xx.c -@@ -1084,6 +1084,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) +@@ -789,6 +789,7 @@ setup_la12xx_dev(struct rte_bbdev *dev) + ipc_priv->hugepg_start.size = hp->len; + + rte_free(hp); ++ hp = NULL; + } + + dev_ipc = open_ipc_dev(priv->modem_id); +@@ -1084,6 +1085,9 @@ la12xx_bbdev_remove(struct rte_vdev_device *vdev) PMD_INIT_FUNC_TRACE(); @@ -34413,6 +35849,73 @@ index 417ec63394..aeb9a76f9e 100644 ext_deps += dep_turbo ext_deps += dependency('flexran_sdk_crc', required: true) ext_deps += dependency('flexran_sdk_rate_matching', required: true) +diff --git a/dpdk/drivers/bus/dpaa/base/fman/fman.c b/dpdk/drivers/bus/dpaa/base/fman/fman.c +index 1814372a40..8263d42bed 100644 +--- a/dpdk/drivers/bus/dpaa/base/fman/fman.c ++++ b/dpdk/drivers/bus/dpaa/base/fman/fman.c +@@ -153,7 +153,7 @@ static void fman_if_vsp_init(struct __fman_if *__if) + size_t lenp; + const uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1}; + +- if (__if->__if.mac_type == fman_mac_1g) { ++ if (__if->__if.mac_idx <= 8) { + for_each_compatible_node(dev, NULL, + "fsl,fman-port-1g-rx-extended-args") { + prop = of_get_property(dev, "cell-index", &lenp); +@@ -176,7 +176,32 @@ static void fman_if_vsp_init(struct __fman_if *__if) + } + } + } +- } else if (__if->__if.mac_type == fman_mac_10g) { ++ ++ for_each_compatible_node(dev, NULL, ++ "fsl,fman-port-op-extended-args") { ++ prop = of_get_property(dev, "cell-index", &lenp); ++ ++ if (prop) { ++ cell_index = of_read_number(&prop[0], ++ lenp / sizeof(phandle)); ++ ++ if (cell_index == __if->__if.mac_idx) { ++ prop = of_get_property(dev, ++ "vsp-window", ++ &lenp); ++ ++ if (prop) { ++ __if->__if.num_profiles = ++ of_read_number(&prop[0], ++ 1); ++ __if->__if.base_profile_id = ++ of_read_number(&prop[1], ++ 1); ++ } ++ } ++ } ++ } ++ } else { + for_each_compatible_node(dev, NULL, + "fsl,fman-port-10g-rx-extended-args") { + prop = of_get_property(dev, "cell-index", &lenp); +diff --git a/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c b/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c +index 24a99f7235..97e792806f 100644 +--- a/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c ++++ b/dpdk/drivers/bus/dpaa/base/fman/fman_hw.c +@@ -243,10 +243,11 @@ fman_if_stats_get_all(struct fman_if *p, uint64_t *value, int n) + int i; + uint64_t base_offset = offsetof(struct memac_regs, reoct_l); + +- for (i = 0; i < n; i++) +- value[i] = (((u64)in_be32((char *)regs + base_offset + 8 * i) | +- (u64)in_be32((char *)regs + base_offset + +- 8 * i + 4)) << 32); ++ for (i = 0; i < 
n; i++) { ++ uint64_t a = in_be32((char *)regs + base_offset + 8 * i); ++ uint64_t b = in_be32((char *)regs + base_offset + 8 * i + 4); ++ value[i] = a | b << 32; ++ } + } + + void diff --git a/dpdk/drivers/bus/dpaa/base/qbman/process.c b/dpdk/drivers/bus/dpaa/base/qbman/process.c index 3504ec97db..3e4622f606 100644 --- a/dpdk/drivers/bus/dpaa/base/qbman/process.c @@ -34446,7 +35949,7 @@ index 3504ec97db..3e4622f606 100644 } diff --git a/dpdk/drivers/bus/dpaa/base/qbman/qman.c b/dpdk/drivers/bus/dpaa/base/qbman/qman.c -index 3949bf8712..83db0a534e 100644 +index 3949bf8712..3a1a843ba0 100644 --- a/dpdk/drivers/bus/dpaa/base/qbman/qman.c +++ b/dpdk/drivers/bus/dpaa/base/qbman/qman.c @@ -1,7 +1,7 @@ @@ -34458,7 +35961,76 @@ index 3949bf8712..83db0a534e 100644 * */ -@@ -897,7 +897,7 @@ mr_loop: +@@ -294,10 +294,32 @@ static inline void qman_stop_dequeues_ex(struct qman_portal *p) + qm_dqrr_set_maxfill(&p->p, 0); + } + ++static inline void qm_mr_pvb_update(struct qm_portal *portal) ++{ ++ register struct qm_mr *mr = &portal->mr; ++ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); ++ ++#ifdef RTE_LIBRTE_DPAA_HWDEBUG ++ DPAA_ASSERT(mr->pmode == qm_mr_pvb); ++#endif ++ /* when accessing 'verb', use __raw_readb() to ensure that compiler ++ * inlining doesn't try to optimise out "excess reads". ++ */ ++ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { ++ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); ++ if (!mr->pi) ++ mr->vbit ^= QM_MR_VERB_VBIT; ++ mr->fill++; ++ res = MR_INC(res); ++ } ++ dcbit_ro(res); ++} ++ + static int drain_mr_fqrni(struct qm_portal *p) + { + const struct qm_mr_entry *msg; + loop: ++ qm_mr_pvb_update(p); + msg = qm_mr_current(p); + if (!msg) { + /* +@@ -319,6 +341,7 @@ loop: + do { + now = mfatb(); + } while ((then + 10000) > now); ++ qm_mr_pvb_update(p); + msg = qm_mr_current(p); + if (!msg) + return 0; +@@ -481,27 +504,6 @@ static inline int qm_mr_init(struct qm_portal *portal, + return 0; + } + +-static inline void qm_mr_pvb_update(struct qm_portal *portal) +-{ +- register struct qm_mr *mr = &portal->mr; +- const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); +- +-#ifdef RTE_LIBRTE_DPAA_HWDEBUG +- DPAA_ASSERT(mr->pmode == qm_mr_pvb); +-#endif +- /* when accessing 'verb', use __raw_readb() to ensure that compiler +- * inlining doesn't try to optimise out "excess reads". 
+- */ +- if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) { +- mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); +- if (!mr->pi) +- mr->vbit ^= QM_MR_VERB_VBIT; +- mr->fill++; +- res = MR_INC(res); +- } +- dcbit_ro(res); +-} +- + struct qman_portal * + qman_init_portal(struct qman_portal *portal, + const struct qm_portal_config *c, +@@ -897,7 +899,7 @@ mr_loop: /* Lookup in the retirement table */ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid)); @@ -34467,7 +36039,7 @@ index 3949bf8712..83db0a534e 100644 fq_state_change(p, fq, &swapped_msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); -@@ -909,6 +909,7 @@ mr_loop: +@@ -909,6 +911,7 @@ mr_loop: #else fq = (void *)(uintptr_t)msg->fq.contextB; #endif @@ -34475,6 +36047,39 @@ index 3949bf8712..83db0a534e 100644 fq_state_change(p, fq, msg, verb); if (fq->cb.fqs) fq->cb.fqs(p, fq, &swapped_msg); +@@ -1824,6 +1827,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags) + } + out: + FQUNLOCK(fq); ++ /* Draining FQRNIs, if any */ ++ drain_mr_fqrni(&p->p); + return rval; + } + +@@ -2164,8 +2169,10 @@ int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags) + + if (!p->vdqcr_owned) { + FQLOCK(fq); +- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { ++ FQUNLOCK(fq); + goto escape; ++ } + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + p->vdqcr_owned = fq; +@@ -2198,8 +2205,10 @@ int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, + + if (!p->vdqcr_owned) { + FQLOCK(fq); +- if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) ++ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) { ++ FQUNLOCK(fq); + goto escape; ++ } + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + p->vdqcr_owned = fq; diff --git a/dpdk/drivers/bus/dpaa/dpaa_bus.c b/dpdk/drivers/bus/dpaa/dpaa_bus.c index e57159f5d8..aaf2a5f43e 100644 --- a/dpdk/drivers/bus/dpaa/dpaa_bus.c @@ -34555,6 +36160,164 @@ index ab9a074835..76fdcd5c8a 100644 /* Read the response back into the command buffer */ mc_read_response(mc_io->regs, cmd); +diff --git a/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c b/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c +index eea06988ff..0e471ec3fd 100644 +--- a/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c ++++ b/dpdk/drivers/bus/fslmc/qbman/qbman_debug.c +@@ -1,6 +1,6 @@ + /* SPDX-License-Identifier: BSD-3-Clause + * Copyright (C) 2015 Freescale Semiconductor, Inc. 
+- * Copyright 2018-2020 NXP ++ * Copyright 2018-2020,2022 NXP + */ + + #include "compat.h" +@@ -37,6 +37,7 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, + struct qbman_bp_query_rslt *r) + { + struct qbman_bp_query_desc *p; ++ struct qbman_bp_query_rslt *bp_query_rslt; + + /* Start the management command */ + p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s); +@@ -47,14 +48,16 @@ int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, + p->bpid = bpid; + + /* Complete the management command */ +- *r = *(struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_BP_QUERY); +- if (!r) { ++ bp_query_rslt = (struct qbman_bp_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_BP_QUERY); ++ if (!bp_query_rslt) { + pr_err("qbman: Query BPID %d failed, no response\n", + bpid); + return -EIO; + } + ++ *r = *bp_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY); + +@@ -202,20 +205,23 @@ int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, + struct qbman_fq_query_rslt *r) + { + struct qbman_fq_query_desc *p; ++ struct qbman_fq_query_rslt *fq_query_rslt; + + p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->fqid = fqid; +- *r = *(struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_FQ_QUERY); +- if (!r) { ++ fq_query_rslt = (struct qbman_fq_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_FQ_QUERY); ++ if (!fq_query_rslt) { + pr_err("qbman: Query FQID %d failed, no response\n", + fqid); + return -EIO; + } + ++ *r = *fq_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY); + +@@ -398,20 +404,23 @@ int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, + struct qbman_cgr_query_rslt *r) + { + struct qbman_cgr_query_desc *p; ++ struct qbman_cgr_query_rslt *cgr_query_rslt; + + p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->cgid = cgid; +- *r = *(struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_CGR_QUERY); +- if (!r) { ++ cgr_query_rslt = (struct qbman_cgr_query_rslt *)qbman_swp_mc_complete(s, ++ p, QBMAN_CGR_QUERY); ++ if (!cgr_query_rslt) { + pr_err("qbman: Query CGID %d failed, no response\n", + cgid); + return -EIO; + } + ++ *r = *cgr_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_CGR_QUERY); + +@@ -473,20 +482,23 @@ int qbman_cgr_wred_query(struct qbman_swp *s, uint32_t cgid, + struct qbman_wred_query_rslt *r) + { + struct qbman_cgr_query_desc *p; ++ struct qbman_wred_query_rslt *wred_query_rslt; + + p = (struct qbman_cgr_query_desc *)qbman_swp_mc_start(s); + if (!p) + return -EBUSY; + + p->cgid = cgid; +- *r = *(struct qbman_wred_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_WRED_QUERY); +- if (!r) { ++ wred_query_rslt = (struct qbman_wred_query_rslt *)qbman_swp_mc_complete( ++ s, p, QBMAN_WRED_QUERY); ++ if (!wred_query_rslt) { + pr_err("qbman: Query CGID WRED %d failed, no response\n", + cgid); + return -EIO; + } + ++ *r = *wred_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WRED_QUERY); + +@@ -527,7 +539,7 @@ void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, + if (mn == 0) + *maxth = ma; + else +- *maxth = ((ma+256) * (1<<(mn-1))); ++ *maxth = ((uint64_t)(ma+256) * (1<<(mn-1))); + + if (step_s == 0) + *minth = *maxth - step_i; +@@ -630,6 +642,7 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, 
+ struct qbman_wqchan_query_rslt *r) + { + struct qbman_wqchan_query_desc *p; ++ struct qbman_wqchan_query_rslt *wqchan_query_rslt; + + /* Start the management command */ + p = (struct qbman_wqchan_query_desc *)qbman_swp_mc_start(s); +@@ -640,14 +653,16 @@ int qbman_wqchan_query(struct qbman_swp *s, uint16_t chanid, + p->chid = chanid; + + /* Complete the management command */ +- *r = *(struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete(s, p, +- QBMAN_WQ_QUERY); +- if (!r) { ++ wqchan_query_rslt = (struct qbman_wqchan_query_rslt *)qbman_swp_mc_complete( ++ s, p, QBMAN_WQ_QUERY); ++ if (!wqchan_query_rslt) { + pr_err("qbman: Query WQ Channel %d failed, no response\n", + chanid); + return -EIO; + } + ++ *r = *wqchan_query_rslt; ++ + /* Decode the outcome */ + QBMAN_BUG_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_WQ_QUERY); + diff --git a/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h b/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h index 7b75c2ddbc..5bbe36d6e0 100644 --- a/dpdk/drivers/bus/ifpga/bus_ifpga_driver.h @@ -35486,6 +37249,17 @@ index 057ff95362..585522e7d3 100644 #define ROC_ONF_IPSEC_INB_MAX_L2_SZ 32UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ 30UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2) +diff --git a/dpdk/drivers/common/cnxk/roc_ie_ot.c b/dpdk/drivers/common/cnxk/roc_ie_ot.c +index d0b7ad38f1..356bb8c5a5 100644 +--- a/dpdk/drivers/common/cnxk/roc_ie_ot.c ++++ b/dpdk/drivers/common/cnxk/roc_ie_ot.c +@@ -38,5 +38,6 @@ roc_ot_ipsec_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa) + offset = offsetof(struct roc_ot_ipsec_outb_sa, ctx); + sa->w0.s.ctx_push_size = (offset / ROC_CTX_UNIT_8B) + 1; + sa->w0.s.ctx_size = ROC_IE_OT_CTX_ILEN; ++ sa->w0.s.ctx_hdr_size = ROC_IE_OT_SA_CTX_HDR_SIZE; + sa->w0.s.aop_valid = 1; + } diff --git a/dpdk/drivers/common/cnxk/roc_io.h b/dpdk/drivers/common/cnxk/roc_io.h index 13f98ed549..45cbb4e587 100644 --- a/dpdk/drivers/common/cnxk/roc_io.h @@ -35530,6 +37304,19 @@ index 13f98ed549..45cbb4e587 100644 } static __plt_always_inline void +diff --git a/dpdk/drivers/common/cnxk/roc_irq.c b/dpdk/drivers/common/cnxk/roc_irq.c +index 010b121176..fb11bcbb6f 100644 +--- a/dpdk/drivers/common/cnxk/roc_irq.c ++++ b/dpdk/drivers/common/cnxk/roc_irq.c +@@ -15,7 +15,7 @@ + + #define MSIX_IRQ_SET_BUF_LEN \ + (sizeof(struct vfio_irq_set) + sizeof(int) * \ +- (plt_intr_max_intr_get(intr_handle))) ++ ((uint32_t)plt_intr_max_intr_get(intr_handle))) + + static int + irq_get_info(struct plt_intr_handle *intr_handle) diff --git a/dpdk/drivers/common/cnxk/roc_mbox.h b/dpdk/drivers/common/cnxk/roc_mbox.h index 8b0384c737..9fc22d8a65 100644 --- a/dpdk/drivers/common/cnxk/roc_mbox.h @@ -36702,6 +38489,19 @@ index 8bdabc116d..fda3073cba 100644 npc_mask_is_supported(const char *mask, const char *hw_mask, int len) { /* +diff --git a/dpdk/drivers/common/cnxk/roc_platform.c b/dpdk/drivers/common/cnxk/roc_platform.c +index ce0f9b870c..963b74ee7c 100644 +--- a/dpdk/drivers/common/cnxk/roc_platform.c ++++ b/dpdk/drivers/common/cnxk/roc_platform.c +@@ -60,7 +60,7 @@ roc_plt_init(void) + return 0; + } + +-RTE_LOG_REGISTER(cnxk_logtype_base, pmd.cnxk.base, NOTICE); ++RTE_LOG_REGISTER(cnxk_logtype_base, pmd.cnxk.base, INFO); + RTE_LOG_REGISTER(cnxk_logtype_mbox, pmd.cnxk.mbox, NOTICE); + RTE_LOG_REGISTER(cnxk_logtype_cpt, pmd.crypto.cnxk, NOTICE); + RTE_LOG_REGISTER(cnxk_logtype_npa, pmd.mempool.cnxk, NOTICE); diff --git a/dpdk/drivers/common/cnxk/roc_ree.c b/dpdk/drivers/common/cnxk/roc_ree.c index 1eb2ae7272..b6392658c3 100644 --- 
a/dpdk/drivers/common/cnxk/roc_ree.c @@ -36864,7 +38664,7 @@ index 8ec6aac915..26bf52827e 100644 * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared * descriptor. diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h -index 289ee2a7d5..070cad0147 100644 +index 289ee2a7d5..328b307ae9 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h +++ b/dpdk/drivers/common/dpaax/caamflib/desc/pdcp.h @@ -1023,6 +1023,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p, @@ -36891,6 +38691,30 @@ index 289ee2a7d5..070cad0147 100644 LOAD(p, CLRW_RESET_CLS1_CHA | CLRW_CLR_C1KEY | CLRW_CLR_C1CTX | +@@ -1210,6 +1220,11 @@ pdcp_insert_cplane_snow_aes_op(struct program *p, + SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1); + MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | +@@ -1911,6 +1926,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p, + + MOVEB(p, OFIFO, 0, MATH3, 0, 4, IMMED); + ++ /* conditional jump with calm added to ensure that the ++ * previous processing has been completed ++ */ ++ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM); ++ + LOAD(p, CLRW_RESET_CLS1_CHA | + CLRW_CLR_C1KEY | + CLRW_CLR_C1CTX | diff --git a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h b/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h index b38c15a24f..d41bacf8f9 100644 --- a/dpdk/drivers/common/dpaax/caamflib/desc/sdap.h @@ -36924,6 +38748,21 @@ index b38c15a24f..d41bacf8f9 100644 /* Reset class 1 CHA */ LOAD(p, CLRW_RESET_CLS1_CHA | CLRW_CLR_C1KEY | +diff --git a/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h b/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h +index fe1ac37ee8..563735eb88 100644 +--- a/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h ++++ b/dpdk/drivers/common/dpaax/caamflib/rta/operation_cmd.h +@@ -7,10 +7,6 @@ + #ifndef __RTA_OPERATION_CMD_H__ + #define __RTA_OPERATION_CMD_H__ + +-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 70000) +-#pragma GCC diagnostic ignored "-Wimplicit-fallthrough" +-#endif +- + extern enum rta_sec_era rta_sec_era; + + static inline int diff --git a/dpdk/drivers/common/dpaax/dpaax_iova_table.c b/dpdk/drivers/common/dpaax/dpaax_iova_table.c index 9daac4bc03..860e702333 100644 --- a/dpdk/drivers/common/dpaax/dpaax_iova_table.c @@ -37124,6 +38963,29 @@ index 9cd4f757d9..13c5c5a7da 100644 #define VF_INT_ITRN_MAX_INDEX 2 #define VF_INT_ITRN_INTERVAL_S 0 #define VF_INT_ITRN_INTERVAL_M MAKEMASK(0xFFF, VF_INT_ITRN_INTERVAL_S) +diff --git a/dpdk/drivers/common/idpf/base/idpf_osdep.h b/dpdk/drivers/common/idpf/base/idpf_osdep.h +index 99ae9cf60a..b6124ab083 100644 +--- a/dpdk/drivers/common/idpf/base/idpf_osdep.h ++++ b/dpdk/drivers/common/idpf/base/idpf_osdep.h +@@ -349,10 +349,16 @@ idpf_hweight32(u32 num) + #define LIST_ENTRY_TYPE(type) LIST_ENTRY(type) + #endif + ++#ifndef LIST_FOREACH_SAFE ++#define LIST_FOREACH_SAFE(var, head, field, tvar) \ ++ for ((var) = LIST_FIRST((head)); \ ++ (var) && ((tvar) = LIST_NEXT((var), field), 1); \ ++ (var) = (tvar)) ++#endif ++ + #ifndef LIST_FOR_EACH_ENTRY_SAFE + #define LIST_FOR_EACH_ENTRY_SAFE(pos, temp, head, entry_type, list) \ +- LIST_FOREACH(pos, head, list) +- ++ LIST_FOREACH_SAFE(pos, head, list, temp) + #endif + + #ifndef LIST_FOR_EACH_ENTRY diff --git a/dpdk/drivers/common/idpf/base/idpf_prototype.h 
b/dpdk/drivers/common/idpf/base/idpf_prototype.h index 529b62212d..3ce25e644d 100644 --- a/dpdk/drivers/common/idpf/base/idpf_prototype.h @@ -37330,8 +39192,21 @@ index 73178ce0f3..fdf03f2a53 100644 { switch (pci_dev->id.device_id) { case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: +diff --git a/dpdk/drivers/common/mlx5/mlx5_common_utils.h b/dpdk/drivers/common/mlx5/mlx5_common_utils.h +index ae15119a33..6db0105c53 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_common_utils.h ++++ b/dpdk/drivers/common/mlx5/mlx5_common_utils.h +@@ -131,7 +131,7 @@ struct mlx5_list_inconst { + * For huge amount of entries, please consider hash list. + * + */ +-struct mlx5_list { ++struct __rte_aligned(16) mlx5_list { + struct mlx5_list_const l_const; + struct mlx5_list_inconst l_inconst; + }; diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c -index 59cebb530f..9a0fc3501d 100644 +index 59cebb530f..6740bb5222 100644 --- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c +++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.c @@ -543,7 +543,7 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx, @@ -37372,7 +39247,15 @@ index 59cebb530f..9a0fc3501d 100644 attr->vdpa.valid = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); attr->vdpa.queue_counters_valid = -@@ -998,10 +989,11 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -962,6 +953,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, + attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp); + attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz); + attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz); ++ attr->log_max_wq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_wq_sz); + attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz); + attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd); + attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq); +@@ -998,10 +990,11 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled); attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time); attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto); @@ -37386,7 +39269,7 @@ index 59cebb530f..9a0fc3501d 100644 attr->max_flow_counter_15_0 = MLX5_GET(cmd_hca_cap, hcattr, max_flow_counter_15_0); attr->max_flow_counter_31_16 = MLX5_GET(cmd_hca_cap, hcattr, -@@ -1013,7 +1005,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, +@@ -1013,7 +1006,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx, attr->flow_access_aso_opc_mod = MLX5_GET(cmd_hca_cap, hcattr, flow_access_aso_opc_mod); if (attr->crypto) { @@ -37397,7 +39280,7 @@ index 59cebb530f..9a0fc3501d 100644 hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc, MLX5_GET_HCA_CAP_OP_MOD_CRYPTO | MLX5_HCA_CAP_OPMOD_GET_CUR); -@@ -1667,7 +1661,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, +@@ -1667,7 +1662,7 @@ mlx5_devx_cmd_create_rqt(void *ctx, uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0}; void *rqt_ctx; struct mlx5_devx_obj *rqt = NULL; @@ -37406,7 +39289,7 @@ index 59cebb530f..9a0fc3501d 100644 in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); if (!in) { -@@ -1720,7 +1714,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, +@@ -1720,7 +1715,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0}; uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY); void *rqt_ctx; @@ -37415,7 +39298,7 @@ index 59cebb530f..9a0fc3501d 100644 int ret; if (!in) { -@@ -1733,7 +1727,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, +@@ -1733,7 
+1728,6 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt, MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1); rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context); MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type); @@ -37423,11 +39306,85 @@ index 59cebb530f..9a0fc3501d 100644 MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size); for (i = 0; i < rqt_attr->rqt_actual_size; i++) MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]); +diff --git a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h +index c94b9eac06..49356aec37 100644 +--- a/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h ++++ b/dpdk/drivers/common/mlx5/mlx5_devx_cmds.h +@@ -245,6 +245,7 @@ struct mlx5_hca_attr { + struct mlx5_hca_vdpa_attr vdpa; + struct mlx5_hca_flow_attr flow; + struct mlx5_hca_flex_attr flex; ++ uint8_t log_max_wq_sz; + int log_max_qp_sz; + int log_max_cq_sz; + int log_max_qp; diff --git a/dpdk/drivers/common/mlx5/mlx5_prm.h b/dpdk/drivers/common/mlx5/mlx5_prm.h -index 2b5c43ee6e..f2cd353672 100644 +index 2b5c43ee6e..cf525c14df 100644 --- a/dpdk/drivers/common/mlx5/mlx5_prm.h +++ b/dpdk/drivers/common/mlx5/mlx5_prm.h -@@ -1679,7 +1679,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { +@@ -262,8 +262,12 @@ + /* Maximum number of DS in WQE. Limited by 6-bit field. */ + #define MLX5_DSEG_MAX 63 + +-/* The 32 bit syndrome offset in struct mlx5_err_cqe. */ ++/* The 32 bit syndrome offset in struct mlx5_error_cqe. */ ++#if (RTE_CACHE_LINE_SIZE == 128) ++#define MLX5_ERROR_CQE_SYNDROME_OFFSET 116 ++#else + #define MLX5_ERROR_CQE_SYNDROME_OFFSET 52 ++#endif + + /* The completion mode offset in the WQE control segment line 2. */ + #define MLX5_COMP_MODE_OFFSET 2 +@@ -403,6 +407,29 @@ struct mlx5_wqe_mprq { + + #define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2 + ++struct mlx5_error_cqe { ++#if (RTE_CACHE_LINE_SIZE == 128) ++ uint8_t padding[64]; ++#endif ++ uint8_t rsvd0[2]; ++ uint16_t eth_wqe_id; ++ uint8_t rsvd1[16]; ++ uint16_t ib_stride_index; ++ uint8_t rsvd2[10]; ++ uint32_t srqn; ++ uint8_t rsvd3[8]; ++ uint32_t byte_cnt; ++ uint8_t rsvd4[4]; ++ uint8_t hw_err_synd; ++ uint8_t hw_synd_type; ++ uint8_t vendor_err_synd; ++ uint8_t syndrome; ++ uint32_t s_wqe_opcode_qpn; ++ uint16_t wqe_counter; ++ uint8_t signature; ++ uint8_t op_own; ++}; ++ + /* CQ element structure - should be equal to the cache line size */ + struct mlx5_cqe { + #if (RTE_CACHE_LINE_SIZE == 128) +@@ -784,7 +811,7 @@ struct mlx5_modification_cmd { + unsigned int field:12; + unsigned int action_type:4; + }; +- }; ++ } __rte_packed; + union { + uint32_t data1; + uint8_t data[4]; +@@ -795,7 +822,7 @@ struct mlx5_modification_cmd { + unsigned int dst_field:12; + unsigned int rsvd4:4; + }; +- }; ++ } __rte_packed; + }; + + typedef uint64_t u64; +@@ -1679,7 +1706,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_min_hairpin_wq_data_sz[0x5]; u8 reserved_at_3e8[0x3]; u8 log_max_vlan_list[0x5]; @@ -37438,7 +39395,7 @@ index 2b5c43ee6e..f2cd353672 100644 u8 log_max_current_mc_list[0x5]; u8 reserved_at_3f8[0x3]; u8 log_max_current_uc_list[0x5]; -@@ -2114,17 +2116,18 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { +@@ -2114,17 +2143,18 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_d0[0x3]; u8 log_conn_track_max_alloc[0x5]; u8 reserved_at_d8[0x3]; @@ -37462,7 +39419,7 @@ index 2b5c43ee6e..f2cd353672 100644 u8 general_obj_types_127_64[0x40]; u8 reserved_at_200[0x53]; u8 flow_counter_bulk_log_max_alloc[0x5]; -@@ -3040,6 +3043,7 @@ struct mlx5_ifc_health_buffer_bits { +@@ -3040,6 +3070,7 @@ struct 
mlx5_ifc_health_buffer_bits { u8 ext_synd[0x10]; }; @@ -37470,7 +39427,7 @@ index 2b5c43ee6e..f2cd353672 100644 struct mlx5_ifc_initial_seg_bits { u8 fw_rev_minor[0x10]; u8 fw_rev_major[0x10]; -@@ -3067,7 +3071,9 @@ struct mlx5_ifc_initial_seg_bits { +@@ -3067,7 +3098,9 @@ struct mlx5_ifc_initial_seg_bits { u8 clear_int[0x1]; u8 health_syndrome[0x8]; u8 health_counter[0x18]; @@ -37481,7 +39438,7 @@ index 2b5c43ee6e..f2cd353672 100644 }; struct mlx5_ifc_create_cq_out_bits { -@@ -3305,7 +3311,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits { +@@ -3305,7 +3338,7 @@ struct mlx5_ifc_stc_ste_param_vport_bits { u8 eswitch_owner_vhca_id[0x10]; u8 vport_number[0x10]; u8 eswitch_owner_vhca_id_valid[0x1]; @@ -37503,7 +39460,7 @@ index 4f72900519..03c8ce5593 100644 mlx5_dev_mempool_subscribe; diff --git a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h -index 3554e4a7ff..65da820c5e 100644 +index 3554e4a7ff..1ddf5c553d 100644 --- a/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h +++ b/dpdk/drivers/common/mlx5/windows/mlx5_win_defs.h @@ -2,8 +2,10 @@ @@ -37643,7 +39600,26 @@ index 3554e4a7ff..65da820c5e 100644 }; enum ibv_flow_attr_type { -@@ -240,11 +251,11 @@ struct mlx5_wqe_data_seg { +@@ -208,18 +219,6 @@ struct mlx5_action { + } dest_tir; + }; + +-struct mlx5_err_cqe { +- uint8_t rsvd0[32]; +- uint32_t srqn; +- uint8_t rsvd1[18]; +- uint8_t vendor_err_synd; +- uint8_t syndrome; +- uint32_t s_wqe_opcode_qpn; +- uint16_t wqe_counter; +- uint8_t signature; +- uint8_t op_own; +-}; +- + struct mlx5_wqe_srq_next_seg { + uint8_t rsvd0[2]; + rte_be16_t next_wqe_index; +@@ -240,11 +239,11 @@ struct mlx5_wqe_data_seg { rte_be64_t addr; }; @@ -37660,7 +39636,7 @@ index 3554e4a7ff..65da820c5e 100644 enum { MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0, -@@ -254,8 +265,9 @@ enum { +@@ -254,8 +253,9 @@ enum { }; enum { @@ -38225,7 +40201,7 @@ index a54aab0a08..d9b04a611d 100644 efx_mae_action_set_populate_set_src_mac; efx_mae_action_set_populate_vlan_pop; diff --git a/dpdk/drivers/compress/mlx5/mlx5_compress.c b/dpdk/drivers/compress/mlx5/mlx5_compress.c -index fb2bda9745..c4bf62ed41 100644 +index fb2bda9745..5a4095b3a0 100644 --- a/dpdk/drivers/compress/mlx5/mlx5_compress.c +++ b/dpdk/drivers/compress/mlx5/mlx5_compress.c @@ -96,9 +96,7 @@ static const struct rte_compressdev_capabilities mlx5_caps[] = { @@ -38259,6 +40235,24 @@ index fb2bda9745..c4bf62ed41 100644 DRV_LOG(ERR, "SHA is not supported."); return -ENOTSUP; } +@@ -538,7 +536,7 @@ mlx5_compress_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe, + size_t i; + + DRV_LOG(ERR, "Error cqe:"); +- for (i = 0; i < sizeof(struct mlx5_err_cqe) >> 2; i += 4) ++ for (i = 0; i < sizeof(struct mlx5_error_cqe) >> 2; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], + cqe[i + 2], cqe[i + 3]); + DRV_LOG(ERR, "\nError wqe:"); +@@ -556,7 +554,7 @@ mlx5_compress_cqe_err_handle(struct mlx5_compress_qp *qp, + struct rte_comp_op *op) + { + const uint32_t idx = qp->ci & (qp->entries_n - 1); +- volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *) ++ volatile struct mlx5_error_cqe *cqe = (volatile struct mlx5_error_cqe *) + &qp->cq.cqes[idx]; + volatile struct mlx5_gga_wqe *wqes = (volatile struct mlx5_gga_wqe *) + qp->qp.wqes; @@ -635,7 +633,7 @@ mlx5_compress_dequeue_burst(void *queue_pair, struct rte_comp_op **ops, break; case RTE_COMP_CHECKSUM_ADLER32: @@ -38497,6 +40491,28 @@ index da7b9a6eec..dc220cd6e3 100644 + #endif #endif +diff --git a/dpdk/drivers/crypto/bcmfs/bcmfs_device.c 
b/dpdk/drivers/crypto/bcmfs/bcmfs_device.c +index ada7ba342c..46522970d5 100644 +--- a/dpdk/drivers/crypto/bcmfs/bcmfs_device.c ++++ b/dpdk/drivers/crypto/bcmfs/bcmfs_device.c +@@ -139,7 +139,7 @@ fsdev_allocate_one_dev(struct rte_vdev_device *vdev, + return fsdev; + + cleanup: +- free(fsdev); ++ rte_free(fsdev); + + return NULL; + } +@@ -163,7 +163,7 @@ fsdev_release(struct bcmfs_device *fsdev) + return; + + TAILQ_REMOVE(&fsdev_list, fsdev, next); +- free(fsdev); ++ rte_free(fsdev); + } + + static int diff --git a/dpdk/drivers/crypto/ccp/ccp_crypto.c b/dpdk/drivers/crypto/ccp/ccp_crypto.c index b21b32e507..4b84b3303e 100644 --- a/dpdk/drivers/crypto/ccp/ccp_crypto.c @@ -39501,7 +41517,7 @@ index b07fc22858..32e2b2cd64 100644 cpt_inst_w5.s.gather_sz = ((i + 2) / 3); diff --git a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c -index c25e40030b..eab7091251 100644 +index c25e40030b..a8c99bc4af 100644 --- a/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -1676,7 +1676,7 @@ dpaa2_sec_dump(struct rte_crypto_op *op) @@ -39522,7 +41538,15 @@ index c25e40030b..eab7091251 100644 printf("PDCP session params:\n" "\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:" "\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n" -@@ -4093,7 +4093,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, +@@ -3444,6 +3444,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev, + } + } else { + DPAA2_SEC_ERR("Invalid crypto type"); ++ rte_free(priv); + return -EINVAL; + } + +@@ -4093,7 +4094,7 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev, cfg.dest_cfg.priority = priority; cfg.options |= DPSECI_QUEUE_OPT_USER_CTX; @@ -39660,7 +41684,7 @@ index 50b789a29b..64f2b4b604 100644 return 0; diff --git a/dpdk/drivers/crypto/ipsec_mb/meson.build b/dpdk/drivers/crypto/ipsec_mb/meson.build -index ec147d2110..4100d921ff 100644 +index ec147d2110..3981325173 100644 --- a/dpdk/drivers/crypto/ipsec_mb/meson.build +++ b/dpdk/drivers/crypto/ipsec_mb/meson.build @@ -16,6 +16,11 @@ lib = cc.find_library('IPSec_MB', required: false) @@ -39668,7 +41692,7 @@ index ec147d2110..4100d921ff 100644 build = false reason = 'missing dependency, "libIPSec_MB"' +# if the lib is found, check it's the right format -+elif meson.version().version_compare('>=0.60') and not cc.links( ++elif not cc.links( + 'int main(void) {return 0;}', dependencies: lib) + build = false + reason = 'incompatible dependency, "libIPSec_MB"' @@ -39981,10 +42005,41 @@ index ed6841e460..d67e39cddb 100644 /** Reset OPENSSL crypto session parameters */ extern void diff --git a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -index 05449b6e98..9fc8194366 100644 +index 05449b6e98..7eaa9650b6 100644 --- a/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c -@@ -349,7 +349,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen, +@@ -2,6 +2,7 @@ + * Copyright(c) 2016-2017 Intel Corporation + */ + ++#include + #include + #include + #include +@@ -98,22 +99,6 @@ digest_name_get(enum rte_crypto_auth_algorithm algo) + + static int cryptodev_openssl_remove(struct rte_vdev_device *vdev); + +-/*----------------------------------------------------------------------------*/ +- +-/** +- * Increment counter by 1 +- * Counter is 64 bit array, big-endian +- */ +-static void +-ctr_inc(uint8_t *ctr) +-{ +- uint64_t *ctr64 = (uint64_t *)ctr; 
+- +- *ctr64 = __builtin_bswap64(*ctr64); +- (*ctr64)++; +- *ctr64 = __builtin_bswap64(*ctr64); +-} +- + /* + *------------------------------------------------------------------------------ + * Session Prepare +@@ -349,7 +334,8 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen, static int openssl_set_sess_aead_enc_param(struct openssl_session *sess, enum rte_crypto_aead_algorithm algo, @@ -39994,7 +42049,7 @@ index 05449b6e98..9fc8194366 100644 { int iv_type = 0; unsigned int do_ccm; -@@ -377,7 +378,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, +@@ -377,7 +363,7 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, } sess->cipher.mode = OPENSSL_CIPHER_LIB; @@ -40003,7 +42058,7 @@ index 05449b6e98..9fc8194366 100644 if (get_aead_algo(algo, sess->cipher.key.length, &sess->cipher.evp_algo) != 0) -@@ -387,19 +388,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, +@@ -387,19 +373,19 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, sess->chain_order = OPENSSL_CHAIN_COMBINED; @@ -40027,7 +42082,7 @@ index 05449b6e98..9fc8194366 100644 return -EINVAL; return 0; -@@ -409,7 +410,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, +@@ -409,7 +395,8 @@ openssl_set_sess_aead_enc_param(struct openssl_session *sess, static int openssl_set_sess_aead_dec_param(struct openssl_session *sess, enum rte_crypto_aead_algorithm algo, @@ -40037,7 +42092,7 @@ index 05449b6e98..9fc8194366 100644 { int iv_type = 0; unsigned int do_ccm = 0; -@@ -436,7 +438,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, +@@ -436,7 +423,7 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, } sess->cipher.mode = OPENSSL_CIPHER_LIB; @@ -40046,7 +42101,7 @@ index 05449b6e98..9fc8194366 100644 if (get_aead_algo(algo, sess->cipher.key.length, &sess->cipher.evp_algo) != 0) -@@ -446,24 +448,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, +@@ -446,24 +433,46 @@ openssl_set_sess_aead_dec_param(struct openssl_session *sess, sess->chain_order = OPENSSL_CHAIN_COMBINED; @@ -40097,7 +42152,7 @@ index 05449b6e98..9fc8194366 100644 /** Set session cipher parameters */ static int openssl_set_session_cipher_parameters(struct openssl_session *sess, -@@ -520,6 +544,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, +@@ -520,6 +529,15 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, sess->cipher.key.length, sess->cipher.key.data) != 0) return -EINVAL; @@ -40113,7 +42168,7 @@ index 05449b6e98..9fc8194366 100644 break; case RTE_CRYPTO_CIPHER_DES_CBC: -@@ -585,6 +618,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, +@@ -585,6 +603,8 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess, return -ENOTSUP; } @@ -40122,7 +42177,7 @@ index 05449b6e98..9fc8194366 100644 return 0; } -@@ -622,12 +657,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, +@@ -622,12 +642,14 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, return openssl_set_sess_aead_enc_param(sess, RTE_CRYPTO_AEAD_AES_GCM, xform->auth.digest_length, @@ -40139,7 +42194,16 @@ index 05449b6e98..9fc8194366 100644 break; case RTE_CRYPTO_AUTH_MD5: -@@ -696,7 +733,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, +@@ -654,7 +676,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, + else + return -EINVAL; + +- rte_memcpy(algo_name, algo, strlen(algo) + 1); ++ strlcpy(algo_name, algo, 
sizeof(algo_name)); + params[0] = OSSL_PARAM_construct_utf8_string( + OSSL_MAC_PARAM_CIPHER, algo_name, 0); + params[1] = OSSL_PARAM_construct_end(); +@@ -696,7 +718,7 @@ openssl_set_session_auth_parameters(struct openssl_session *sess, algo = digest_name_get(xform->auth.algo); if (!algo) return -EINVAL; @@ -40148,7 +42212,7 @@ index 05449b6e98..9fc8194366 100644 mac = EVP_MAC_fetch(NULL, "HMAC", NULL); sess->auth.hmac.ctx = EVP_MAC_CTX_new(mac); -@@ -769,16 +806,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess, +@@ -769,16 +791,19 @@ openssl_set_session_aead_parameters(struct openssl_session *sess, /* Select cipher direction */ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) return openssl_set_sess_aead_enc_param(sess, xform->aead.algo, @@ -40171,7 +42235,7 @@ index 05449b6e98..9fc8194366 100644 { const struct rte_crypto_sym_xform *cipher_xform = NULL; const struct rte_crypto_sym_xform *auth_xform = NULL; -@@ -840,6 +880,12 @@ openssl_set_session_parameters(struct openssl_session *sess, +@@ -840,6 +865,12 @@ openssl_set_session_parameters(struct openssl_session *sess, } } @@ -40184,7 +42248,7 @@ index 05449b6e98..9fc8194366 100644 return 0; } -@@ -847,33 +893,45 @@ openssl_set_session_parameters(struct openssl_session *sess, +@@ -847,33 +878,45 @@ openssl_set_session_parameters(struct openssl_session *sess, void openssl_reset_session(struct openssl_session *sess) { @@ -40246,7 +42310,7 @@ index 05449b6e98..9fc8194366 100644 } /** Provide session for operation */ -@@ -913,7 +971,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) +@@ -913,7 +956,7 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) sess = (struct openssl_session *)_sess->driver_priv_data; if (unlikely(openssl_set_session_parameters(sess, @@ -40255,7 +42319,7 @@ index 05449b6e98..9fc8194366 100644 rte_mempool_put(qp->sess_mp, _sess); sess = NULL; } -@@ -1067,8 +1125,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, +@@ -1067,8 +1110,6 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) goto process_cipher_encrypt_err; @@ -40264,7 +42328,7 @@ index 05449b6e98..9fc8194366 100644 if (process_openssl_encryption_update(mbuf_src, offset, &dst, srclen, ctx, inplace)) goto process_cipher_encrypt_err; -@@ -1117,8 +1173,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, +@@ -1117,8 +1158,6 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) goto process_cipher_decrypt_err; @@ -40273,7 +42337,7 @@ index 05449b6e98..9fc8194366 100644 if (process_openssl_decryption_update(mbuf_src, offset, &dst, srclen, ctx, inplace)) goto process_cipher_decrypt_err; -@@ -1135,8 +1189,7 @@ process_cipher_decrypt_err: +@@ -1135,10 +1174,10 @@ process_cipher_decrypt_err: /** Process cipher des 3 ctr encryption, decryption algorithm */ static int process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, @@ -40281,9 +42345,13 @@ index 05449b6e98..9fc8194366 100644 - EVP_CIPHER_CTX *ctx) + int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx) { - uint8_t ebuf[8], ctr[8]; +- uint8_t ebuf[8], ctr[8]; ++ uint8_t ebuf[8]; ++ uint64_t ctr; int unused, n; -@@ -1154,12 +1207,6 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, + struct rte_mbuf *m; + uint8_t *src; +@@ -1154,21 +1193,19 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, src = 
rte_pktmbuf_mtod_offset(m, uint8_t *, offset); l = rte_pktmbuf_data_len(m) - offset; @@ -40293,10 +42361,25 @@ index 05449b6e98..9fc8194366 100644 - if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0) - goto process_cipher_des3ctr_err; - - memcpy(ctr, iv, 8); +- memcpy(ctr, iv, 8); ++ memcpy(&ctr, iv, 8); for (n = 0; n < srclen; n++) { -@@ -1195,8 +1242,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, + if (n % 8 == 0) { ++ uint64_t cpu_ctr; ++ + if (EVP_EncryptUpdate(ctx, + (unsigned char *)&ebuf, &unused, + (const unsigned char *)&ctr, 8) <= 0) + goto process_cipher_des3ctr_err; +- ctr_inc(ctr); ++ cpu_ctr = rte_be_to_cpu_64(ctr); ++ cpu_ctr++; ++ ctr = rte_cpu_to_be_64(cpu_ctr); + } + dst[n] = *(src++) ^ ebuf[n % 8]; + +@@ -1195,8 +1232,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -40309,7 +42392,7 @@ index 05449b6e98..9fc8194366 100644 if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0) goto process_auth_encryption_gcm_err; -@@ -1210,9 +1260,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1210,9 +1250,11 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_encryption_gcm_err; @@ -40321,7 +42404,7 @@ index 05449b6e98..9fc8194366 100644 if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0) goto process_auth_encryption_gcm_err; -@@ -1274,8 +1326,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1274,8 +1316,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, int srclen, uint8_t *aad, int aadlen, uint8_t *iv, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx) { @@ -40334,7 +42417,7 @@ index 05449b6e98..9fc8194366 100644 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0) goto process_auth_decryption_gcm_err; -@@ -1292,9 +1347,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, +@@ -1292,9 +1337,11 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, srclen, ctx, 0)) goto process_auth_decryption_gcm_err; @@ -40346,7 +42429,7 @@ index 05449b6e98..9fc8194366 100644 if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0) return -EFAULT; -@@ -1416,6 +1473,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, +@@ -1416,6 +1463,9 @@ process_openssl_auth_mac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset, if (m == 0) goto process_auth_err; @@ -40356,7 +42439,7 @@ index 05449b6e98..9fc8194366 100644 src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset); l = rte_pktmbuf_data_len(m) - offset; -@@ -1442,11 +1502,9 @@ process_auth_final: +@@ -1442,11 +1492,9 @@ process_auth_final: if (EVP_MAC_final(ctx, dst, &dstlen, DIGEST_LENGTH_MAX) != 1) goto process_auth_err; @@ -40368,7 +42451,7 @@ index 05449b6e98..9fc8194366 100644 OPENSSL_LOG(ERR, "Process openssl auth failed"); return -EINVAL; } -@@ -1558,11 +1616,151 @@ process_auth_err: +@@ -1558,11 +1606,151 @@ process_auth_err: # endif /*----------------------------------------------------------------------------*/ @@ -40523,7 +42606,7 @@ index 05449b6e98..9fc8194366 100644 { /* cipher */ uint8_t *dst = NULL, *iv, *tag, *aad; -@@ -1579,6 +1777,8 @@ process_openssl_combined_op +@@ -1579,6 +1767,8 @@ process_openssl_combined_op return; } @@ -40532,7 +42615,7 @@ index 05449b6e98..9fc8194366 100644 iv = 
rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv.offset); if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) { -@@ -1612,12 +1812,12 @@ process_openssl_combined_op +@@ -1612,12 +1802,12 @@ process_openssl_combined_op status = process_openssl_auth_encryption_gcm( mbuf_src, offset, srclen, aad, aadlen, iv, @@ -40547,7 +42630,7 @@ index 05449b6e98..9fc8194366 100644 } else { if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC || -@@ -1625,12 +1825,12 @@ process_openssl_combined_op +@@ -1625,12 +1815,12 @@ process_openssl_combined_op status = process_openssl_auth_decryption_gcm( mbuf_src, offset, srclen, aad, aadlen, iv, @@ -40562,7 +42645,7 @@ index 05449b6e98..9fc8194366 100644 } if (status != 0) { -@@ -1645,14 +1845,13 @@ process_openssl_combined_op +@@ -1645,14 +1835,13 @@ process_openssl_combined_op /** Process cipher operation */ static void @@ -40580,7 +42663,7 @@ index 05449b6e98..9fc8194366 100644 /* * Segmented OOP destination buffer is not supported for encryption/ -@@ -1671,25 +1870,22 @@ process_openssl_cipher_op +@@ -1671,25 +1860,22 @@ process_openssl_cipher_op iv = rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv.offset); @@ -40611,7 +42694,7 @@ index 05449b6e98..9fc8194366 100644 if (status != 0) op->status = RTE_CRYPTO_OP_STATUS_ERROR; } -@@ -1797,7 +1993,6 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -1797,7 +1983,6 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, # if OPENSSL_VERSION_NUMBER >= 0x30000000L EVP_MAC_CTX *ctx_h; EVP_MAC_CTX *ctx_c; @@ -40619,7 +42702,7 @@ index 05449b6e98..9fc8194366 100644 # else HMAC_CTX *ctx_h; CMAC_CTX *ctx_c; -@@ -1809,48 +2004,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -1809,48 +1994,40 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, switch (sess->auth.mode) { case OPENSSL_AUTH_AS_AUTH: @@ -40677,7 +42760,7 @@ index 05449b6e98..9fc8194366 100644 break; default: status = -1; -@@ -1927,7 +2114,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, +@@ -1927,7 +2104,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, if (EVP_PKEY_sign(dsa_ctx, dsa_sign_data, &outlen, op->message.data, op->message.length) <= 0) { @@ -40686,7 +42769,7 @@ index 05449b6e98..9fc8194366 100644 goto err_dsa_sign; } -@@ -1935,7 +2122,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, +@@ -1935,7 +2112,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, DSA_SIG *sign = d2i_DSA_SIG(NULL, &dsa_sign_data_p, outlen); if (!sign) { OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); @@ -40695,7 +42778,7 @@ index 05449b6e98..9fc8194366 100644 goto err_dsa_sign; } else { const BIGNUM *r = NULL, *s = NULL; -@@ -1947,7 +2134,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, +@@ -1947,7 +2124,7 @@ process_openssl_dsa_sign_op_evp(struct rte_crypto_op *cop, } DSA_SIG_free(sign); @@ -40704,7 +42787,7 @@ index 05449b6e98..9fc8194366 100644 return 0; err_dsa_sign: -@@ -1957,6 +2144,7 @@ err_dsa_sign: +@@ -1957,6 +2134,7 @@ err_dsa_sign: EVP_PKEY_CTX_free(key_ctx); if (dsa_ctx) EVP_PKEY_CTX_free(dsa_ctx); @@ -40712,7 +42795,7 @@ index 05449b6e98..9fc8194366 100644 return -1; } -@@ -2633,7 +2821,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -2633,7 +2811,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, if (EVP_PKEY_verify_recover(rsa_ctx, tmp, &outlen, op->rsa.sign.data, op->rsa.sign.length) <= 0) { @@ -40721,7 +42804,7 @@ index 05449b6e98..9fc8194366 100644 goto err_rsa; } -@@ 
-2645,7 +2833,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, +@@ -2645,7 +2823,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop, op->rsa.message.length)) { OPENSSL_LOG(ERR, "RSA sign Verification failed"); } @@ -40730,7 +42813,7 @@ index 05449b6e98..9fc8194366 100644 break; default: -@@ -2861,13 +3049,13 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -2861,13 +3039,13 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, switch (sess->chain_order) { case OPENSSL_CHAIN_ONLY_CIPHER: @@ -40746,7 +42829,7 @@ index 05449b6e98..9fc8194366 100644 /* OOP */ if (msrc != mdst) copy_plaintext(msrc, mdst, op); -@@ -2875,10 +3063,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, +@@ -2875,10 +3053,10 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, break; case OPENSSL_CHAIN_AUTH_CIPHER: process_openssl_auth_op(qp, op, sess, msrc, mdst); @@ -41499,6 +43582,26 @@ index 91d5cfa71d..888dea4ad9 100644 &vec->auth_iv[i], ofs, (uint32_t)data_len))) break; +diff --git a/dpdk/drivers/crypto/qat/qat_asym.c b/dpdk/drivers/crypto/qat/qat_asym.c +index 82e165538d..e67ebe4f9f 100644 +--- a/dpdk/drivers/crypto/qat/qat_asym.c ++++ b/dpdk/drivers/crypto/qat/qat_asym.c +@@ -268,6 +268,7 @@ modexp_collect(struct rte_crypto_asym_op *asym_op, + rte_memcpy(modexp_result, + cookie->output_array[0] + alg_bytesize + - n.length, n.length); ++ asym_op->modex.result.length = alg_bytesize; + HEXDUMP("ModExp result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; +@@ -329,6 +330,7 @@ modinv_collect(struct rte_crypto_asym_op *asym_op, + - n.length), + cookie->output_array[0] + alg_bytesize + - n.length, n.length); ++ asym_op->modinv.result.length = alg_bytesize; + HEXDUMP("ModInv result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; diff --git a/dpdk/drivers/crypto/qat/qat_sym.c b/dpdk/drivers/crypto/qat/qat_sym.c index 08e92191a3..f68d96f4f5 100644 --- a/dpdk/drivers/crypto/qat/qat_sym.c @@ -42048,6 +44151,19 @@ index 3b2d4c2b65..ba8076715d 100644 if (strncmp(name, "dpdk_", 5) == 0) retval = 1; +diff --git a/dpdk/drivers/dma/idxd/idxd_pci.c b/dpdk/drivers/dma/idxd/idxd_pci.c +index 781fa02db3..7222de3b7e 100644 +--- a/dpdk/drivers/dma/idxd/idxd_pci.c ++++ b/dpdk/drivers/dma/idxd/idxd_pci.c +@@ -292,7 +292,7 @@ init_pci_device(struct rte_pci_device *dev, struct idxd_dmadev *idxd, + return nb_wqs; + + err: +- free(pci); ++ rte_free(pci); + return err_code; + } + diff --git a/dpdk/drivers/dma/ioat/ioat_dmadev.c b/dpdk/drivers/dma/ioat/ioat_dmadev.c index 5906eb45aa..57c18c081d 100644 --- a/dpdk/drivers/dma/ioat/ioat_dmadev.c @@ -42137,7 +44253,7 @@ index 9b6da655fd..daf35eccce 100644 } diff --git a/dpdk/drivers/event/cnxk/cn10k_eventdev.c b/dpdk/drivers/event/cnxk/cn10k_eventdev.c -index 30c922b5fc..d8e7c83462 100644 +index 30c922b5fc..a1ddf402f1 100644 --- a/dpdk/drivers/event/cnxk/cn10k_eventdev.c +++ b/dpdk/drivers/event/cnxk/cn10k_eventdev.c @@ -197,12 +197,14 @@ cn10k_sso_hws_reset(void *arg, void *hws) @@ -42188,7 +44304,59 @@ index 30c922b5fc..d8e7c83462 100644 } ws->swtag_req = 0; plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL); -@@ -1024,8 +1034,8 @@ static int +@@ -854,12 +864,40 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, u + } + } + ++static void ++eventdev_fops_tstamp_update(struct rte_eventdev *event_dev) ++{ ++ struct rte_event_fp_ops *fp_op = ++ rte_event_fp_ops + event_dev->data->dev_id; ++ ++ fp_op->dequeue = event_dev->dequeue; 
++ fp_op->dequeue_burst = event_dev->dequeue_burst; ++} ++ ++static void ++cn10k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[port_id]; ++ struct cnxk_eth_dev *cnxk_eth_dev = dev->data->dev_private; ++ struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; ++ struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); ++ ++ evdev->rx_offloads |= flags; ++ if (ptp_en) ++ evdev->tstamp[port_id] = &cnxk_eth_dev->tstamp; ++ else ++ evdev->tstamp[port_id] = NULL; ++ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); ++ eventdev_fops_tstamp_update(event_dev); ++} ++ + static int + cn10k_sso_rx_adapter_queue_add( + const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) + { ++ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cn10k_eth_rxq *rxq; + uint64_t meta_aura; + void *lookup_mem; +@@ -873,6 +911,10 @@ cn10k_sso_rx_adapter_queue_add( + queue_conf); + if (rc) + return -EINVAL; ++ ++ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn10k_sso_tstamp_hdl_update; ++ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev; ++ + rxq = eth_dev->data->rx_queues[0]; + lookup_mem = rxq->lookup_mem; + meta_aura = rxq->meta_aura; +@@ -1024,8 +1066,8 @@ static int cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, uint32_t *caps) { @@ -42199,7 +44367,7 @@ index 30c922b5fc..d8e7c83462 100644 *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA | -@@ -1043,8 +1053,8 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, +@@ -1043,8 +1085,8 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); int ret; @@ -42210,7 +44378,7 @@ index 30c922b5fc..d8e7c83462 100644 dev->is_ca_internal_port = 1; cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); -@@ -1059,8 +1069,8 @@ static int +@@ -1059,8 +1101,8 @@ static int cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, int32_t queue_pair_id) { @@ -42221,7 +44389,7 @@ index 30c922b5fc..d8e7c83462 100644 return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id); } -@@ -1078,8 +1088,8 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev, +@@ -1078,8 +1120,8 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, struct rte_event_crypto_adapter_vector_limits *limits) { @@ -42299,7 +44467,7 @@ index 75a2ff244a..1bce0631e5 100644 ev->event = gw.u64[0]; ev->u64 = gw.u64[1]; diff --git a/dpdk/drivers/event/cnxk/cn9k_eventdev.c b/dpdk/drivers/event/cnxk/cn9k_eventdev.c -index f5a42a86f8..803e7ddd07 100644 +index f5a42a86f8..88bce7e675 100644 --- a/dpdk/drivers/event/cnxk/cn9k_eventdev.c +++ b/dpdk/drivers/event/cnxk/cn9k_eventdev.c @@ -223,16 +223,16 @@ cn9k_sso_hws_reset(void *arg, void *hws) @@ -42351,7 +44519,58 @@ index f5a42a86f8..803e7ddd07 100644 } static int -@@ -1110,11 +1123,11 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, +@@ -968,12 +981,40 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, + } + } + ++static void ++eventdev_fops_tstamp_update(struct rte_eventdev *event_dev) ++{ ++ struct rte_event_fp_ops *fp_op = ++ rte_event_fp_ops + 
event_dev->data->dev_id; ++ ++ fp_op->dequeue = event_dev->dequeue; ++ fp_op->dequeue_burst = event_dev->dequeue_burst; ++} ++ ++static void ++cn9k_sso_tstamp_hdl_update(uint16_t port_id, uint16_t flags, bool ptp_en) ++{ ++ struct rte_eth_dev *dev = &rte_eth_devices[port_id]; ++ struct cnxk_eth_dev *cnxk_eth_dev = dev->data->dev_private; ++ struct rte_eventdev *event_dev = cnxk_eth_dev->evdev_priv; ++ struct cnxk_sso_evdev *evdev = cnxk_sso_pmd_priv(event_dev); ++ ++ evdev->rx_offloads |= flags; ++ if (ptp_en) ++ evdev->tstamp[port_id] = &cnxk_eth_dev->tstamp; ++ else ++ evdev->tstamp[port_id] = NULL; ++ cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); ++ eventdev_fops_tstamp_update(event_dev); ++} ++ + static int + cn9k_sso_rx_adapter_queue_add( + const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev, + int32_t rx_queue_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) + { ++ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private; + struct cn9k_eth_rxq *rxq; + void *lookup_mem; + int rc; +@@ -987,6 +1028,9 @@ cn9k_sso_rx_adapter_queue_add( + if (rc) + return -EINVAL; + ++ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn9k_sso_tstamp_hdl_update; ++ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev; ++ + rxq = eth_dev->data->rx_queues[0]; + lookup_mem = rxq->lookup_mem; + cn9k_sso_set_priv_mem(event_dev, lookup_mem, 0); +@@ -1110,11 +1154,11 @@ cn9k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev, } static int @@ -42367,7 +44586,7 @@ index f5a42a86f8..803e7ddd07 100644 *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD | RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA; -@@ -1131,8 +1144,8 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, +@@ -1131,8 +1175,8 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev); int ret; @@ -42378,7 +44597,7 @@ index f5a42a86f8..803e7ddd07 100644 dev->is_ca_internal_port = 1; cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); -@@ -1147,8 +1160,8 @@ static int +@@ -1147,8 +1191,8 @@ static int cn9k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev, int32_t queue_pair_id) { @@ -42415,9 +44634,19 @@ index 4c3932da47..0ccdb7baf3 100644 /* Write CPT instruction to lmt line */ vst1q_u64(lmt_addr, cmd01); diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev.c b/dpdk/drivers/event/cnxk/cnxk_eventdev.c -index db62d32a81..205cc76572 100644 +index db62d32a81..eff3c42f74 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev.c +@@ -109,8 +109,8 @@ cnxk_setup_event_ports(const struct rte_eventdev *event_dev, + return 0; + hws_fini: + for (i = i - 1; i >= 0; i--) { +- event_dev->data->ports[i] = NULL; + rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i])); ++ event_dev->data->ports[i] = NULL; + } + return -ENOMEM; + } @@ -150,16 +150,17 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev) deq_tmo_ns = conf->dequeue_timeout_ns; @@ -42519,9 +44748,18 @@ index 738e335ea4..44a39648e3 100644 uint8_t is_ca_internal_port; } __rte_cache_aligned; diff --git a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c -index 5ec436382c..e78d215630 100644 +index 5ec436382c..8fcbd085a9 100644 --- a/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +++ b/dpdk/drivers/event/cnxk/cnxk_eventdev_adptr.c +@@ -212,7 +212,7 @@ static void + 
cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev, + struct cnxk_sso_evdev *dev) + { +- if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) ++ if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || cnxk_eth_dev->ptp_en) + dev->tstamp[port_id] = &cnxk_eth_dev->tstamp; + } + @@ -635,6 +635,7 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused, if (dev->tx_adptr_data_sz && dev->tx_adptr_active_mask == 0) { dev->tx_adptr_data_sz = 0; @@ -42921,10 +45159,34 @@ index ffabf0d23d..abe8e68525 100644 return -EFAULT; diff --git a/dpdk/drivers/event/octeontx/ssovf_evdev.c b/dpdk/drivers/event/octeontx/ssovf_evdev.c -index 650266b996..d5e223077d 100644 +index 650266b996..62eeaf7136 100644 --- a/dpdk/drivers/event/octeontx/ssovf_evdev.c +++ b/dpdk/drivers/event/octeontx/ssovf_evdev.c -@@ -880,7 +880,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) +@@ -714,10 +714,20 @@ ssovf_close(struct rte_eventdev *dev) + } + + static int +-ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) ++ssovf_parsekv(const char *key, const char *value, void *opaque) + { +- int *flag = opaque; +- *flag = !!atoi(value); ++ uint8_t *flag = opaque; ++ uint64_t v; ++ char *end; ++ ++ errno = 0; ++ v = strtoul(value, &end, 0); ++ if ((errno != 0) || (value == end) || *end != '\0' || v > 1) { ++ ssovf_log_err("invalid %s value %s", key, value); ++ return -EINVAL; ++ } ++ ++ *flag = !!v; + return 0; + } + +@@ -880,7 +890,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev) } eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev), @@ -43145,7 +45407,7 @@ index ba826f0f01..9d6982fdab 100644 /* No partial alloc allowed. Free up allocated pointers */ cn10k_mempool_enq(mp, obj_table, count); diff --git a/dpdk/drivers/meson.build b/dpdk/drivers/meson.build -index 5188302057..b4d9252888 100644 +index 5188302057..175beec15e 100644 --- a/dpdk/drivers/meson.build +++ b/dpdk/drivers/meson.build @@ -159,7 +159,7 @@ foreach subpath:subdirs @@ -43157,6 +45419,14 @@ index 5188302057..b4d9252888 100644 endif continue endif +@@ -175,6 +175,7 @@ foreach subpath:subdirs + if get_option('enable_driver_sdk') + install_headers(driver_sdk_headers) + endif ++ dpdk_chkinc_headers += driver_sdk_headers + + if headers.length() > 0 + dpdk_includes += include_directories(drv_path) diff --git a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c b/dpdk/drivers/net/af_packet/rte_eth_af_packet.c index c13a0942aa..6b7b16f348 100644 --- a/dpdk/drivers/net/af_packet/rte_eth_af_packet.c @@ -44254,10 +46524,29 @@ index d97fbbfddd..12908d4e6f 100644 axgbe_phy_rrc(pdata); } diff --git a/dpdk/drivers/net/bnx2x/bnx2x.c b/dpdk/drivers/net/bnx2x/bnx2x.c -index 74e3018eab..55a91fad78 100644 +index 74e3018eab..7493563911 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x.c +++ b/dpdk/drivers/net/bnx2x/bnx2x.c -@@ -2389,7 +2389,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) +@@ -1623,16 +1623,12 @@ static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc) + } + + /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ +-static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode) ++static uint32_t bnx2x_send_unload_req(struct bnx2x_softc *sc, int unload_mode __rte_unused) + { + uint32_t reset_code = 0; + + /* Select the UNLOAD request mode */ +- if (unload_mode == UNLOAD_NORMAL) { +- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; +- } else { +- reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; +- } ++ reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + + /* Send the request to the MCP */ 
+ if (!BNX2X_NOMCP(sc)) { +@@ -2389,7 +2385,7 @@ int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc) static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) { sc->ilt->lines = rte_calloc("", @@ -44266,6 +46555,36 @@ index 74e3018eab..55a91fad78 100644 RTE_CACHE_LINE_SIZE); return sc->ilt->lines == NULL; } +@@ -10337,12 +10333,13 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc) + REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); + + if (!CHIP_IS_E1x(sc)) { +- int factor = 0; ++ int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : ++ (CHIP_REV_IS_FPGA(sc) ? 400 : 0); + + ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); + ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); + +-/* let the HW do it's magic... */ ++ /* let the HW do it's magic... */ + do { + DELAY(200000); + val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); +@@ -11195,11 +11192,9 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc) + /* Turn on a single ISR mode in IGU if driver is going to use + * INT#x or MSI + */ +- if ((sc->interrupt_mode != INTR_MODE_MSIX) +- || (sc->interrupt_mode != INTR_MODE_SINGLE_MSIX)) { ++ if (sc->interrupt_mode == INTR_MODE_INTX || ++ sc->interrupt_mode == INTR_MODE_MSI) + pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; +- } +- + /* + * Timers workaround bug: function init part. + * Need to wait 20msec after initializing ILT, diff --git a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c b/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c index 4448cf2de2..1327cbe912 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_ethdev.c @@ -44311,10 +46630,21 @@ index 4448cf2de2..1327cbe912 100644 } diff --git a/dpdk/drivers/net/bnx2x/bnx2x_stats.c b/dpdk/drivers/net/bnx2x/bnx2x_stats.c -index c07b01510a..69132c7c80 100644 +index c07b01510a..72a26ed5cc 100644 --- a/dpdk/drivers/net/bnx2x/bnx2x_stats.c +++ b/dpdk/drivers/net/bnx2x/bnx2x_stats.c -@@ -114,7 +114,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) +@@ -75,10 +75,6 @@ bnx2x_storm_stats_post(struct bnx2x_softc *sc) + int rc; + + if (!sc->stats_pending) { +- if (sc->stats_pending) { +- return; +- } +- + sc->fw_stats_req->hdr.drv_stats_counter = + htole16(sc->stats_counter++); + +@@ -114,7 +110,7 @@ bnx2x_hw_stats_post(struct bnx2x_softc *sc) /* Update MCP's statistics if possible */ if (sc->func_stx) { @@ -44323,7 +46653,7 @@ index c07b01510a..69132c7c80 100644 sizeof(sc->func_stats)); } -@@ -817,10 +817,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) +@@ -817,10 +813,10 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc) etherstatspktsover1522octets); } @@ -44337,7 +46667,7 @@ index c07b01510a..69132c7c80 100644 estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; -@@ -1492,9 +1492,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) +@@ -1492,9 +1488,11 @@ bnx2x_stats_init(struct bnx2x_softc *sc) REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); if (!CHIP_IS_E3(sc)) { REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, @@ -44433,7 +46763,7 @@ index c9aa45ed3b..3044c9e079 100644 struct rte_eth_rss_conf rss_conf; /* RSS configuration. 
*/ diff --git a/dpdk/drivers/net/bnxt/bnxt_ethdev.c b/dpdk/drivers/net/bnxt/bnxt_ethdev.c -index b3de490d36..cb52ef2f84 100644 +index b3de490d36..e44fa4405c 100644 --- a/dpdk/drivers/net/bnxt/bnxt_ethdev.c +++ b/dpdk/drivers/net/bnxt/bnxt_ethdev.c @@ -1017,7 +1017,6 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, @@ -44476,7 +46806,15 @@ index b3de490d36..cb52ef2f84 100644 } static void bnxt_drv_uninit(struct bnxt *bp) -@@ -4697,7 +4700,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, +@@ -3956,7 +3959,6 @@ static int bnxt_get_module_eeprom(struct rte_eth_dev *dev, + + switch (module_info[0]) { + case SFF_MODULE_ID_SFP: +- module_info[SFF_DIAG_SUPPORT_OFFSET] = 0; + if (module_info[SFF_DIAG_SUPPORT_OFFSET]) { + pg_addr[2] = I2C_DEV_ADDR_A2; + pg_addr[3] = I2C_DEV_ADDR_A2; +@@ -4697,7 +4699,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; const struct rte_memzone *mz = NULL; @@ -44485,7 +46823,7 @@ index b3de490d36..cb52ef2f84 100644 rte_iova_t mz_phys_addr; uint64_t valid_bits = 0; uint32_t sz; -@@ -4709,6 +4712,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, +@@ -4709,6 +4711,19 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / BNXT_PAGE_SIZE; rmem->page_size = BNXT_PAGE_SIZE; @@ -44505,7 +46843,7 @@ index b3de490d36..cb52ef2f84 100644 rmem->pg_arr = ctx_pg->ctx_pg_arr; rmem->dma_arr = ctx_pg->ctx_dma_arr; rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; -@@ -4716,13 +4732,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, +@@ -4716,13 +4731,13 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, valid_bits = PTU_PTE_VALID; if (rmem->nr_pages > 1) { @@ -44523,7 +46861,7 @@ index b3de490d36..cb52ef2f84 100644 rmem->nr_pages * 8, bp->eth_dev->device->numa_node, RTE_MEMZONE_2MB | -@@ -4741,11 +4757,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, +@@ -4741,11 +4756,11 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, rmem->pg_tbl_mz = mz; } @@ -44538,7 +46876,7 @@ index b3de490d36..cb52ef2f84 100644 mem_size, bp->eth_dev->device->numa_node, RTE_MEMZONE_1GB | -@@ -4791,6 +4807,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) +@@ -4791,6 +4806,17 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) return; bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; @@ -44556,7 +46894,7 @@ index b3de490d36..cb52ef2f84 100644 rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); -@@ -4803,6 +4830,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) +@@ -4803,6 +4829,8 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { @@ -44565,7 +46903,7 @@ index b3de490d36..cb52ef2f84 100644 if (bp->ctx->tqm_mem[i]) rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); } -@@ -5859,6 +5888,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) +@@ -5859,6 +5887,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused) rte_eth_copy_pci_info(eth_dev, pci_dev); eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; @@ -44573,7 +46911,7 @@ index b3de490d36..cb52ef2f84 100644 bp = eth_dev->data->dev_private; -@@ -6067,13 +6097,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) +@@ -6067,13 +6096,6 @@ static int bnxt_init_rep_info(struct bnxt *bp) for (i = 0; i < BNXT_MAX_CFA_CODE; i++) bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; 
@@ -44802,10 +47140,40 @@ index f3a03812ad..6e2d87de09 100644 unsigned int cp_nr_rings; diff --git a/dpdk/drivers/net/bnxt/bnxt_txr.c b/dpdk/drivers/net/bnxt/bnxt_txr.c -index 67e016775c..60bb3eea0c 100644 +index 67e016775c..7820d11cc2 100644 --- a/dpdk/drivers/net/bnxt/bnxt_txr.c +++ b/dpdk/drivers/net/bnxt/bnxt_txr.c -@@ -515,6 +515,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) +@@ -255,17 +255,24 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, + */ + txbd1->kid_or_ts_high_mss = 0; + +- if (txq->vfr_tx_cfa_action) +- txbd1->cfa_action = txq->vfr_tx_cfa_action; +- else +- txbd1->cfa_action = txq->bp->tx_cfa_action; ++ if (txq->vfr_tx_cfa_action) { ++ txbd1->cfa_action = txq->vfr_tx_cfa_action & 0xffff; ++ txbd1->cfa_action_high = (txq->vfr_tx_cfa_action >> 16) & ++ TX_BD_LONG_CFA_ACTION_HIGH_MASK; ++ } else { ++ txbd1->cfa_action = txq->bp->tx_cfa_action & 0xffff; ++ txbd1->cfa_action_high = (txq->bp->tx_cfa_action >> 16) & ++ TX_BD_LONG_CFA_ACTION_HIGH_MASK; ++ } + + if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { + uint16_t hdr_size; + + /* TSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO | +- TX_BD_LONG_LFLAGS_T_IPID; ++ TX_BD_LONG_LFLAGS_T_IPID | ++ TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | ++ TX_BD_LONG_LFLAGS_T_IP_CHKSUM; + hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len; + hdr_size += (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ? +@@ -515,6 +522,19 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -44825,7 +47193,7 @@ index 67e016775c..60bb3eea0c 100644 { int rc; uint16_t nb_tx_pkts = 0; -@@ -560,6 +573,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +@@ -560,6 +580,12 @@ int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) if (rc) return rc; @@ -44853,6 +47221,159 @@ index e11343c082..2be3ba4cac 100644 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +diff --git a/dpdk/drivers/net/bnxt/tf_core/tf_msg.c b/dpdk/drivers/net/bnxt/tf_core/tf_msg.c +index fbc96d374c..f468de564d 100644 +--- a/dpdk/drivers/net/bnxt/tf_core/tf_msg.c ++++ b/dpdk/drivers/net/bnxt/tf_core/tf_msg.c +@@ -25,7 +25,7 @@ + */ + #define TF_MSG_SET_GLOBAL_CFG_DATA_SIZE 16 + #define TF_MSG_EM_INSERT_KEY_SIZE 64 +-#define TF_MSG_EM_INSERT_RECORD_SIZE 80 ++#define TF_MSG_EM_INSERT_RECORD_SIZE 96 + #define TF_MSG_TBL_TYPE_SET_DATA_SIZE 88 + + /* Compile check - Catch any msg changes that we depend on, like the +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c +index 85c9cbb7f2..69a244bd65 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c +@@ -720,18 +720,21 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, + + } while (!rc && nxt_resource_index); + +- bnxt_ulp_cntxt_release_fdb_lock(ctxt); +- +- if (rc || !found_cntr_resource) ++ if (rc || !found_cntr_resource) { ++ bnxt_ulp_cntxt_release_fdb_lock(ctxt); + return rc; ++ } + + dir = params.direction; + hw_cntr_id = params.resource_hndl; + if (!found_parent_flow && + params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) { +- if (!ulp_fc_info->num_counters) +- return ulp_fc_tf_flow_stat_get(ctxt, ¶ms, count); ++ if (!ulp_fc_info->num_counters) { ++ rc = ulp_fc_tf_flow_stat_get(ctxt, ¶ms, count); ++ bnxt_ulp_cntxt_release_fdb_lock(ctxt); ++ return rc; ++ } + + 
/* TODO: + * Think about optimizing with try_lock later +@@ -755,9 +758,14 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, + params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) { + /* Get stats from the parent child table */ +- ulp_flow_db_parent_flow_count_get(ctxt, pc_idx, +- &count->hits, &count->bytes, +- count->reset); ++ if (ulp_flow_db_parent_flow_count_get(ctxt, flow_id, ++ pc_idx, ++ &count->hits, ++ &count->bytes, ++ count->reset)) { ++ bnxt_ulp_cntxt_release_fdb_lock(ctxt); ++ return -EIO; ++ } + if (count->hits) + count->hits_set = 1; + if (count->bytes) +@@ -766,7 +774,7 @@ int ulp_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, + /* TBD: Handle External counters */ + rc = -EINVAL; + } +- ++ bnxt_ulp_cntxt_release_fdb_lock(ctxt); + return rc; + } + +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +index 9968311c44..51ea3203bc 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.c +@@ -1361,13 +1361,12 @@ ulp_flow_db_pc_db_parent_flow_set(struct bnxt_ulp_context *ulp_ctxt, + + if (set_flag) { + pc_entry->parent_fid = parent_fid; ++ pc_entry->parent_ref_cnt++; + } else { +- if (pc_entry->parent_fid != parent_fid) +- BNXT_TF_DBG(ERR, "Panic: invalid parent id\n"); +- pc_entry->parent_fid = 0; +- ++ if (pc_entry->parent_ref_cnt > 0) ++ pc_entry->parent_ref_cnt--; + /* Free the parent child db entry if no user present */ +- if (!pc_entry->f2_cnt) ++ if (!pc_entry->parent_ref_cnt && !pc_entry->f2_cnt) + ulp_flow_db_pc_db_entry_free(ulp_ctxt, pc_entry); + } + return 0; +@@ -1422,7 +1421,7 @@ ulp_flow_db_pc_db_child_flow_set(struct bnxt_ulp_context *ulp_ctxt, + ULP_INDEX_BITMAP_RESET(t[a_idx], child_fid); + if (pc_entry->f2_cnt) + pc_entry->f2_cnt--; +- if (!pc_entry->f2_cnt && !pc_entry->parent_fid) ++ if (!pc_entry->f2_cnt && !pc_entry->parent_ref_cnt) + ulp_flow_db_pc_db_entry_free(ulp_ctxt, pc_entry); + } + return 0; +@@ -1514,7 +1513,7 @@ ulp_flow_db_parent_flow_count_accum_set(struct bnxt_ulp_context *ulp_ctxt, + /* check for parent idx validity */ + p_pdb = &flow_db->parent_child_db; + if (pc_idx >= p_pdb->entries_count || +- !p_pdb->parent_flow_tbl[pc_idx].parent_fid) { ++ !p_pdb->parent_flow_tbl[pc_idx].parent_ref_cnt) { + BNXT_TF_DBG(ERR, "Invalid parent child index %x\n", pc_idx); + return -EINVAL; + } +@@ -1761,6 +1760,7 @@ ulp_flow_db_parent_flow_count_update(struct bnxt_ulp_context *ulp_ctxt, + */ + int32_t + ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt, ++ uint32_t flow_id, + uint32_t pc_idx, uint64_t *packet_count, + uint64_t *byte_count, uint8_t count_reset) + { +@@ -1773,6 +1773,13 @@ ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt, + return -EINVAL; + } + ++ /* stale parent fid */ ++ if (flow_id != pc_entry->parent_fid) { ++ *packet_count = 0; ++ *byte_count = 0; ++ return 0; ++ } ++ + if (pc_entry->counter_acc) { + *packet_count = pc_entry->pkt_count; + *byte_count = pc_entry->byte_count; +diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +index 2b02836a40..31afd55094 100644 +--- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h ++++ b/dpdk/drivers/net/bnxt/tf_ulp/ulp_flow_db.h +@@ -60,6 +60,7 @@ struct bnxt_ulp_flow_tbl { + struct ulp_fdb_parent_info { + uint32_t valid; + uint32_t parent_fid; ++ uint32_t parent_ref_cnt; + uint32_t counter_acc; + uint64_t pkt_count; + uint64_t byte_count; +@@ -385,6 +386,7 @@ 
ulp_flow_db_parent_flow_count_update(struct bnxt_ulp_context *ulp_ctxt, + */ + int32_t + ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt, ++ uint32_t flow_id, + uint32_t pc_idx, + uint64_t *packet_count, + uint64_t *byte_count, diff --git a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c index 0030a487f5..897410cc0a 100644 --- a/dpdk/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c @@ -45091,6 +47612,51 @@ index b9bcebc6cb..8df632fa6e 100644 return 0; parse_error: +diff --git a/dpdk/drivers/net/cnxk/cn10k_ethdev.c b/dpdk/drivers/net/cnxk/cn10k_ethdev.c +index 4658713591..3dd71fc70d 100644 +--- a/dpdk/drivers/net/cnxk/cn10k_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cn10k_ethdev.c +@@ -30,7 +30,7 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) + flags |= NIX_RX_MULTI_SEG_F; + +- if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) ++ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + flags |= NIX_RX_OFFLOAD_TSTAMP_F; + + if (!dev->ptype_disable) +@@ -381,7 +381,7 @@ cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix; + struct rte_eth_dev *eth_dev; + struct cn10k_eth_rxq *rxq; +- int i; ++ int i, rc; + + if (!dev) + return -EINVAL; +@@ -404,8 +404,22 @@ cn10k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + * and MTU setting also requires MBOX message to be + * sent(VF->PF) + */ ++ if (dev->ptp_en) { ++ rc = rte_mbuf_dyn_rx_timestamp_register ++ (&dev->tstamp.tstamp_dynfield_offset, ++ &dev->tstamp.rx_tstamp_dynflag); ++ if (rc != 0) { ++ plt_err("Failed to register Rx timestamp field/flag"); ++ return -EINVAL; ++ } ++ } + eth_dev->rx_pkt_burst = nix_ptp_vf_burst; ++ rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; + rte_mb(); ++ if (dev->cnxk_sso_ptp_tstamp_cb) ++ dev->cnxk_sso_ptp_tstamp_cb(eth_dev->data->port_id, ++ NIX_RX_OFFLOAD_TSTAMP_F, dev->ptp_en); ++ + } + + return 0; diff --git a/dpdk/drivers/net/cnxk/cn10k_rx.h b/dpdk/drivers/net/cnxk/cn10k_rx.h index 721127dddd..b60c158d55 100644 --- a/dpdk/drivers/net/cnxk/cn10k_rx.h @@ -45263,6 +47829,50 @@ index 815cd2ff1f..cd9b1f225e 100644 len_olflags3 = vld1q_u64(mbuf3 + 3); /* Move mbufs to point pool */ +diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev.c b/dpdk/drivers/net/cnxk/cn9k_ethdev.c +index 3b702d9696..e5ab0f3697 100644 +--- a/dpdk/drivers/net/cnxk/cn9k_ethdev.c ++++ b/dpdk/drivers/net/cnxk/cn9k_ethdev.c +@@ -30,7 +30,7 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) + flags |= NIX_RX_MULTI_SEG_F; + +- if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) ++ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) + flags |= NIX_RX_OFFLOAD_TSTAMP_F; + + if (!dev->ptype_disable) +@@ -362,7 +362,7 @@ cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + struct cnxk_eth_dev *dev = (struct cnxk_eth_dev *)nix; + struct rte_eth_dev *eth_dev; + struct cn9k_eth_rxq *rxq; +- int i; ++ int i, rc; + + if (!dev) + return -EINVAL; +@@ -385,8 +385,21 @@ cn9k_nix_ptp_info_update_cb(struct roc_nix *nix, bool ptp_en) + * and MTU setting also requires MBOX message to be + * sent(VF->PF) + */ ++ if (dev->ptp_en) { ++ rc = rte_mbuf_dyn_rx_timestamp_register ++ (&dev->tstamp.tstamp_dynfield_offset, ++ &dev->tstamp.rx_tstamp_dynflag); ++ if (rc != 0) { ++ plt_err("Failed to register Rx timestamp field/flag"); ++ return -EINVAL; ++ } ++ } + 
eth_dev->rx_pkt_burst = nix_ptp_vf_burst; ++ rte_eth_fp_ops[eth_dev->data->port_id].rx_pkt_burst = eth_dev->rx_pkt_burst; + rte_mb(); ++ if (dev->cnxk_sso_ptp_tstamp_cb) ++ dev->cnxk_sso_ptp_tstamp_cb(eth_dev->data->port_id, ++ NIX_RX_OFFLOAD_TSTAMP_F, dev->ptp_en); + } + + return 0; diff --git a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c b/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c index 67966a4e49..327f221e38 100644 --- a/dpdk/drivers/net/cnxk/cn9k_ethdev_sec.c @@ -45426,7 +48036,7 @@ index 404edd6aed..33db781abe 100644 } diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.c b/dpdk/drivers/net/cnxk/cnxk_ethdev.c -index bf1585fe67..56b26a9650 100644 +index bf1585fe67..73f59dede1 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.c @@ -884,6 +884,27 @@ cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss, @@ -45487,6 +48097,15 @@ index bf1585fe67..56b26a9650 100644 rc |= roc_nix_lf_free(nix); fail_configure: dev->configured = 0; +@@ -1654,7 +1684,7 @@ cnxk_nix_dev_start(struct rte_eth_dev *eth_dev) + else + cnxk_eth_dev_ops.timesync_disable(eth_dev); + +- if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) { ++ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP || dev->ptp_en) { + rc = rte_mbuf_dyn_rx_timestamp_register + (&dev->tstamp.tstamp_dynfield_offset, + &dev->tstamp.rx_tstamp_dynflag); @@ -1980,6 +2010,11 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset) /* Free ROC RQ's, SQ's and CQ's memory */ nix_free_queue_mem(dev); @@ -45500,10 +48119,24 @@ index bf1585fe67..56b26a9650 100644 rc = roc_nix_lf_free(nix); if (rc) diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev.h b/dpdk/drivers/net/cnxk/cnxk_ethdev.h -index 651ef45ea8..83d6e9398f 100644 +index 651ef45ea8..4202b647f4 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev.h +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev.h -@@ -494,6 +494,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv, +@@ -443,6 +443,13 @@ struct cnxk_eth_dev { + /* Reassembly dynfield/flag offsets */ + int reass_dynfield_off; + int reass_dynflag_bit; ++ ++ /* SSO event dev */ ++ void *evdev_priv; ++ ++ /* SSO event dev ptp */ ++ void (*cnxk_sso_ptp_tstamp_cb) ++ (uint16_t port_id, uint16_t flags, bool ptp_en); + }; + + struct cnxk_eth_rxq_sp { +@@ -494,6 +501,7 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev); int cnxk_nix_remove(struct rte_pci_device *pci_dev); int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu); @@ -45528,7 +48161,7 @@ index d5e647c64d..a7ccdfb756 100644 } diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c -index d28509dbda..0c89e0424f 100644 +index d28509dbda..fda619ac5f 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c +++ b/dpdk/drivers/net/cnxk/cnxk_ethdev_devargs.c @@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args) @@ -45540,6 +48173,15 @@ index d28509dbda..0c89e0424f 100644 return 0; } +@@ -269,7 +269,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev) + uint16_t custom_sa_act = 0; + struct rte_kvargs *kvlist; + uint16_t no_inl_dev = 0; +- uint8_t lock_rx_ctx = 0; ++ uint16_t lock_rx_ctx = 0; + + memset(&sdp_chan, 0, sizeof(sdp_chan)); + memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info)); diff --git a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c b/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c index 8f7287161b..5fd39149cb 100644 --- a/dpdk/drivers/net/cnxk/cnxk_ethdev_ops.c @@ -45803,7 +48445,7 @@ index 
45bbeaef0c..8cc3d9f257 100644 } diff --git a/dpdk/drivers/net/dpaa/dpaa_ethdev.c b/dpdk/drivers/net/dpaa/dpaa_ethdev.c -index a6c86113d1..bcb28f33ee 100644 +index a6c86113d1..6fdbe80334 100644 --- a/dpdk/drivers/net/dpaa/dpaa_ethdev.c +++ b/dpdk/drivers/net/dpaa/dpaa_ethdev.c @@ -14,6 +14,7 @@ @@ -45926,8 +48568,41 @@ index a6c86113d1..bcb28f33ee 100644 return 0; } +@@ -929,7 +971,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + struct fman_if *fif = dev->process_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx]; + struct qm_mcc_initfq opts = {0}; +- u32 flags = 0; ++ u32 ch_id, flags = 0; + int ret; + u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; + uint32_t max_rx_pktlen; +@@ -1053,7 +1095,9 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + DPAA_IF_RX_CONTEXT_STASH; + + /*Create a channel and associate given queue with the channel*/ +- qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0); ++ qman_alloc_pool_range(&ch_id, 1, 1, 0); ++ rxq->ch_id = (u16)ch_id; ++ + opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ; + opts.fqd.dest.channel = rxq->ch_id; + opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY; +diff --git a/dpdk/drivers/net/dpaa/dpaa_rxtx.c b/dpdk/drivers/net/dpaa/dpaa_rxtx.c +index ce4f3d6c85..018d55bbdc 100644 +--- a/dpdk/drivers/net/dpaa/dpaa_rxtx.c ++++ b/dpdk/drivers/net/dpaa/dpaa_rxtx.c +@@ -1034,7 +1034,7 @@ reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf) + /* Copy the data */ + data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy); + +- rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(mbuf, ++ rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf, + void *, offset1), bytes_to_copy); + + /* Set new offsets and the temp buffers */ diff --git a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c -index 679f33ae1a..8e610b6bba 100644 +index 679f33ae1a..834f904c14 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c @@ -1278,6 +1278,11 @@ dpaa2_dev_start(struct rte_eth_dev *dev) @@ -45962,6 +48637,16 @@ index 679f33ae1a..8e610b6bba 100644 return 0; } +@@ -1379,8 +1390,7 @@ dpaa2_dev_close(struct rte_eth_dev *dev) + for (i = 0; i < MAX_TCS; i++) + rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); + +- if (priv->extract.qos_extract_param) +- rte_free((void *)(size_t)priv->extract.qos_extract_param); ++ rte_free((void *)(size_t)priv->extract.qos_extract_param); + + DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); + return 0; diff --git a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c index f60e78e1fd..85910bbd8f 100644 --- a/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -45994,6 +48679,96 @@ index f60e78e1fd..85910bbd8f 100644 if (dpaa2_enable_ts[mbuf->port]) { *dpaa2_timestamp_dynfield(mbuf) = annotation->word2; +diff --git a/dpdk/drivers/net/dpaa2/dpaa2_tm.c b/dpdk/drivers/net/dpaa2/dpaa2_tm.c +index 8fe5bfa013..0633259624 100644 +--- a/dpdk/drivers/net/dpaa2/dpaa2_tm.c ++++ b/dpdk/drivers/net/dpaa2/dpaa2_tm.c +@@ -684,6 +684,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + struct dpaa2_tm_node *leaf_node, *temp_leaf_node, *channel_node; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret, t; ++ bool conf_schedule = false; + + /* Populate TCs */ + LIST_FOREACH(channel_node, &priv->nodes, next) { +@@ -757,7 +758,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + } + + LIST_FOREACH(channel_node, 
&priv->nodes, next) { +- int wfq_grp = 0, is_wfq_grp = 0, conf[DPNI_MAX_TC]; ++ int wfq_grp = 0, is_wfq_grp = 0, conf[priv->nb_tx_queues]; + struct dpni_tx_priorities_cfg prio_cfg; + + memset(&prio_cfg, 0, sizeof(prio_cfg)); +@@ -767,6 +768,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + if (channel_node->level_id != CHANNEL_LEVEL) + continue; + ++ conf_schedule = false; + LIST_FOREACH(leaf_node, &priv->nodes, next) { + struct dpaa2_queue *leaf_dpaa2_q; + uint8_t leaf_tc_id; +@@ -789,6 +791,7 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + if (leaf_node->parent != channel_node) + continue; + ++ conf_schedule = true; + leaf_dpaa2_q = (struct dpaa2_queue *)dev->data->tx_queues[leaf_node->id]; + leaf_tc_id = leaf_dpaa2_q->tc_index; + /* Process sibling leaf nodes */ +@@ -829,8 +832,8 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + goto out; + } + is_wfq_grp = 1; +- conf[temp_leaf_node->id] = 1; + } ++ conf[temp_leaf_node->id] = 1; + } + if (is_wfq_grp) { + if (wfq_grp == 0) { +@@ -851,6 +854,9 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + } + conf[leaf_node->id] = 1; + } ++ if (!conf_schedule) ++ continue; ++ + if (wfq_grp > 1) { + prio_cfg.separate_groups = 1; + if (prio_cfg.prio_group_B < prio_cfg.prio_group_A) { +@@ -864,6 +870,16 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + + prio_cfg.prio_group_A = 1; + prio_cfg.channel_idx = channel_node->channel_id; ++ DPAA2_PMD_DEBUG("########################################\n"); ++ DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx); ++ for (t = 0; t < DPNI_MAX_TC; t++) ++ DPAA2_PMD_DEBUG("tc = %d mode = %d, delta = %d\n", t, ++ prio_cfg.tc_sched[t].mode, ++ prio_cfg.tc_sched[t].delta_bandwidth); ++ ++ DPAA2_PMD_DEBUG("prioritya = %d, priorityb = %d, separate grps" ++ " = %d\n", prio_cfg.prio_group_A, ++ prio_cfg.prio_group_B, prio_cfg.separate_groups); + ret = dpni_set_tx_priorities(dpni, 0, priv->token, &prio_cfg); + if (ret) { + ret = -rte_tm_error_set(error, EINVAL, +@@ -871,15 +887,6 @@ dpaa2_hierarchy_commit(struct rte_eth_dev *dev, int clear_on_fail, + "Scheduling Failed\n"); + goto out; + } +- DPAA2_PMD_DEBUG("########################################\n"); +- DPAA2_PMD_DEBUG("Channel idx = %d\n", prio_cfg.channel_idx); +- for (t = 0; t < DPNI_MAX_TC; t++) { +- DPAA2_PMD_DEBUG("tc = %d mode = %d ", t, prio_cfg.tc_sched[t].mode); +- DPAA2_PMD_DEBUG("delta = %d\n", prio_cfg.tc_sched[t].delta_bandwidth); +- } +- DPAA2_PMD_DEBUG("prioritya = %d\n", prio_cfg.prio_group_A); +- DPAA2_PMD_DEBUG("priorityb = %d\n", prio_cfg.prio_group_B); +- DPAA2_PMD_DEBUG("separate grps = %d\n\n", prio_cfg.separate_groups); + } + return 0; + diff --git a/dpdk/drivers/net/e1000/base/e1000_base.c b/dpdk/drivers/net/e1000/base/e1000_base.c index ab73e1e59e..3ec32e7240 100644 --- a/dpdk/drivers/net/e1000/base/e1000_base.c @@ -46008,7 +48783,7 @@ index ab73e1e59e..3ec32e7240 100644 } diff --git a/dpdk/drivers/net/e1000/em_ethdev.c b/dpdk/drivers/net/e1000/em_ethdev.c -index 8ee9be12ad..18efa78ac3 100644 +index 8ee9be12ad..efe3665cec 100644 --- a/dpdk/drivers/net/e1000/em_ethdev.c +++ b/dpdk/drivers/net/e1000/em_ethdev.c @@ -1073,8 +1073,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) @@ -46022,6 +48797,16 @@ index 8ee9be12ad..18efa78ac3 100644 dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(); dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() | +@@ -1121,6 +1121,9 @@ 
eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete) + struct rte_eth_link link; + int link_up, count; + ++ if (rte_eal_process_type() != RTE_PROC_PRIMARY) ++ return -1; ++ + link_up = 0; + hw->mac.get_link_status = 1; + diff --git a/dpdk/drivers/net/e1000/em_rxtx.c b/dpdk/drivers/net/e1000/em_rxtx.c index d48fd52404..df5fbb7823 100644 --- a/dpdk/drivers/net/e1000/em_rxtx.c @@ -46079,6 +48864,39 @@ index d48fd52404..df5fbb7823 100644 } /* Program the Transmit Control Register. */ +diff --git a/dpdk/drivers/net/e1000/igb_ethdev.c b/dpdk/drivers/net/e1000/igb_ethdev.c +index 8858f975f8..e9ad558c82 100644 +--- a/dpdk/drivers/net/e1000/igb_ethdev.c ++++ b/dpdk/drivers/net/e1000/igb_ethdev.c +@@ -3857,11 +3857,11 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev, + + filter_info->twotuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->twotuple_list, filter, entries); +- rte_free(filter); + + E1000_WRITE_REG(hw, E1000_TTQF(filter->index), E1000_TTQF_DISABLE_MASK); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); ++ rte_free(filter); + return 0; + } + +@@ -4298,7 +4298,6 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + + filter_info->fivetuple_mask &= ~(1 << filter->index); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); +- rte_free(filter); + + E1000_WRITE_REG(hw, E1000_FTQF(filter->index), + E1000_FTQF_VF_BP | E1000_FTQF_MASK); +@@ -4307,6 +4306,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev, + E1000_WRITE_REG(hw, E1000_SPQF(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIR(filter->index), 0); + E1000_WRITE_REG(hw, E1000_IMIREXT(filter->index), 0); ++ rte_free(filter); + return 0; + } + diff --git a/dpdk/drivers/net/e1000/igb_rxtx.c b/dpdk/drivers/net/e1000/igb_rxtx.c index f32dee46df..6027cfbfb1 100644 --- a/dpdk/drivers/net/e1000/igb_rxtx.c @@ -46175,6 +48993,33 @@ index 5ca36ab6d9..98035f3cd4 100644 ENA_USLEEP(delay_us); } +diff --git a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +index 8f2b3a87c2..218bd4dd4b 100644 +--- a/dpdk/drivers/net/ena/base/ena_plat_dpdk.h ++++ b/dpdk/drivers/net/ena/base/ena_plat_dpdk.h +@@ -26,7 +26,6 @@ + #include + + #include +-#include + + typedef uint64_t u64; + typedef uint32_t u32; +@@ -67,14 +66,6 @@ typedef uint64_t dma_addr_t; + #define ENA_UDELAY(x) rte_delay_us_block(x) + + #define ENA_TOUCH(x) ((void)(x)) +-/* Redefine memcpy with caution: rte_memcpy can be simply aliased to memcpy, so +- * make the redefinition only if it's safe (and beneficial) to do so. 
+- */ +-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64_MEMCPY) || \ +- defined(RTE_ARCH_ARM_NEON_MEMCPY) +-#undef memcpy +-#define memcpy rte_memcpy +-#endif + #define wmb rte_wmb + #define rmb rte_rmb + #define mb rte_mb diff --git a/dpdk/drivers/net/ena/ena_ethdev.c b/dpdk/drivers/net/ena/ena_ethdev.c index efcb163027..e640bbae3d 100644 --- a/dpdk/drivers/net/ena/ena_ethdev.c @@ -46530,6 +49375,56 @@ index 8b83063f0a..a4e06a0cfa 100644 } static int +diff --git a/dpdk/drivers/net/gve/base/gve_osdep.h b/dpdk/drivers/net/gve/base/gve_osdep.h +index 7cb73002f4..26374bac5e 100644 +--- a/dpdk/drivers/net/gve/base/gve_osdep.h ++++ b/dpdk/drivers/net/gve/base/gve_osdep.h +@@ -24,18 +24,36 @@ + + #include "../gve_logs.h" + +-typedef uint8_t u8; +-typedef uint16_t u16; +-typedef uint32_t u32; +-typedef uint64_t u64; ++#ifndef u8 ++#define u8 uint8_t ++#endif ++#ifndef u16 ++#define u16 uint16_t ++#endif ++#ifndef u32 ++#define u32 uint32_t ++#endif ++#ifndef u64 ++#define u64 uint64_t ++#endif + +-typedef rte_be16_t __sum16; ++#ifndef __sum16 ++#define __sum16 rte_be16_t ++#endif + +-typedef rte_be16_t __be16; +-typedef rte_be32_t __be32; +-typedef rte_be64_t __be64; ++#ifndef __be16 ++#define __be16 rte_be16_t ++#endif ++#ifndef __be32 ++#define __be32 rte_be32_t ++#endif ++#ifndef __be64 ++#define __be64 rte_be64_t ++#endif + +-typedef rte_iova_t dma_addr_t; ++#ifndef dma_addr_t ++#define dma_addr_t rte_iova_t ++#endif + + #define ETH_MIN_MTU RTE_ETHER_MIN_MTU + #define ETH_ALEN RTE_ETHER_ADDR_LEN diff --git a/dpdk/drivers/net/gve/gve_ethdev.c b/dpdk/drivers/net/gve/gve_ethdev.c index 97781f0ed3..0796d37760 100644 --- a/dpdk/drivers/net/gve/gve_ethdev.c @@ -46651,7 +49546,7 @@ index 7aa5e7d8e9..adc9f75c81 100644 } diff --git a/dpdk/drivers/net/hns3/hns3_cmd.c b/dpdk/drivers/net/hns3/hns3_cmd.c -index bdfc85f934..fb515ed0ee 100644 +index bdfc85f934..7bdf7740c1 100644 --- a/dpdk/drivers/net/hns3/hns3_cmd.c +++ b/dpdk/drivers/net/hns3/hns3_cmd.c @@ -507,6 +507,8 @@ hns3_parse_capability(struct hns3_hw *hw, @@ -46663,7 +49558,7 @@ index bdfc85f934..fb515ed0ee 100644 } static uint32_t -@@ -519,6 +521,43 @@ hns3_build_api_caps(void) +@@ -519,6 +521,41 @@ hns3_build_api_caps(void) return rte_cpu_to_le_32(api_caps); } @@ -46685,9 +49580,7 @@ index bdfc85f934..fb515ed0ee 100644 + if (device_id == HNS3_DEV_ID_25GE_RDMA || + device_id == HNS3_DEV_ID_50GE_RDMA || + device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || -+ device_id == HNS3_DEV_ID_200G_RDMA || -+ device_id == HNS3_DEV_ID_100G_ROH || -+ device_id == HNS3_DEV_ID_200G_ROH) ++ device_id == HNS3_DEV_ID_200G_RDMA) + hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); +} + @@ -46707,7 +49600,7 @@ index bdfc85f934..fb515ed0ee 100644 static int hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) { -@@ -536,6 +575,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) +@@ -536,6 +573,9 @@ hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw) return ret; hw->fw_version = rte_le_to_cpu_32(resp->firmware); @@ -46717,7 +49610,7 @@ index bdfc85f934..fb515ed0ee 100644 /* * Make sure mask the capability before parse capability because it * may overwrite resp's data. 
-@@ -659,9 +701,6 @@ hns3_cmd_init(struct hns3_hw *hw) +@@ -659,9 +699,6 @@ hns3_cmd_init(struct hns3_hw *hw) hw->cmq.csq.next_to_use = 0; hw->cmq.crq.next_to_clean = 0; hw->cmq.crq.next_to_use = 0; @@ -47139,7 +50032,7 @@ index ae62bb56c8..bac4427227 100644 } diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.c b/dpdk/drivers/net/hns3/hns3_ethdev.c -index d326f70129..0050d46ae7 100644 +index d326f70129..9b0b3776f4 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.c +++ b/dpdk/drivers/net/hns3/hns3_ethdev.c @@ -15,6 +15,7 @@ @@ -48071,30 +50964,11 @@ index d326f70129..0050d46ae7 100644 sfp_type.type, sfp_type.ext_type); return -EINVAL; } -@@ -6707,6 +6650,8 @@ static const struct rte_pci_id pci_id_hns3_map[] = { - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, - { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, -+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, -+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, - { .vendor_id = 0, }, /* sentinel */ - }; - diff --git a/dpdk/drivers/net/hns3/hns3_ethdev.h b/dpdk/drivers/net/hns3/hns3_ethdev.h -index 2457754b3d..1afe4c4ff7 100644 +index 2457754b3d..5445170c8b 100644 --- a/dpdk/drivers/net/hns3/hns3_ethdev.h +++ b/dpdk/drivers/net/hns3/hns3_ethdev.h -@@ -28,7 +28,9 @@ - #define HNS3_DEV_ID_25GE_RDMA 0xA222 - #define HNS3_DEV_ID_50GE_RDMA 0xA224 - #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 -+#define HNS3_DEV_ID_100G_ROH 0xA227 - #define HNS3_DEV_ID_200G_RDMA 0xA228 -+#define HNS3_DEV_ID_200G_ROH 0xA22C - #define HNS3_DEV_ID_100G_VF 0xA22E - #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F - -@@ -483,6 +485,9 @@ struct hns3_queue_intr { +@@ -483,6 +483,9 @@ struct hns3_queue_intr { #define HNS3_PKTS_DROP_STATS_MODE1 0 #define HNS3_PKTS_DROP_STATS_MODE2 1 @@ -48104,7 +50978,7 @@ index 2457754b3d..1afe4c4ff7 100644 struct hns3_hw { struct rte_eth_dev_data *data; void *io_base; -@@ -550,6 +555,11 @@ struct hns3_hw { +@@ -550,6 +553,11 @@ struct hns3_hw { * direction. 
*/ uint8_t min_tx_pkt_len; @@ -48116,7 +50990,7 @@ index 2457754b3d..1afe4c4ff7 100644 struct hns3_queue_intr intr; /* -@@ -871,13 +881,6 @@ struct hns3_adapter { +@@ -871,13 +879,6 @@ struct hns3_adapter { struct hns3_ptype_table ptype_tbl __rte_cache_aligned; }; @@ -48130,7 +51004,7 @@ index 2457754b3d..1afe4c4ff7 100644 enum hns3_dev_cap { HNS3_DEV_SUPPORT_DCB_B, HNS3_DEV_SUPPORT_COPPER_B, -@@ -891,6 +894,7 @@ enum hns3_dev_cap { +@@ -891,6 +892,7 @@ enum hns3_dev_cap { HNS3_DEV_SUPPORT_RAS_IMP_B, HNS3_DEV_SUPPORT_TM_B, HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, @@ -48138,7 +51012,7 @@ index 2457754b3d..1afe4c4ff7 100644 }; #define hns3_dev_get_support(hw, _name) \ -@@ -996,15 +1000,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) +@@ -996,15 +998,6 @@ static inline uint32_t hns3_read_reg(void *base, uint32_t reg) #define hns3_read_dev(a, reg) \ hns3_read_reg((a)->io_base, (reg)) @@ -48154,7 +51028,7 @@ index 2457754b3d..1afe4c4ff7 100644 static inline uint64_t hns3_atomic_test_bit(unsigned int nr, volatile uint64_t *addr) { -@@ -1045,22 +1040,9 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); +@@ -1045,22 +1038,9 @@ void hns3_update_linkstatus_and_event(struct hns3_hw *hw, bool query); void hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status, uint32_t link_speed, uint8_t link_duplex); void hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported); @@ -48179,7 +51053,7 @@ index 2457754b3d..1afe4c4ff7 100644 static inline bool is_reset_pending(struct hns3_adapter *hns) -@@ -1073,4 +1055,15 @@ is_reset_pending(struct hns3_adapter *hns) +@@ -1073,4 +1053,15 @@ is_reset_pending(struct hns3_adapter *hns) return ret; } @@ -48921,10 +51795,18 @@ index d220522c43..6d7654206b 100644 rte_intr_enable(pci_dev->intr_handle); diff --git a/dpdk/drivers/net/hns3/hns3_fdir.c b/dpdk/drivers/net/hns3/hns3_fdir.c -index 48a91fb517..a3e79619ec 100644 +index 48a91fb517..73d4a25d63 100644 --- a/dpdk/drivers/net/hns3/hns3_fdir.c +++ b/dpdk/drivers/net/hns3/hns3_fdir.c -@@ -974,7 +974,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, +@@ -832,6 +832,7 @@ int hns3_fdir_filter_init(struct hns3_adapter *hns) + .key_len = sizeof(struct hns3_fdir_key_conf), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, ++ .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE, + }; + int ret; + +@@ -974,7 +975,7 @@ int hns3_fdir_filter_program(struct hns3_adapter *hns, rule->key_conf.spec.src_port, rule->key_conf.spec.dst_port, ret); else @@ -48934,7 +51816,7 @@ index 48a91fb517..a3e79619ec 100644 return ret; } diff --git a/dpdk/drivers/net/hns3/hns3_flow.c b/dpdk/drivers/net/hns3/hns3_flow.c -index a2c1589c39..da17fa6e69 100644 +index a2c1589c39..9fbdf1dbe9 100644 --- a/dpdk/drivers/net/hns3/hns3_flow.c +++ b/dpdk/drivers/net/hns3/hns3_flow.c @@ -10,6 +10,125 @@ @@ -49141,6 +52023,15 @@ index a2c1589c39..da17fa6e69 100644 } static inline struct hns3_flow_counter * +@@ -158,7 +283,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id, + cnt = hns3_counter_lookup(dev, id); + if (cnt) { + if (!cnt->indirect || cnt->indirect != indirect) +- return rte_flow_error_set(error, ENOTSUP, ++ return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + cnt, + "Counter id is used, indirect flag not match"); @@ -802,7 +927,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, @@ -49150,7 +52041,19 @@ index a2c1589c39..da17fa6e69 100644 if 
(sctp_mask->hdr.src_port) { hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1); rule->key_conf.mask.src_port = -@@ -1246,7 +1371,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) +@@ -1057,6 +1182,11 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule, + "Tunnel packets must configure " + "with mask"); + ++ if (rule->key_conf.spec.tunnel_type != 0) ++ return rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, "Too many tunnel headers!"); ++ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: +@@ -1246,7 +1376,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct hns3_fdir_rule_ele *fdir_rule_ptr; @@ -49158,7 +52061,7 @@ index a2c1589c39..da17fa6e69 100644 struct hns3_flow_mem *flow_node; fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); -@@ -1256,13 +1380,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) +@@ -1256,13 +1385,6 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list); } @@ -49172,7 +52075,7 @@ index a2c1589c39..da17fa6e69 100644 flow_node = TAILQ_FIRST(&hw->flow_list); while (flow_node) { TAILQ_REMOVE(&hw->flow_list, flow_node, entries); -@@ -1273,238 +1390,478 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) +@@ -1273,238 +1395,478 @@ hns3_filterlist_flush(struct rte_eth_dev *dev) } static bool @@ -49209,6 +52112,9 @@ index a2c1589c39..da17fa6e69 100644 - !memcmp(comp->key, with->key, with->key_len); + if (comp->key == NULL && with->key == NULL) + return true; ++ ++ if (!(comp->key != NULL && with->key != NULL)) ++ return false; - return (func_is_same && rss_key_is_same && - comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) && @@ -49216,9 +52122,6 @@ index a2c1589c39..da17fa6e69 100644 - comp->queue_num == with->queue_num && - !memcmp(comp->queue, with->queue, - sizeof(*with->queue) * with->queue_num)); -+ if (!(comp->key != NULL && with->key != NULL)) -+ return false; -+ + return !memcmp(comp->key, with->key, with->key_len); } @@ -49259,11 +52162,9 @@ index a2c1589c39..da17fa6e69 100644 + return false; + + return !memcmp(comp->queue, with->queue, with->queue_num); - } - - static bool --hns3_rss_input_tuple_supported(struct hns3_hw *hw, -- const struct rte_flow_action_rss *rss) ++} ++ ++static bool +hns3_action_rss_same(const struct rte_flow_action_rss *comp, + const struct rte_flow_action_rss *with) +{ @@ -49278,9 +52179,11 @@ index a2c1589c39..da17fa6e69 100644 + return same_level && same_types && same_func && + hns3_flow_rule_key_same(comp, with) && + hns3_flow_rule_queues_same(comp, with); -+} -+ -+static bool + } + + static bool +-hns3_rss_input_tuple_supported(struct hns3_hw *hw, +- const struct rte_flow_action_rss *rss) +hns3_valid_ipv6_sctp_rss_types(struct hns3_hw *hw, uint64_t types) { /* @@ -49397,9 +52300,8 @@ index a2c1589c39..da17fa6e69 100644 + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, + "queue id must be less than queue number allocated to a TC"); - } - -- if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types) ++ } ++ + memcpy(rss_conf->queue, rss_act->queue, + rss_act->queue_num * sizeof(rss_conf->queue[0])); + rss_conf->conf.queue = rss_conf->queue; @@ -49421,8 +52323,9 @@ index a2c1589c39..da17fa6e69 100644 + /* Disable RSS hash of this packet type if types is zero. 
*/ + rss_conf->hw_pctypes |= map->hw_pctype; + return 0; -+ } -+ + } + +- if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types) + /* + * Can not have extra types except rss_pctype and l3l4_type in this map. + */ @@ -49697,14 +52600,14 @@ index a2c1589c39..da17fa6e69 100644 + ret = hns3_flow_parse_hash_key(hw, rss_act, rss_conf, error); + if (ret != 0) + return ret; - } -- *func = algo_func; ++ } + + if (rss_act->queue_num > 0) { + ret = hns3_flow_parse_queues(hw, rss_act, rss_conf, error); + if (ret != 0) + return ret; -+ } + } +- *func = algo_func; + + ret = hns3_flow_parse_pattern_type(pattern, &rss_conf->pattern_type, + error); @@ -49815,7 +52718,7 @@ index a2c1589c39..da17fa6e69 100644 } static int -@@ -1516,8 +1873,6 @@ hns3_update_indir_table(struct hns3_hw *hw, +@@ -1516,8 +1878,6 @@ hns3_update_indir_table(struct hns3_hw *hw, uint32_t i; /* Fill in redirection table */ @@ -49824,7 +52727,7 @@ index a2c1589c39..da17fa6e69 100644 for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) { j %= num; if (conf->queue[j] >= hw->alloc_rss_size) { -@@ -1532,82 +1887,106 @@ hns3_update_indir_table(struct hns3_hw *hw, +@@ -1532,82 +1892,106 @@ hns3_update_indir_table(struct hns3_hw *hw, return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size); } @@ -49992,7 +52895,7 @@ index a2c1589c39..da17fa6e69 100644 } static int -@@ -1616,51 +1995,44 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev) +@@ -1616,51 +2000,44 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev) struct hns3_adapter *hns = dev->data->dev_private; struct hns3_rss_conf_ele *rss_filter_ptr; struct hns3_hw *hw = &hns->hw; @@ -50063,7 +52966,7 @@ index a2c1589c39..da17fa6e69 100644 pthread_mutex_unlock(&hw->flows_lock); return ret; -@@ -1679,23 +2051,6 @@ hns3_restore_filter(struct hns3_adapter *hns) +@@ -1679,23 +2056,6 @@ hns3_restore_filter(struct hns3_adapter *hns) return hns3_restore_rss_filter(hw); } @@ -50087,7 +52990,7 @@ index a2c1589c39..da17fa6e69 100644 static int hns3_flow_args_check(const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], -@@ -1729,31 +2084,55 @@ static int +@@ -1729,31 +2089,55 @@ static int hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], @@ -50120,9 +53023,7 @@ index a2c1589c39..da17fa6e69 100644 +{ + struct hns3_hw *hw = &hns->hw; + int ret; - -- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); -- return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); ++ + ret = hns3_config_rss(hns); + if (ret != 0) { + hns3_err(hw, "restore original RSS configuration failed, ret = %d.", @@ -50132,7 +53033,9 @@ index a2c1589c39..da17fa6e69 100644 + ret = hns3_reconfig_all_rss_filter(hw); + if (ret != 0) + hns3_err(hw, "rebuild all RSS filter failed, ret = %d.", ret); -+ + +- memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); +- return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); + return ret; } @@ -50152,7 +53055,7 @@ index a2c1589c39..da17fa6e69 100644 int ret; rss_filter_ptr = rte_zmalloc("hns3 rss filter", -@@ -1763,28 +2142,29 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, +@@ -1763,28 +2147,29 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, return -ENOMEM; } @@ -50197,7 +53100,7 @@ index a2c1589c39..da17fa6e69 100644 TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); flow->rule = rss_filter_ptr; flow->filter_type = RTE_ETH_FILTER_HASH; -@@ -1794,31 +2174,24 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, 
+@@ -1794,31 +2179,24 @@ hns3_flow_create_rss_rule(struct rte_eth_dev *dev, static int hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, @@ -50234,7 +53137,7 @@ index a2c1589c39..da17fa6e69 100644 } fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", -@@ -1834,11 +2207,11 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, +@@ -1834,11 +2212,11 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, * rules to the hardware to simplify the rollback of rules in the * hardware. */ @@ -50248,7 +53151,7 @@ index a2c1589c39..da17fa6e69 100644 sizeof(struct hns3_fdir_rule)); TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); flow->rule = fdir_rule_ptr; -@@ -1849,8 +2222,8 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, +@@ -1849,8 +2227,8 @@ hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, err_fdir_filter: rte_free(fdir_rule_ptr); err_malloc: @@ -50259,7 +53162,7 @@ index a2c1589c39..da17fa6e69 100644 return ret; } -@@ -1868,13 +2241,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -1868,13 +2246,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error) { struct hns3_adapter *hns = dev->data->dev_private; @@ -50278,7 +53181,7 @@ index a2c1589c39..da17fa6e69 100644 if (ret) return NULL; -@@ -1894,13 +2269,12 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -1894,13 +2274,12 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, } flow_node->flow = flow; @@ -50296,7 +53199,7 @@ index a2c1589c39..da17fa6e69 100644 error, flow); if (ret == 0) return flow; -@@ -1954,16 +2328,10 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -1954,16 +2333,10 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, break; case RTE_ETH_FILTER_HASH: rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; @@ -50314,7 +53217,7 @@ index a2c1589c39..da17fa6e69 100644 break; default: return rte_flow_error_set(error, EINVAL, -@@ -2069,10 +2437,12 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev, +@@ -2069,10 +2442,12 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -50378,10 +53281,23 @@ index e4b2fdf2e6..1b49673f11 100644 TAILQ_HEAD(hns3_flow_mem_list, hns3_flow_mem); diff --git a/dpdk/drivers/net/hns3/hns3_intr.c b/dpdk/drivers/net/hns3/hns3_intr.c -index 44a1119415..916bf30dcb 100644 +index 44a1119415..d37c7eba6b 100644 --- a/dpdk/drivers/net/hns3/hns3_intr.c +++ b/dpdk/drivers/net/hns3/hns3_intr.c -@@ -2434,8 +2434,8 @@ hns3_schedule_reset(struct hns3_adapter *hns) +@@ -2252,6 +2252,12 @@ hns3_handle_module_error_data(struct hns3_hw *hw, uint32_t *buf, + sum_err_info = (struct hns3_sum_err_info *)&buf[offset++]; + mod_num = sum_err_info->mod_num; + reset_type = sum_err_info->reset_type; ++ ++ if (reset_type >= HNS3_MAX_RESET) { ++ hns3_err(hw, "invalid reset type = %u", reset_type); ++ return; ++ } ++ + if (reset_type && reset_type != HNS3_NONE_RESET) + hns3_atomic_set_bit(reset_type, &hw->reset.request); + +@@ -2434,8 +2440,8 @@ hns3_schedule_reset(struct hns3_adapter *hns) if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) == SCHEDULE_DEFERRED) rte_eal_alarm_cancel(hw->reset.ops->reset_service, hns); @@ -50392,7 +53308,7 @@ index 44a1119415..916bf30dcb 100644 __ATOMIC_RELAXED); rte_eal_alarm_set(SWITCH_CONTEXT_US, hw->reset.ops->reset_service, hns); -@@ -2749,6 +2749,7 @@ hns3_reset_post(struct 
hns3_adapter *hns) +@@ -2749,6 +2755,7 @@ hns3_reset_post(struct hns3_adapter *hns) /* IMP will wait ready flag before reset */ hns3_notify_reset_ready(hw, false); hns3_clear_reset_level(hw, &hw->reset.pending); @@ -50400,7 +53316,7 @@ index 44a1119415..916bf30dcb 100644 __atomic_store_n(&hns->hw.reset.resetting, 0, __ATOMIC_RELAXED); hw->reset.attempts = 0; hw->reset.stats.success_cnt++; -@@ -2798,6 +2799,7 @@ hns3_reset_fail_handle(struct hns3_adapter *hns) +@@ -2798,6 +2805,7 @@ hns3_reset_fail_handle(struct hns3_adapter *hns) struct timeval tv; hns3_clear_reset_level(hw, &hw->reset.pending); @@ -51181,10 +54097,54 @@ index 0000000000..2b8717fa3c + +#endif /* HNS3_PTP_H */ diff --git a/dpdk/drivers/net/hns3/hns3_regs.c b/dpdk/drivers/net/hns3/hns3_regs.c -index 33392fd1f0..5d6f92e4bb 100644 +index 33392fd1f0..e92e43959f 100644 --- a/dpdk/drivers/net/hns3/hns3_regs.c +++ b/dpdk/drivers/net/hns3/hns3_regs.c -@@ -294,8 +294,9 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) +@@ -17,13 +17,9 @@ + + static int hns3_get_dfx_reg_line(struct hns3_hw *hw, uint32_t *lines); + +-static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_ADDR_L_REG, +- HNS3_CMDQ_TX_ADDR_H_REG, +- HNS3_CMDQ_TX_DEPTH_REG, ++static const uint32_t cmdq_reg_addrs[] = {HNS3_CMDQ_TX_DEPTH_REG, + HNS3_CMDQ_TX_TAIL_REG, + HNS3_CMDQ_TX_HEAD_REG, +- HNS3_CMDQ_RX_ADDR_L_REG, +- HNS3_CMDQ_RX_ADDR_H_REG, + HNS3_CMDQ_RX_DEPTH_REG, + HNS3_CMDQ_RX_TAIL_REG, + HNS3_CMDQ_RX_HEAD_REG, +@@ -44,9 +40,7 @@ static const uint32_t common_vf_reg_addrs[] = {HNS3_MISC_VECTOR_REG_BASE, + HNS3_FUN_RST_ING, + HNS3_GRO_EN_REG}; + +-static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BASEADDR_L_REG, +- HNS3_RING_RX_BASEADDR_H_REG, +- HNS3_RING_RX_BD_NUM_REG, ++static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BD_NUM_REG, + HNS3_RING_RX_BD_LEN_REG, + HNS3_RING_RX_EN_REG, + HNS3_RING_RX_MERGE_EN_REG, +@@ -57,8 +51,6 @@ static const uint32_t ring_reg_addrs[] = {HNS3_RING_RX_BASEADDR_L_REG, + HNS3_RING_RX_FBD_OFFSET_REG, + HNS3_RING_RX_STASH_REG, + HNS3_RING_RX_BD_ERR_REG, +- HNS3_RING_TX_BASEADDR_L_REG, +- HNS3_RING_TX_BASEADDR_H_REG, + HNS3_RING_TX_BD_NUM_REG, + HNS3_RING_TX_EN_REG, + HNS3_RING_TX_PRIORITY_REG, +@@ -135,7 +127,7 @@ hns3_get_regs_length(struct hns3_hw *hw, uint32_t *length) + tqp_intr_lines = sizeof(tqp_intr_reg_addrs) / REG_LEN_PER_LINE + 1; + + len = (cmdq_lines + common_lines + ring_lines * hw->tqps_num + +- tqp_intr_lines * hw->num_msi) * REG_NUM_PER_LINE; ++ tqp_intr_lines * hw->intr_tqps_num) * REG_NUM_PER_LINE; + + if (!hns->is_vf) { + ret = hns3_get_regs_num(hw, ®s_num_32_bit, ®s_num_64_bit); +@@ -294,8 +286,9 @@ hns3_direct_access_regs(struct hns3_hw *hw, uint32_t *data) struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); uint32_t *origin_data_ptr = data; uint32_t reg_offset; @@ -52475,7 +55435,7 @@ index 8e8b056f4e..0755760b45 100644 #endif /* HNS3_RSS_H */ diff --git a/dpdk/drivers/net/hns3/hns3_rxtx.c b/dpdk/drivers/net/hns3/hns3_rxtx.c -index f1163ce8a9..16cb174f4d 100644 +index f1163ce8a9..bdc162bc5c 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx.c +++ b/dpdk/drivers/net/hns3/hns3_rxtx.c @@ -50,6 +50,8 @@ hns3_rx_queue_release_mbufs(struct hns3_rx_queue *rxq) @@ -52483,7 +55443,7 @@ index f1163ce8a9..16cb174f4d 100644 } } + for (i = 0; i < rxq->rx_rearm_nb; i++) -+ rxq->sw_ring[rxq->rx_rearm_start + i].mbuf = NULL; ++ rxq->sw_ring[(rxq->rx_rearm_start + i) % rxq->nb_rx_desc].mbuf = NULL; } for (i = 0; i < rxq->bulk_mbuf_num; i++) @@ -53074,6 +56034,22 @@ index ea1a805491..fa39f6481a 100644 +void 
hns3_start_rxtx_datapath(struct rte_eth_dev *dev); #endif /* HNS3_RXTX_H */ +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c +index cd9264d91b..b6aee9af67 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec.c ++++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec.c +@@ -66,6 +66,11 @@ hns3_rxq_rearm_mbuf(struct hns3_rx_queue *rxq) + + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { ++ /* ++ * Clear VLD bit for the first descriptor rearmed in case ++ * of going to receive packets later. ++ */ ++ rxdp[0].rx.bd_base_info = 0; + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + return; + } diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h b/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h index 55d9bf817d..1048b9db87 100644 --- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_neon.h @@ -53218,6 +56194,22 @@ index 55d9bf817d..1048b9db87 100644 rte_prefetch_non_temporal(rxdp + HNS3_DEFAULT_DESCS_PER_LOOP); parse_retcode = hns3_desc_parse_field(rxq, &sw_ring[pos], +diff --git a/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c b/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c +index 6f23ba674d..2ca6a70fed 100644 +--- a/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c ++++ b/dpdk/drivers/net/hns3/hns3_rxtx_vec_sve.c +@@ -248,6 +248,11 @@ hns3_rxq_rearm_mbuf_sve(struct hns3_rx_queue *rxq) + + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep, + HNS3_DEFAULT_RXQ_REARM_THRESH) < 0)) { ++ /* ++ * Clear VLD bit for the first descriptor rearmed in case ++ * of going to receive packets later. ++ */ ++ rxdp[0].rx.bd_base_info = 0; + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + return; + } diff --git a/dpdk/drivers/net/hns3/hns3_stats.c b/dpdk/drivers/net/hns3/hns3_stats.c index bad65fcbed..9a1e8935e5 100644 --- a/dpdk/drivers/net/hns3/hns3_stats.c @@ -53490,6 +56482,232 @@ index e1089b6bd0..d969164014 100644 .hierarchy_commit = hns3_tm_hierarchy_commit_wrap, .node_shaper_update = hns3_tm_node_shaper_update_wrap, }; +diff --git a/dpdk/drivers/net/i40e/base/i40e_adminq.c b/dpdk/drivers/net/i40e/base/i40e_adminq.c +index 27c82d9b44..cd3b0f2e45 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_adminq.c ++++ b/dpdk/drivers/net/i40e/base/i40e_adminq.c +@@ -791,12 +791,26 @@ u16 i40e_clean_asq(struct i40e_hw *hw) + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; ++ u32 head = 0; ++ ++ if (ntc >= (1 << 10)) ++ goto clean_asq_exit; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); +- while (rd32(hw, hw->aq.asq.head) != ntc) { ++ while (true) { ++ head = rd32(hw, hw->aq.asq.head); ++ ++ if (head >= asq->count) { ++ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "Read head value is improper\n"); ++ return 0; ++ } ++ ++ if (head == ntc) ++ break; ++ + i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, +- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); ++ "ntc %d head %d.\n", ntc, head); + + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = +@@ -816,6 +830,7 @@ u16 i40e_clean_asq(struct i40e_hw *hw) + + asq->next_to_clean = ntc; + ++clean_asq_exit: + return I40E_DESC_UNUSED(asq); + } + +diff --git a/dpdk/drivers/net/i40e/base/i40e_common.c b/dpdk/drivers/net/i40e/base/i40e_common.c +index 9eee104063..e5651ad80b 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_common.c ++++ b/dpdk/drivers/net/i40e/base/i40e_common.c +@@ -1017,9 +1017,6 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) + else + hw->pf_id = (u8)(func_rid & 0x7); + +- if (hw->mac.type == 
I40E_MAC_X722) +- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | +- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* NVMUpdate features structure initialization */ + hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR; + hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR; +@@ -1588,7 +1585,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) + **/ + u32 i40e_led_get(struct i40e_hw *hw) + { +- u32 current_mode = 0; + u32 mode = 0; + int i; + +@@ -1601,21 +1597,6 @@ u32 i40e_led_get(struct i40e_hw *hw) + if (!gpio_val) + continue; + +- /* ignore gpio LED src mode entries related to the activity +- * LEDs +- */ +- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) +- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); +- switch (current_mode) { +- case I40E_COMBINED_ACTIVITY: +- case I40E_FILTER_ACTIVITY: +- case I40E_MAC_ACTIVITY: +- case I40E_LINK_ACTIVITY: +- continue; +- default: +- break; +- } +- + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; + break; +@@ -1635,7 +1616,6 @@ u32 i40e_led_get(struct i40e_hw *hw) + **/ + void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) + { +- u32 current_mode = 0; + int i; + + if (mode & ~I40E_LED_MODE_VALID) { +@@ -1652,21 +1632,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) + if (!gpio_val) + continue; + +- /* ignore gpio LED src mode entries related to the activity +- * LEDs +- */ +- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) +- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); +- switch (current_mode) { +- case I40E_COMBINED_ACTIVITY: +- case I40E_FILTER_ACTIVITY: +- case I40E_MAC_ACTIVITY: +- case I40E_LINK_ACTIVITY: +- continue; +- default: +- break; +- } +- + if (I40E_IS_X710TL_DEVICE(hw->device_id)) { + u32 pin_func = 0; + +@@ -4261,8 +4226,8 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, + /* use AQ read to get the physical register offset instead + * of the port relative offset + */ +- i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); +- if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) ++ status = i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); ++ if ((status == I40E_SUCCESS) && (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))) + hw->num_ports++; + } + +@@ -8198,7 +8163,8 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, + u32 sec_off; + u32 i; + +- if (track_id == I40E_DDP_TRACKID_INVALID) { ++ if (track_id == I40E_DDP_TRACKID_INVALID || ++ track_id == I40E_DDP_TRACKID_RDONLY) { + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); + return I40E_NOT_SUPPORTED; + } +diff --git a/dpdk/drivers/net/i40e/base/i40e_diag.c b/dpdk/drivers/net/i40e/base/i40e_diag.c +index b3c4cfd3aa..4ca102cdd5 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_diag.c ++++ b/dpdk/drivers/net/i40e/base/i40e_diag.c +@@ -55,7 +55,7 @@ static enum i40e_status_code i40e_diag_reg_pattern_test(struct i40e_hw *hw, + return I40E_SUCCESS; + } + +-static struct i40e_diag_reg_test_info i40e_reg_list[] = { ++static const struct i40e_diag_reg_test_info i40e_reg_list[] = { + /* offset mask elements stride */ + {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, +@@ -81,28 +81,28 @@ enum i40e_status_code i40e_diag_reg_test(struct i40e_hw *hw) + { + enum i40e_status_code ret_code = I40E_SUCCESS; + u32 reg, mask; ++ u32 elements; + u32 i, j; + + for (i = 0; i40e_reg_list[i].offset != 0 
&& + ret_code == I40E_SUCCESS; i++) { + ++ elements = i40e_reg_list[i].elements; + /* set actual reg range for dynamically allocated resources */ + if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && + hw->func_caps.num_tx_qp != 0) +- i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; ++ elements = hw->func_caps.num_tx_qp; + if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || + i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || + i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || + i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && + hw->func_caps.num_msix_vectors != 0) +- i40e_reg_list[i].elements = +- hw->func_caps.num_msix_vectors - 1; ++ elements = hw->func_caps.num_msix_vectors - 1; + + /* test register access */ + mask = i40e_reg_list[i].mask; +- for (j = 0; j < i40e_reg_list[i].elements && +- ret_code == I40E_SUCCESS; j++) { ++ for (j = 0; j < elements && ret_code == I40E_SUCCESS; j++) { + reg = i40e_reg_list[i].offset + + (j * i40e_reg_list[i].stride); + ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); +diff --git a/dpdk/drivers/net/i40e/base/i40e_nvm.c b/dpdk/drivers/net/i40e/base/i40e_nvm.c +index f385042601..05816a4b79 100644 +--- a/dpdk/drivers/net/i40e/base/i40e_nvm.c ++++ b/dpdk/drivers/net/i40e/base/i40e_nvm.c +@@ -223,11 +223,11 @@ read_nvm_exit: + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start +- * @words: number of words to write +- * @data: buffer with words to write to the Shadow RAM ++ * @words: number of words to read ++ * @data: buffer with words to read from the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * +- * Writes a 16 bit words buffer to the Shadow RAM using the admin command. ++ * Reads a 16 bit words buffer to the Shadow RAM using the admin command. 
+ **/ + STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, +@@ -249,18 +249,18 @@ STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write error: offset %d beyond Shadow RAM limit %d\n", ++ "NVM read error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) +- /* We can write only up to 4KB (one sector), in one AQ write */ ++ /* We can read only up to 4KB (one sector), in one AQ read */ + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write fail error: tried to write %d words, limit is %d.\n", ++ "NVM read fail error: tried to read %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) +- /* A single write cannot spread over two sectors */ ++ /* A single read cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, +- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", ++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, diff --git a/dpdk/drivers/net/i40e/i40e_ethdev.c b/dpdk/drivers/net/i40e/i40e_ethdev.c index 7726a89d99..2049c32c4e 100644 --- a/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -53681,7 +56899,7 @@ index fe943a45ff..9b806d130e 100644 if (is_pf) interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; diff --git a/dpdk/drivers/net/i40e/i40e_flow.c b/dpdk/drivers/net/i40e/i40e_flow.c -index 65a826d51c..882152bd4a 100644 +index 65a826d51c..be82fac8f2 100644 --- a/dpdk/drivers/net/i40e/i40e_flow.c +++ b/dpdk/drivers/net/i40e/i40e_flow.c @@ -1236,6 +1236,14 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, @@ -53699,16 +56917,162 @@ index 65a826d51c..882152bd4a 100644 /* Not supported */ if (attr->priority) { rte_flow_error_set(error, EINVAL, -@@ -1700,8 +1708,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, +@@ -1255,27 +1263,31 @@ i40e_flow_parse_attr(const struct rte_flow_attr *attr, + return 0; + } + +-static uint16_t +-i40e_get_outer_vlan(struct rte_eth_dev *dev) ++static int ++i40e_get_outer_vlan(struct rte_eth_dev *dev, uint16_t *tpid) + { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int qinq = dev->data->dev_conf.rxmode.offloads & + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND; + uint64_t reg_r = 0; + uint16_t reg_id; +- uint16_t tpid; ++ int ret; + + if (qinq) + reg_id = 2; + else + reg_id = 3; + +- i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), ++ ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id), + ®_r, NULL); ++ if (ret != I40E_SUCCESS) { ++ PMD_DRV_LOG(ERR, "Failed to read from L2 tag ctrl register [%d]", reg_id); ++ return -EIO; ++ } + +- tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF; ++ *tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF; + +- return tpid; ++ return 0; + } + + /* 1. Last in item should be NULL as range is not supported. 
+@@ -1295,6 +1307,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + const struct rte_flow_item_eth *eth_spec; + const struct rte_flow_item_eth *eth_mask; + enum rte_flow_item_type item_type; ++ int ret; ++ uint16_t tpid; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { +@@ -1353,8 +1367,23 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6 || +- filter->ether_type == RTE_ETHER_TYPE_LLDP || +- filter->ether_type == i40e_get_outer_vlan(dev)) { ++ filter->ether_type == RTE_ETHER_TYPE_LLDP) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type in control packet filter."); ++ return -rte_errno; ++ } ++ ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (filter->ether_type == tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, +@@ -1362,6 +1391,7 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, + " control packet filter."); + return -rte_errno; + } ++ + break; + default: + break; +@@ -1633,6 +1663,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + bool outer_ip = true; + uint8_t field_idx; + int ret; ++ uint16_t tpid; + + memset(off_arr, 0, sizeof(off_arr)); + memset(len_arr, 0, sizeof(len_arr)); +@@ -1700,16 +1731,30 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, ether_type = rte_be_to_cpu_16(eth_spec->type); - if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || - ether_type == RTE_ETHER_TYPE_IPV4 || +- ether_type == RTE_ETHER_TYPE_IPV6 || +- ether_type == i40e_get_outer_vlan(dev)) { + if (ether_type == RTE_ETHER_TYPE_IPV4 || - ether_type == RTE_ETHER_TYPE_IPV6 || - ether_type == i40e_get_outer_vlan(dev)) { ++ ether_type == RTE_ETHER_TYPE_IPV6) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type."); ++ return -rte_errno; ++ } ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (ether_type == tpid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported ether_type."); + return -rte_errno; + } ++ + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + eth_spec->type; +@@ -1756,14 +1801,29 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, + rte_be_to_cpu_16(vlan_spec->inner_type); + + if (ether_type == RTE_ETHER_TYPE_IPV4 || +- ether_type == RTE_ETHER_TYPE_IPV6 || +- ether_type == i40e_get_outer_vlan(dev)) { ++ ether_type == RTE_ETHER_TYPE_IPV6) { rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported inner_type."); + return -rte_errno; + } ++ ret = i40e_get_outer_vlan(dev, &tpid); ++ if (ret != 0) { ++ rte_flow_error_set(error, EIO, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Can not get the Ethertype identifying the L2 tag"); ++ return -rte_errno; ++ } ++ if (ether_type == tpid) { ++ rte_flow_error_set(error, EINVAL, ++ RTE_FLOW_ERROR_TYPE_ITEM, ++ item, ++ "Unsupported ether_type."); ++ return -rte_errno; ++ } ++ + input_set |= I40E_INSET_LAST_ETHER_TYPE; + filter->input.flow.l2_flow.ether_type = + vlan_spec->inner_type; diff --git 
a/dpdk/drivers/net/i40e/i40e_rxtx.c b/dpdk/drivers/net/i40e/i40e_rxtx.c index 788ffb51c2..6522f2b810 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx.c @@ -53889,7 +57253,7 @@ index 761edb9d20..60baff7970 100644 if (split_packet) { int j; diff --git a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c -index 60c97d5331..74ff54c653 100644 +index 60c97d5331..3490299a6f 100644 --- a/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c +++ b/dpdk/drivers/net/i40e/i40e_rxtx_vec_avx512.c @@ -906,16 +906,13 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) @@ -53913,7 +57277,28 @@ index 60c97d5331..74ff54c653 100644 /* The cache follows the following algorithm * 1. Add the objects to the cache * 2. Anything greater than the cache min value (if it -@@ -947,7 +944,6 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) +@@ -925,6 +922,7 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_load_si512(&txep[copied]); + const __m512i b = _mm512_load_si512(&txep[copied + 8]); + const __m512i c = _mm512_load_si512(&txep[copied + 16]); +@@ -934,6 +932,12 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_load_si512(&txep[copied]); ++ const __m512i b = _mm512_load_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; +@@ -947,7 +951,6 @@ i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq) goto done; } @@ -53972,7 +57357,7 @@ index 1edebab8dc..aa18650ffa 100644 struct iavf_parser_list dist_parser_list; struct iavf_parser_list ipsec_crypto_parser_list; diff --git a/dpdk/drivers/net/iavf/iavf_ethdev.c b/dpdk/drivers/net/iavf/iavf_ethdev.c -index 3196210f2c..b6c3cd425d 100644 +index 3196210f2c..c117896547 100644 --- a/dpdk/drivers/net/iavf/iavf_ethdev.c +++ b/dpdk/drivers/net/iavf/iavf_ethdev.c @@ -131,6 +131,8 @@ static int iavf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, @@ -54005,7 +57390,48 @@ index 3196210f2c..b6c3cd425d 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { if (iavf_init_rss(ad) != 0) { -@@ -1065,6 +1066,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1011,7 +1012,7 @@ iavf_dev_start(struct rte_eth_dev *dev) + if (iavf_configure_queues(adapter, + IAVF_CFG_Q_NUM_PER_BUF, index) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); +- goto err_queue; ++ goto error; + } + num_queue_pairs -= IAVF_CFG_Q_NUM_PER_BUF; + index += IAVF_CFG_Q_NUM_PER_BUF; +@@ -1019,12 +1020,12 @@ iavf_dev_start(struct rte_eth_dev *dev) + + if (iavf_configure_queues(adapter, num_queue_pairs, index) != 0) { + PMD_DRV_LOG(ERR, "configure queues failed"); +- goto err_queue; ++ goto error; + } + + if (iavf_config_rx_queues_irqs(dev, intr_handle) != 0) { + PMD_DRV_LOG(ERR, "configure irq failed"); +- goto err_queue; ++ goto error; + } + /* re-enable intr again, because efd assign may change */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { +@@ -1044,14 +1045,12 @@ iavf_dev_start(struct rte_eth_dev *dev) + + if (iavf_start_queues(dev) != 0) { + PMD_DRV_LOG(ERR, "enable queues failed"); +- goto err_mac; ++ goto error; + } + + return 0; + +-err_mac: +- iavf_add_del_all_mac_addr(adapter, false); +-err_queue: ++error: + return -1; + } 
+ +@@ -1065,6 +1064,9 @@ iavf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -54015,7 +57441,7 @@ index 3196210f2c..b6c3cd425d 100644 if (adapter->closed) return -1; -@@ -1075,8 +1079,6 @@ iavf_dev_stop(struct rte_eth_dev *dev) +@@ -1075,19 +1077,12 @@ iavf_dev_stop(struct rte_eth_dev *dev) if (adapter->stopped == 1) return 0; @@ -54024,16 +57450,19 @@ index 3196210f2c..b6c3cd425d 100644 /* Disable the interrupt for Rx */ rte_intr_efd_disable(intr_handle); /* Rx interrupt vector mapping free */ -@@ -1089,6 +1091,8 @@ iavf_dev_stop(struct rte_eth_dev *dev) - iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, - false); + rte_intr_vec_list_free(intr_handle); +- /* remove all mac addrs */ +- iavf_add_del_all_mac_addr(adapter, false); +- +- /* remove all multicast addresses */ +- iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num, +- false); + iavf_stop_queues(dev); -+ + adapter->stopped = 1; dev->data->dev_started = 0; - -@@ -1136,7 +1140,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1136,7 +1131,6 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | @@ -54041,7 +57470,7 @@ index 3196210f2c..b6c3cd425d 100644 RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | -@@ -1145,6 +1148,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1145,6 +1139,10 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) RTE_ETH_TX_OFFLOAD_MULTI_SEGS | RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; @@ -54052,7 +57481,7 @@ index 3196210f2c..b6c3cd425d 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC) dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; -@@ -1178,6 +1185,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -1178,6 +1176,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .nb_max = IAVF_MAX_RING_DESC, .nb_min = IAVF_MIN_RING_DESC, .nb_align = IAVF_ALIGN_RING_DESC, @@ -54061,7 +57490,7 @@ index 3196210f2c..b6c3cd425d 100644 }; dev_info->err_handle_mode = RTE_ETH_ERROR_HANDLE_MODE_PASSIVE; -@@ -1350,6 +1359,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1350,6 +1350,7 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); @@ -54069,7 +57498,7 @@ index 3196210f2c..b6c3cd425d 100644 int err; if (adapter->closed) -@@ -1368,6 +1378,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -1368,6 +1369,23 @@ iavf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) err = iavf_add_del_vlan(adapter, vlan_id, on); if (err) return -EIO; @@ -54093,7 +57522,7 @@ index 3196210f2c..b6c3cd425d 100644 return 0; } -@@ -2607,6 +2634,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2607,6 +2625,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) adapter->dev_data = eth_dev->data; adapter->stopped = 1; @@ -54103,7 +57532,7 @@ index 3196210f2c..b6c3cd425d 100644 if (iavf_init_vf(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Init vf failed"); return -1; -@@ -2634,8 +2664,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2634,8 +2655,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) 
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); @@ -54112,7 +57541,7 @@ index 3196210f2c..b6c3cd425d 100644 if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* register callback func to eal lib */ -@@ -2667,18 +2695,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2667,18 +2686,19 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) ret = iavf_security_ctx_create(adapter); if (ret) { PMD_INIT_LOG(ERR, "failed to create ipsec crypto security instance"); @@ -54134,7 +57563,7 @@ index 3196210f2c..b6c3cd425d 100644 /* Start device watchdog */ iavf_dev_watchdog_enable(adapter); -@@ -2686,7 +2715,23 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) +@@ -2686,7 +2706,23 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) return 0; @@ -54158,7 +57587,7 @@ index 3196210f2c..b6c3cd425d 100644 rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; -@@ -2732,6 +2777,18 @@ iavf_dev_close(struct rte_eth_dev *dev) +@@ -2732,6 +2768,18 @@ iavf_dev_close(struct rte_eth_dev *dev) if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled) iavf_config_promisc(adapter, false, false); @@ -54235,7 +57664,7 @@ index 868921cac5..8da41bb68e 100644 return 0; diff --git a/dpdk/drivers/net/iavf/iavf_rxtx.c b/dpdk/drivers/net/iavf/iavf_rxtx.c -index cf87a6beda..6a0cf31a4c 100644 +index cf87a6beda..073f724231 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx.c @@ -654,6 +654,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -54348,14 +57777,18 @@ index cf87a6beda..6a0cf31a4c 100644 /* MSS outside the range are considered malicious */ rte_errno = EINVAL; return i; -@@ -2985,11 +3005,8 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -2985,11 +3005,12 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } - /* check the data_len in mbuf */ - if (m->data_len < IAVF_TX_MIN_PKT_LEN || - m->data_len > max_frame_size) { -+ if (m->pkt_len < IAVF_TX_MIN_PKT_LEN) { ++ /* valid packets are greater than min size, and single-buffer pkts ++ * must have data_len == pkt_len ++ */ ++ if (m->pkt_len < IAVF_TX_MIN_PKT_LEN || ++ (m->nb_segs == 1 && m->data_len != m->pkt_len)) { rte_errno = EINVAL; - PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); return i; @@ -54505,7 +57938,7 @@ index 862f6eb0c0..7bf22d5c0d 100644 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c -index b416a716cf..b0546a14c6 100644 +index b416a716cf..eb6eb9847d 100644 --- a/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c +++ b/dpdk/drivers/net/iavf/iavf_rxtx_vec_avx512.c @@ -1338,7 +1338,10 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq, @@ -54520,10 +57953,42 @@ index b416a716cf..b0546a14c6 100644 /* end up 128-bits */ 0, 0, 0, 0, 0, 0, 0, 0, +@@ -1815,6 +1818,7 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -1824,6 +1828,12 @@ iavf_tx_free_bufs_avx512(struct iavf_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ 
const __m512i b = _mm512_loadu_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; diff --git a/dpdk/drivers/net/iavf/iavf_vchnl.c b/dpdk/drivers/net/iavf/iavf_vchnl.c -index f92daf97f2..aeffb07cca 100644 +index f92daf97f2..da1eec273c 100644 --- a/dpdk/drivers/net/iavf/iavf_vchnl.c +++ b/dpdk/drivers/net/iavf/iavf_vchnl.c +@@ -246,8 +246,8 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, + case VIRTCHNL_EVENT_LINK_CHANGE: + vf->link_up = + vpe->event_data.link_event.link_status; +- if (vf->vf_res->vf_cap_flags & +- VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { ++ if (vf->vf_res != NULL && ++ vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) { + vf->link_speed = + vpe->event_data.link_event_adv.link_speed; + } else { @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, vf->link_speed = iavf_convert_link_speed(speed); } @@ -54656,7 +58121,15 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION"); return err; -@@ -665,7 +706,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) +@@ -655,6 +696,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) + VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF | + VIRTCHNL_VF_OFFLOAD_FSUB_PF | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | ++ VIRTCHNL_VF_OFFLOAD_USO | + VIRTCHNL_VF_OFFLOAD_CRC | + VIRTCHNL_VF_OFFLOAD_VLAN_V2 | + VIRTCHNL_VF_LARGE_NUM_QPAIRS | +@@ -665,7 +707,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter) args.in_args = (uint8_t *)∩︀ args.in_args_size = sizeof(caps); @@ -54665,7 +58138,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, -@@ -710,7 +751,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter) +@@ -710,7 +752,7 @@ iavf_get_supported_rxdid(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54674,7 +58147,7 @@ index f92daf97f2..aeffb07cca 100644 if (ret) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_GET_SUPPORTED_RXDIDS"); -@@ -754,7 +795,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable) +@@ -754,7 +796,7 @@ iavf_config_vlan_strip_v2(struct iavf_adapter *adapter, bool enable) args.in_args_size = sizeof(vlan_strip); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54683,7 +58156,7 @@ index f92daf97f2..aeffb07cca 100644 if (ret) PMD_DRV_LOG(ERR, "fail to execute command %s", enable ? "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2" : -@@ -794,7 +835,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable) +@@ -794,7 +836,7 @@ iavf_config_vlan_insert_v2(struct iavf_adapter *adapter, bool enable) args.in_args_size = sizeof(vlan_insert); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54692,7 +58165,7 @@ index f92daf97f2..aeffb07cca 100644 if (ret) PMD_DRV_LOG(ERR, "fail to execute command %s", enable ? "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2" : -@@ -837,7 +878,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add) +@@ -837,7 +879,7 @@ iavf_add_del_vlan_v2(struct iavf_adapter *adapter, uint16_t vlanid, bool add) args.in_args_size = sizeof(vlan_filter); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54701,7 +58174,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? 
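
Worth noting for reviewers: every virtchnl call site in iavf_vchnl.c switches from iavf_execute_vf_cmd() to iavf_execute_vf_cmd_safe() in this backport. The wrapper's definition is not part of this excerpt; presumably it serializes admin-queue commands so the alarm handler and application threads cannot interleave request/response pairs. One plausible shape of such a wrapper (an assumption, not the driver's actual code):

	#include <pthread.h>

	struct adapter_sketch {
		pthread_mutex_t vc_lock;	/* serializes virtchnl commands */
	};

	/* the raw send-and-poll routine, defined elsewhere */
	int execute_vf_cmd(struct adapter_sketch *ad, void *args, int async);

	int execute_vf_cmd_safe(struct adapter_sketch *ad, void *args, int async)
	{
		int err;

		pthread_mutex_lock(&ad->vc_lock);
		err = execute_vf_cmd(ad, args, async);
		pthread_mutex_unlock(&ad->vc_lock);
		return err;
	}
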
"OP_ADD_VLAN_V2" : "OP_DEL_VLAN_V2"); -@@ -858,7 +899,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter) +@@ -858,7 +900,7 @@ iavf_get_vlan_offload_caps_v2(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54710,7 +58183,7 @@ index f92daf97f2..aeffb07cca 100644 if (ret) { PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"); -@@ -889,7 +930,7 @@ iavf_enable_queues(struct iavf_adapter *adapter) +@@ -889,7 +931,7 @@ iavf_enable_queues(struct iavf_adapter *adapter) args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54719,7 +58192,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_ENABLE_QUEUES"); -@@ -917,7 +958,7 @@ iavf_disable_queues(struct iavf_adapter *adapter) +@@ -917,7 +959,7 @@ iavf_disable_queues(struct iavf_adapter *adapter) args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54728,7 +58201,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_DISABLE_QUEUES"); -@@ -953,7 +994,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, +@@ -953,7 +995,7 @@ iavf_switch_queue(struct iavf_adapter *adapter, uint16_t qid, args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54737,7 +58210,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", on ? "OP_ENABLE_QUEUES" : "OP_DISABLE_QUEUES"); -@@ -995,7 +1036,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) +@@ -995,7 +1037,7 @@ iavf_enable_queues_lv(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54746,7 +58219,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_ENABLE_QUEUES_V2"); -@@ -1039,7 +1080,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) +@@ -1039,7 +1081,7 @@ iavf_disable_queues_lv(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54755,7 +58228,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_DISABLE_QUEUES_V2"); -@@ -1085,7 +1126,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, +@@ -1085,7 +1127,7 @@ iavf_switch_queue_lv(struct iavf_adapter *adapter, uint16_t qid, args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54764,7 +58237,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", on ? 
"OP_ENABLE_QUEUES_V2" : "OP_DISABLE_QUEUES_V2"); -@@ -1117,7 +1158,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter) +@@ -1117,7 +1159,7 @@ iavf_configure_rss_lut(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54773,7 +58246,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_CONFIG_RSS_LUT"); -@@ -1149,7 +1190,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) +@@ -1149,7 +1191,7 @@ iavf_configure_rss_key(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54782,7 +58255,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_CONFIG_RSS_KEY"); -@@ -1247,7 +1288,7 @@ iavf_configure_queues(struct iavf_adapter *adapter, +@@ -1247,7 +1289,7 @@ iavf_configure_queues(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54791,7 +58264,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" " VIRTCHNL_OP_CONFIG_VSI_QUEUES"); -@@ -1288,7 +1329,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter) +@@ -1288,7 +1330,7 @@ iavf_config_irq_map(struct iavf_adapter *adapter) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54800,7 +58273,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command OP_CONFIG_IRQ_MAP"); -@@ -1329,7 +1370,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num, +@@ -1329,7 +1371,7 @@ iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num, args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54809,7 +58282,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR"); -@@ -1389,7 +1430,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) +@@ -1389,7 +1431,7 @@ iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add) args.in_args_size = len; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54818,7 +58291,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? "OP_ADD_ETHER_ADDRESS" : -@@ -1419,7 +1460,7 @@ iavf_query_stats(struct iavf_adapter *adapter, +@@ -1419,7 +1461,7 @@ iavf_query_stats(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54827,7 +58300,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS"); *pstats = NULL; -@@ -1457,7 +1498,7 @@ iavf_config_promisc(struct iavf_adapter *adapter, +@@ -1457,7 +1499,7 @@ iavf_config_promisc(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54836,7 +58309,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, -@@ -1500,7 +1541,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, +@@ -1500,7 +1542,7 @@ iavf_add_del_eth_addr(struct iavf_adapter *adapter, struct rte_ether_addr *addr, args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54845,7 +58318,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? 
"OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR"); -@@ -1527,7 +1568,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) +@@ -1527,7 +1569,7 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add) args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54854,7 +58327,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "fail to execute command %s", add ? "OP_ADD_VLAN" : "OP_DEL_VLAN"); -@@ -1554,7 +1595,7 @@ iavf_fdir_add(struct iavf_adapter *adapter, +@@ -1554,7 +1596,7 @@ iavf_fdir_add(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54863,7 +58336,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER"); return err; -@@ -1614,7 +1655,7 @@ iavf_fdir_del(struct iavf_adapter *adapter, +@@ -1614,7 +1656,7 @@ iavf_fdir_del(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54872,7 +58345,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER"); return err; -@@ -1661,7 +1702,7 @@ iavf_fdir_check(struct iavf_adapter *adapter, +@@ -1661,7 +1703,7 @@ iavf_fdir_check(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54881,7 +58354,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to check flow director rule"); return err; -@@ -1704,7 +1745,7 @@ iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) +@@ -1704,7 +1746,7 @@ iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54890,7 +58363,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of " "OP_FLOW_SUBSCRIBE"); -@@ -1755,7 +1796,7 @@ iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) +@@ -1755,7 +1797,7 @@ iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54899,7 +58372,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of " "OP_FLOW_UNSUBSCRIBE"); -@@ -1798,7 +1839,7 @@ iavf_flow_sub_check(struct iavf_adapter *adapter, +@@ -1798,7 +1840,7 @@ iavf_flow_sub_check(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54908,7 +58381,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to check flow subscription rule"); return err; -@@ -1838,7 +1879,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter, +@@ -1838,7 +1880,7 @@ iavf_add_del_rss_cfg(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54917,7 +58390,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of %s", -@@ -1861,7 +1902,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps) +@@ -1861,7 +1903,7 @@ iavf_get_hena_caps(struct iavf_adapter *adapter, uint64_t *caps) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54926,7 +58399,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HENA_CAPS"); -@@ -1887,7 +1928,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t hena) +@@ -1887,7 +1929,7 @@ iavf_set_hena(struct iavf_adapter *adapter, uint64_t 
hena) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54935,7 +58408,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HENA"); -@@ -1908,7 +1949,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter) +@@ -1908,7 +1950,7 @@ iavf_get_qos_cap(struct iavf_adapter *adapter) args.in_args_size = 0; args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54944,7 +58417,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, -@@ -1941,7 +1982,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, +@@ -1941,7 +1983,7 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54953,7 +58426,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" " VIRTCHNL_OP_CONFIG_TC_MAP"); -@@ -1964,7 +2005,7 @@ int iavf_set_q_bw(struct rte_eth_dev *dev, +@@ -1964,7 +2006,7 @@ int iavf_set_q_bw(struct rte_eth_dev *dev, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54962,7 +58435,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) PMD_DRV_LOG(ERR, "Failed to execute command of" " VIRTCHNL_OP_CONFIG_QUEUE_BW"); -@@ -2009,7 +2050,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, +@@ -2009,7 +2051,7 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter, i * sizeof(struct virtchnl_ether_addr); args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54971,7 +58444,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to execute command %s", -@@ -2053,13 +2094,17 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) +@@ -2053,13 +2095,17 @@ iavf_request_queues(struct rte_eth_dev *dev, uint16_t num) if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* disable interrupt to avoid the admin queue message to be read * before iavf_read_msg_from_pf. 
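
Reviewer's note on the iavf_request_queues() hunk above: while the function waits for the PF reply, background consumers of the admin queue have to be paused — either the Rx interrupt or, as in the re-armed rte_eal_alarm_set(IAVF_ALARM_INTERVAL, ...) call just below, the polling alarm. A sketch of that pause/resume pattern using the real EAL alarm API; the handler name and interval are placeholders:

	#include <rte_alarm.h>

	#define INTERVAL_US 50000	/* placeholder for IAVF_ALARM_INTERVAL */

	static void alarm_handler(void *arg)	/* periodic admin-queue poller */
	{
		(void)arg;
	}

	static void read_pf_response_quiesced(void *dev)
	{
		rte_eal_alarm_cancel(alarm_handler, dev);	/* pause polling */
		/* ... synchronously read the PF response here ... */
		rte_eal_alarm_set(INTERVAL_US, alarm_handler, dev);	/* re-arm */
	}
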
@@ -54990,7 +58463,7 @@ index f92daf97f2..aeffb07cca 100644 rte_eal_alarm_set(IAVF_ALARM_INTERVAL, iavf_dev_alarm_handler, dev); } -@@ -2098,7 +2143,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) +@@ -2098,7 +2144,7 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -54999,7 +58472,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_GET_MAX_RSS_QREGION"); return err; -@@ -2129,7 +2174,7 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter, +@@ -2129,7 +2175,7 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter, args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -55008,7 +58481,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "fail to execute command %s", "OP_INLINE_IPSEC_CRYPTO"); -@@ -2163,7 +2208,7 @@ iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 nu +@@ -2163,7 +2209,7 @@ iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 nu args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -55017,7 +58490,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command VIRTCHNL_OP_CONFIG_QUANTA"); return err; -@@ -2189,7 +2234,7 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter) +@@ -2189,7 +2235,7 @@ iavf_get_ptp_cap(struct iavf_adapter *adapter) args.out_buffer = vf->aq_resp; args.out_size = IAVF_AQ_BUF_SZ; @@ -55026,7 +58499,7 @@ index f92daf97f2..aeffb07cca 100644 if (err) { PMD_DRV_LOG(ERR, "Failed to execute command of OP_1588_PTP_GET_CAPS"); -@@ -2217,7 +2262,7 @@ iavf_get_phc_time(struct iavf_rx_queue *rxq) +@@ -2217,7 +2263,7 @@ iavf_get_phc_time(struct iavf_rx_queue *rxq) args.out_size = IAVF_AQ_BUF_SZ; rte_spinlock_lock(&vf->phc_time_aq_lock); @@ -55036,9 +58509,18 @@ index f92daf97f2..aeffb07cca 100644 PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME"); diff --git a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h -index 5a817982b4..534649802f 100644 +index 5a817982b4..1f40ae2e66 100644 --- a/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +++ b/dpdk/drivers/net/ice/base/ice_adminq_cmd.h +@@ -1621,7 +1621,7 @@ struct ice_aqc_get_link_status_data { + #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 + #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 + __le16 link_speed; +-#define ICE_AQ_LINK_SPEED_M 0x7FF ++#define ICE_AQ_LINK_SPEED_M 0xFFF + #define ICE_AQ_LINK_SPEED_10MB BIT(0) + #define ICE_AQ_LINK_SPEED_100MB BIT(1) + #define ICE_AQ_LINK_SPEED_1000MB BIT(2) @@ -1702,8 +1702,8 @@ struct ice_aqc_link_topo_addr { #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) /* Used to decode the handle field */ @@ -55151,6 +58633,51 @@ index 5391bd666b..92a520daf2 100644 /* get the data back into host order before shifting */ dest_qword = LE64_TO_CPU(src_qword); +diff --git a/dpdk/drivers/net/ice/base/ice_controlq.c b/dpdk/drivers/net/ice/base/ice_controlq.c +index 8971a140ef..576bf8e38d 100644 +--- a/dpdk/drivers/net/ice/base/ice_controlq.c ++++ b/dpdk/drivers/net/ice/base/ice_controlq.c +@@ -846,12 +846,23 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + u16 ntc = sq->next_to_clean; + struct ice_sq_cd *details; + struct ice_aq_desc *desc; ++ u32 head; + + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); + +- while (rd32(hw, cq->sq.head) != ntc) { +- ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d 
head %d.\n", ntc, rd32(hw, cq->sq.head)); ++ head = rd32(hw, sq->head); ++ if (head >= sq->count) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "Read head value (%d) exceeds allowed range.\n", ++ head); ++ return 0; ++ } ++ ++ while (head != ntc) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "ntc %d head %d.\n", ++ ntc, head); + ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); + ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); + ntc++; +@@ -859,6 +870,14 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) + ntc = 0; + desc = ICE_CTL_Q_DESC(*sq, ntc); + details = ICE_CTL_Q_DETAILS(*sq, ntc); ++ ++ head = rd32(hw, sq->head); ++ if (head >= sq->count) { ++ ice_debug(hw, ICE_DBG_AQ_MSG, ++ "Read head value (%d) exceeds allowed range.\n", ++ head); ++ return 0; ++ } + } + + sq->next_to_clean = ntc; diff --git a/dpdk/drivers/net/ice/base/ice_flex_pipe.c b/dpdk/drivers/net/ice/base/ice_flex_pipe.c index b6bc0062a3..e1c5e00c91 100644 --- a/dpdk/drivers/net/ice/base/ice_flex_pipe.c @@ -55194,7 +58721,7 @@ index be6d88f0ca..cd12d47d9b 100644 #define ICE_TXD_CTX_QW1_DTYPE_S 0 #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) diff --git a/dpdk/drivers/net/ice/base/ice_nvm.c b/dpdk/drivers/net/ice/base/ice_nvm.c -index 6550dda557..6da52f4d58 100644 +index 6550dda557..47ead01255 100644 --- a/dpdk/drivers/net/ice/base/ice_nvm.c +++ b/dpdk/drivers/net/ice/base/ice_nvm.c @@ -72,6 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, @@ -55235,21 +58762,48 @@ index 6550dda557..6da52f4d58 100644 } while (!last_cmd); *length = bytes_read; -@@ -474,7 +488,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, +@@ -457,6 +471,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) + return status; + } + ++#define check_add_overflow __builtin_add_overflow ++ + /** + * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA + * @hw: pointer to hardware structure +@@ -473,8 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type) { enum ice_status status; - u16 pfa_len, pfa_ptr; +- u16 pfa_len, pfa_ptr; - u16 next_tlv; -+ u32 next_tlv; ++ u16 pfa_len, pfa_ptr, next_tlv, max_tlv; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status != ICE_SUCCESS) { -@@ -490,25 +504,30 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, +@@ -486,38 +501,54 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); + return status; + } +- /* Starting with first TLV after PFA length, iterate through the list ++ ++ if (check_add_overflow(pfa_ptr, (u16)(pfa_len - 1), &max_tlv)) { ++ ice_debug(hw, ICE_DBG_INIT, "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", ++ pfa_ptr, pfa_len); ++ return ICE_ERR_INVAL_SIZE; ++ } ++ ++ /* The Preserved Fields Area contains a sequence of TLVs which define ++ * its contents. The PFA length includes all of the TLVs, plus its ++ * initial length word itself, *and* one final word at the end of all ++ * of the TLVs. ++ * ++ * Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. 
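
Reviewer's note on the hardened TLV walk above: the old code advanced a u16 offset with unchecked additions, so a corrupt PFA or TLV length could wrap and loop forever or read out of range; check_add_overflow, aliased to the GCC/Clang __builtin_add_overflow, turns every step into a checked add with a clean error path. A standalone demonstration of the primitive:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t pfa_ptr = 0xFFF0, pfa_len = 0x0020, max_tlv;

		/* same check as above: does pfa_ptr + (pfa_len - 1) fit in u16? */
		if (__builtin_add_overflow(pfa_ptr, (uint16_t)(pfa_len - 1), &max_tlv))
			puts("overflow detected: reject the PFA");	/* taken here */
		else
			printf("max_tlv = 0x%04x\n", max_tlv);
		return 0;
	}
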
*/ next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { -+ while (next_tlv < ((u32)pfa_ptr + pfa_len)) { ++ while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; @@ -55268,10 +58822,6 @@ index 6550dda557..6da52f4d58 100644 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); break; } -+ if (tlv_len > pfa_len) { -+ ice_debug(hw, ICE_DBG_INIT, "Invalid TLV length.\n"); -+ return ICE_ERR_INVAL_SIZE; -+ } if (tlv_sub_module_type == module_type) { if (tlv_len) { - *module_tlv = next_tlv; @@ -55279,6 +58829,22 @@ index 6550dda557..6da52f4d58 100644 *module_tlv_len = tlv_len; return ICE_SUCCESS; } + return ICE_ERR_INVAL_SIZE; + } +- /* Check next TLV, i.e. current TLV pointer + length + 2 words +- * (for current TLV's type and length) +- */ +- next_tlv = next_tlv + tlv_len + 2; ++ ++ if (check_add_overflow(next_tlv, (u16)2, &next_tlv) || ++ check_add_overflow(next_tlv, tlv_len, &next_tlv)) { ++ ice_debug(hw, ICE_DBG_INIT, "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", ++ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); ++ return ICE_ERR_INVAL_SIZE; ++ } + } + /* Module does not exist */ + return ICE_ERR_DOES_NOT_EXIST; diff --git a/dpdk/drivers/net/ice/base/ice_ptp_hw.c b/dpdk/drivers/net/ice/base/ice_ptp_hw.c index a0b8af1b94..0f02d2fcbe 100644 --- a/dpdk/drivers/net/ice/base/ice_ptp_hw.c @@ -55395,7 +58961,7 @@ index 3724ef33a8..64ed5e0f9b 100644 u32 bw; /* in Kbps */ u16 rl_multiplier; diff --git a/dpdk/drivers/net/ice/base/ice_switch.c b/dpdk/drivers/net/ice/base/ice_switch.c -index a2581f404d..073c139c43 100644 +index a2581f404d..89270a477d 100644 --- a/dpdk/drivers/net/ice/base/ice_switch.c +++ b/dpdk/drivers/net/ice/base/ice_switch.c @@ -4339,7 +4339,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, @@ -55433,6 +58999,15 @@ index a2581f404d..073c139c43 100644 if (!ice_is_prof_rule(rinfo->tun_type) && !lkups_cnt) return ICE_ERR_PARAM; +@@ -9472,8 +9473,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi, + if (!itr->vsi_list_info || + !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle)) + continue; +- /* Clearing it so that the logic can add it back */ +- ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.vsi_handle = vsi_handle; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + /* update the src in case it is VSI num */ diff --git a/dpdk/drivers/net/ice/ice_dcf.c b/dpdk/drivers/net/ice/ice_dcf.c index 1c3d22ae0f..6f7e103c3b 100644 --- a/dpdk/drivers/net/ice/ice_dcf.c @@ -56155,7 +59730,7 @@ index f35727856e..94b104fb36 100644 static void diff --git a/dpdk/drivers/net/ice/ice_rxtx.c b/dpdk/drivers/net/ice/ice_rxtx.c -index 0ea0045836..f73065b81c 100644 +index 0ea0045836..93b284ad87 100644 --- a/dpdk/drivers/net/ice/ice_rxtx.c +++ b/dpdk/drivers/net/ice/ice_rxtx.c @@ -259,7 +259,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq) @@ -56194,7 +59769,26 @@ index 0ea0045836..f73065b81c 100644 err = ice_program_hw_rx_queue(rxq); if (err) { PMD_DRV_LOG(ERR, "fail to program RX queue %u", -@@ -1761,7 +1765,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) +@@ -1118,6 +1122,10 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + tx_queue_id); + return -EINVAL; + } ++ if (txq->qtx_tail == NULL) { ++ PMD_DRV_LOG(INFO, "TX queue %u not started", tx_queue_id); ++ return 0; ++ } + vsi = txq->vsi; + + q_ids[0] = txq->reg_idx; +@@ -1132,6 +1140,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) + } + + 
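
Reviewer's note on this ice_fdir_tx_queue_stop() hunk: the NULL test on txq->qtx_tail added on entry, paired with the "txq->qtx_tail = NULL;" added just below, makes the stop idempotent — a second call, or a stop of a never-started queue, becomes a no-op. The idiom, modeled with stub types:

	#include <stddef.h>
	#include <stdio.h>

	struct txq_sketch { volatile unsigned int *qtx_tail; };

	static int queue_stop_sketch(struct txq_sketch *txq)
	{
		if (txq->qtx_tail == NULL) {	/* never started or already stopped */
			puts("queue not started; nothing to do");
			return 0;
		}
		/* ... disable the queue in hardware, release mbufs ... */
		txq->qtx_tail = NULL;		/* mark stopped for the next call */
		return 0;
	}

	int main(void)
	{
		static unsigned int tail_reg;
		struct txq_sketch q = { &tail_reg };
		queue_stop_sketch(&q);		/* stops the queue */
		return queue_stop_sketch(&q);	/* second call is a no-op */
	}
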
txq->tx_rel_mbufs(txq); ++ txq->qtx_tail = NULL; + + return 0; + } +@@ -1761,7 +1770,8 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) ice_rxd_to_vlan_tci(mb, &rxdp[j]); rxd_to_pkt_fields_ops[rxq->rxdid](rxq, mb, &rxdp[j]); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -56204,7 +59798,7 @@ index 0ea0045836..f73065b81c 100644 rxq->time_high = rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high); if (unlikely(is_tsinit)) { -@@ -2127,7 +2132,8 @@ ice_recv_scattered_pkts(void *rx_queue, +@@ -2127,7 +2137,8 @@ ice_recv_scattered_pkts(void *rx_queue, rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -56214,7 +59808,7 @@ index 0ea0045836..f73065b81c 100644 rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); if (unlikely(is_tsinit)) { -@@ -2143,7 +2149,7 @@ ice_recv_scattered_pkts(void *rx_queue, +@@ -2143,7 +2154,7 @@ ice_recv_scattered_pkts(void *rx_queue, } rxq->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000); @@ -56223,7 +59817,7 @@ index 0ea0045836..f73065b81c 100644 (ice_timestamp_dynfield_offset), rte_mbuf_timestamp_t *) = ts_ns; pkt_flags |= ice_timestamp_dynflag; -@@ -2617,7 +2623,8 @@ ice_recv_pkts(void *rx_queue, +@@ -2617,7 +2628,8 @@ ice_recv_pkts(void *rx_queue, rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd); pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0); #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC @@ -56233,7 +59827,7 @@ index 0ea0045836..f73065b81c 100644 rxq->time_high = rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high); if (unlikely(is_tsinit)) { -@@ -2726,8 +2733,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, +@@ -2726,8 +2738,9 @@ ice_parse_tunneling_params(uint64_t ol_flags, * Calculate the tunneling UDP checksum. * Shall be set only if L4TUNT = 01b and EIPT is not zero */ @@ -56245,7 +59839,7 @@ index 0ea0045836..f73065b81c 100644 *cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M; } -@@ -2738,10 +2746,7 @@ ice_txd_enable_checksum(uint64_t ol_flags, +@@ -2738,10 +2751,7 @@ ice_txd_enable_checksum(uint64_t ol_flags, union ice_tx_offload tx_offload) { /* Set MACLEN */ @@ -56257,7 +59851,7 @@ index 0ea0045836..f73065b81c 100644 *td_offset |= (tx_offload.l2_len >> 1) << ICE_TX_DESC_LEN_MACLEN_S; -@@ -3002,9 +3007,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +@@ -3002,9 +3012,12 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Fill in tunneling parameters if necessary */ cd_tunneling_params = 0; @@ -56271,7 +59865,7 @@ index 0ea0045836..f73065b81c 100644 /* Enable checksum offloading */ if (ol_flags & ICE_TX_CKSUM_OFFLOAD_MASK) -@@ -3662,23 +3670,34 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) +@@ -3662,23 +3675,34 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) } uint16_t @@ -56311,7 +59905,7 @@ index 0ea0045836..f73065b81c 100644 m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) { /** * MSS outside the range are considered malicious -@@ -3687,11 +3706,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -3687,11 +3711,8 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } @@ -56324,7 +59918,7 @@ index 0ea0045836..f73065b81c 100644 return i; } -@@ -3710,7 +3726,6 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +@@ -3710,7 +3731,6 @@ ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, if (ice_check_empty_mbuf(m) != 0) { rte_errno = EINVAL; @@ -56482,6 +60076,31 @@ index 31d6af42fd..5d591f9834 100644 if 
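
Reviewer's note on the timestamp hunks above: the ice Rx paths now also require ice_timestamp_dynflag to be non-zero before touching the mbuf field, because the dynamic-field offset is only meaningful once registration has happened. A sketch of the registration/use pairing with the public rte_mbuf_dyn API; the function names here are illustrative, not the driver's:

	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	static int ts_dynfield_offset = -1;
	static uint64_t ts_dynflag;

	static int timestamp_setup(void)
	{
		/* registers (or looks up) the shared Rx timestamp field + flag */
		return rte_mbuf_dyn_rx_timestamp_register(&ts_dynfield_offset,
							  &ts_dynflag);
	}

	static void timestamp_store(struct rte_mbuf *m, rte_mbuf_timestamp_t ns)
	{
		if (ts_dynfield_offset < 0)	/* not registered: field invalid */
			return;
		*RTE_MBUF_DYNFIELD(m, ts_dynfield_offset,
				   rte_mbuf_timestamp_t *) = ns;
		m->ol_flags |= ts_dynflag;
	}
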
(split_packet) { int j; +diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +index 5bfd5152df..5f91ee4839 100644 +--- a/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c ++++ b/dpdk/drivers/net/ice/ice_rxtx_vec_avx512.c +@@ -1020,6 +1020,7 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -1029,6 +1030,12 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ const __m512i b = _mm512_loadu_si512(&txep[copied + 16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; diff --git a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h b/dpdk/drivers/net/ice/ice_rxtx_vec_common.h index eec6ea2134..4b73465af5 100644 --- a/dpdk/drivers/net/ice/ice_rxtx_vec_common.h @@ -56571,7 +60190,7 @@ index d70c250e9a..72d77458ba 100644 rte_pmd_ice_dump_switch; }; diff --git a/dpdk/drivers/net/idpf/idpf_ethdev.c b/dpdk/drivers/net/idpf/idpf_ethdev.c -index 8b347631ce..b31cb47e90 100644 +index 8b347631ce..65b970d36d 100644 --- a/dpdk/drivers/net/idpf/idpf_ethdev.c +++ b/dpdk/drivers/net/idpf/idpf_ethdev.c @@ -563,8 +563,6 @@ idpf_dev_start(struct rte_eth_dev *dev) @@ -56601,7 +60220,17 @@ index 8b347631ce..b31cb47e90 100644 return 0; } -@@ -1313,7 +1309,11 @@ static struct rte_pci_driver rte_idpf_pmd = { +@@ -899,8 +895,7 @@ idpf_init_mbx(struct idpf_hw *hw) + if (ret != 0) + return ret; + +- LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head, +- struct idpf_ctlq_info, cq_list) { ++ LIST_FOR_EACH_ENTRY(ctlq, &hw->cq_list_head, struct idpf_ctlq_info, cq_list) { + if (ctlq->q_id == IDPF_CTLQ_ID && + ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX) + hw->asq = ctlq; +@@ -1313,7 +1308,11 @@ static struct rte_pci_driver rte_idpf_pmd = { */ RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map); @@ -56749,10 +60378,31 @@ index 730dc64ebc..1c5b5b7c38 100644 /* MTS */ #define GLTSYN_CMD_SYNC_0_0 (PF_TIMESYNC_BASE + 0x0) diff --git a/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c -index fb2b6bb53c..f31582f5fd 100644 +index fb2b6bb53c..29f9624a9d 100644 --- a/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c +++ b/dpdk/drivers/net/idpf/idpf_rxtx_vec_avx512.c -@@ -843,6 +843,10 @@ idpf_singleq_tx_release_mbufs_avx512(struct idpf_tx_queue *txq) +@@ -587,6 +587,7 @@ idpf_tx_free_bufs_avx512(struct idpf_tx_queue *txq) + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { ++#ifdef RTE_ARCH_64 + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); +@@ -596,6 +597,12 @@ idpf_tx_free_bufs_avx512(struct idpf_tx_queue *txq) + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); ++#else ++ const __m512i a = _mm512_loadu_si512(&txep[copied]); ++ const __m512i b = _mm512_loadu_si512(&txep[copied + 
16]); ++ _mm512_storeu_si512(&cache_objs[copied], a); ++ _mm512_storeu_si512(&cache_objs[copied + 16], b); ++#endif + copied += 32; + } + cache->len += n; +@@ -843,6 +850,10 @@ idpf_singleq_tx_release_mbufs_avx512(struct idpf_tx_queue *txq) } i = 0; } @@ -56901,6 +60551,47 @@ index 340fd0cd59..4ec9598b8e 100644 } RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE); +diff --git a/dpdk/drivers/net/ionic/ionic_osdep.h b/dpdk/drivers/net/ionic/ionic_osdep.h +index 68f767b920..db49f76a70 100644 +--- a/dpdk/drivers/net/ionic/ionic_osdep.h ++++ b/dpdk/drivers/net/ionic/ionic_osdep.h +@@ -30,14 +30,28 @@ + + #define __iomem + +-typedef uint8_t u8; +-typedef uint16_t u16; +-typedef uint32_t u32; +-typedef uint64_t u64; +- +-typedef uint16_t __le16; +-typedef uint32_t __le32; +-typedef uint64_t __le64; ++#ifndef u8 ++#define u8 uint8_t ++#endif ++#ifndef u16 ++#define u16 uint16_t ++#endif ++#ifndef u32 ++#define u32 uint32_t ++#endif ++#ifndef uint64_t ++#define u64 uint64_t ++#endif ++ ++#ifndef __le16 ++#define __le16 uint16_t ++#endif ++#ifndef __le32 ++#define __le32 uint32_t ++#endif ++#ifndef __le64 ++#define __le64 uint64_t ++#endif + + #define ioread8(reg) rte_read8(reg) + #define ioread32(reg) rte_read32(rte_le_to_cpu_32(reg)) diff --git a/dpdk/drivers/net/ionic/ionic_rxtx.c b/dpdk/drivers/net/ionic/ionic_rxtx.c index b9e73b4871..170d3b0802 100644 --- a/dpdk/drivers/net/ionic/ionic_rxtx.c @@ -57103,6 +60794,29 @@ index 2ef96a984a..5361867785 100644 return NULL; } +diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c b/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c +index 69fd4cd3fb..b39dd70da0 100644 +--- a/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c ++++ b/dpdk/drivers/net/ixgbe/base/ixgbe_82599.c +@@ -551,13 +551,15 @@ out: + **/ + void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) + { +- u32 autoc2_reg; + u16 ee_ctrl_2 = 0; ++ u32 autoc2_reg; ++ u32 status; + + DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); +- ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); ++ status = ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + +- if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ++ if (status == IXGBE_SUCCESS && ++ !ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; diff --git a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c b/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c index 74c5db16fa..56267bb00d 100644 --- a/dpdk/drivers/net/ixgbe/base/ixgbe_phy.c @@ -57181,7 +60895,7 @@ index 5e3ae1b519..11dbbe2a86 100644 break; default: diff --git a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c -index ae9f65b334..9e1a65a50a 100644 +index ae9f65b334..99986aa1d4 100644 --- a/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -1187,7 +1187,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) @@ -57257,16 +60971,16 @@ index ae9f65b334..9e1a65a50a 100644 + + ixgbe_get_etk_id(hw, &nvm_ver); + ixgbe_get_orom_version(hw, &nvm_ver); - -- etrack_id = (eeprom_verh << 16) | eeprom_verl; -- ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); ++ + if (nvm_ver.or_valid) { + snprintf(fw_version, fw_size, "0x%08x, %d.%d.%d", + nvm_ver.etk_id, nvm_ver.or_major, + nvm_ver.or_build, nvm_ver.or_patch); + return 0; + } -+ + +- etrack_id = (eeprom_verh << 16) | eeprom_verl; +- ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + ret = snprintf(fw_version, fw_size, "0x%08x", 
nvm_ver.etk_id); if (ret < 0) return -EINVAL; @@ -57290,7 +61004,19 @@ index ae9f65b334..9e1a65a50a 100644 memset(&link, 0, sizeof(link)); link.link_status = RTE_ETH_LINK_DOWN; link.link_speed = RTE_ETH_SPEED_NUM_NONE; -@@ -4639,14 +4660,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +@@ -4281,11 +4302,6 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + wait = 0; + +-/* BSD has no interrupt mechanism, so force NIC status synchronization. */ +-#ifdef RTE_EXEC_ENV_FREEBSD +- wait = 1; +-#endif +- + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); + else +@@ -4639,14 +4655,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; ixgbe_dev_link_status_print(dev); @@ -58173,7 +61899,7 @@ index 4a05238a96..57576c62e4 100644 int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket); void mana_mr_btree_free(struct mana_mr_btree *bt); diff --git a/dpdk/drivers/net/mana/meson.build b/dpdk/drivers/net/mana/meson.build -index 493f0d26d4..2d72eca5a8 100644 +index 493f0d26d4..3ddc230ab4 100644 --- a/dpdk/drivers/net/mana/meson.build +++ b/dpdk/drivers/net/mana/meson.build @@ -1,9 +1,9 @@ @@ -58188,6 +61914,30 @@ index 493f0d26d4..2d72eca5a8 100644 subdir_done() endif +@@ -19,12 +19,14 @@ sources += files( + ) + + libnames = ['ibverbs', 'mana'] ++libs = [] + foreach libname:libnames + lib = dependency('lib' + libname, required:false) + if not lib.found() + lib = cc.find_library(libname, required:false) + endif + if lib.found() ++ libs += lib + ext_deps += lib + else + build = false +@@ -43,7 +45,7 @@ required_symbols = [ + ] + + foreach arg:required_symbols +- if not cc.has_header_symbol(arg[0], arg[1]) ++ if not cc.has_header_symbol(arg[0], arg[1], dependencies: libs, args: cflags) + build = false + reason = 'missing symbol "' + arg[1] + '" in "' + arg[0] + '"' + subdir_done() diff --git a/dpdk/drivers/net/mana/mp.c b/dpdk/drivers/net/mana/mp.c index 92432c431d..738487f65a 100644 --- a/dpdk/drivers/net/mana/mp.c @@ -58991,7 +62741,7 @@ index 300bf27cc1..3e255157f9 100644 return pkt_sent; diff --git a/dpdk/drivers/net/memif/rte_eth_memif.c b/dpdk/drivers/net/memif/rte_eth_memif.c -index 1b1c1a652b..86b821ac5c 100644 +index 1b1c1a652b..9379c201e9 100644 --- a/dpdk/drivers/net/memif/rte_eth_memif.c +++ b/dpdk/drivers/net/memif/rte_eth_memif.c @@ -261,8 +261,6 @@ memif_free_stored_mbufs(struct pmd_process_private *proc_private, struct memif_q @@ -59003,7 +62753,18 @@ index 1b1c1a652b..86b821ac5c 100644 rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); mq->last_tail++; } -@@ -707,10 +705,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq +@@ -533,6 +531,10 @@ refill: + ret = rte_pktmbuf_alloc_bulk(mq->mempool, &mq->buffers[head & mask], n_slots); + if (unlikely(ret < 0)) + goto no_free_mbufs; ++ if (unlikely(n_slots > ring_size - (head & mask))) { ++ rte_memcpy(mq->buffers, &mq->buffers[ring_size], ++ (n_slots + (head & mask) - ring_size) * sizeof(struct rte_mbuf *)); ++ } + + while (n_slots--) { + s0 = head++ & mask; +@@ -707,10 +709,6 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq next_in_chain: /* store pointer to mbuf to free it later */ mq->buffers[slot & mask] = mbuf; @@ -59014,7 +62775,21 @@ index 1b1c1a652b..86b821ac5c 100644 /* populate descriptor */ d0 = &ring->desc[slot & mask]; d0->length = rte_pktmbuf_data_len(mbuf); -@@ -1240,6 +1234,7 @@ 
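
Reviewer's note on the memif zero-copy refill fix above: rte_pktmbuf_alloc_bulk() needs one contiguous destination array, but the refill window can wrap the ring — hence the buffers array grown to 2x ring size and the rte_memcpy() that folds the overrun tail back to the front. The index arithmetic, modeled standalone with plain pointers:

	#include <stdio.h>
	#include <string.h>

	#define RING_SIZE 8	/* stand-in for 1 << log2_ring_size */

	int main(void)
	{
		void *buffers[2 * RING_SIZE] = {0};	/* 2x shadow, as in the fix */
		unsigned int head = 6, mask = RING_SIZE - 1, n_slots = 5, i;
		char objs[5];	/* pretend these are freshly allocated mbufs */

		/* contiguous "bulk alloc" starting at head & mask, may overrun */
		for (i = 0; i < n_slots; i++)
			buffers[(head & mask) + i] = &objs[i];

		if (n_slots > RING_SIZE - (head & mask))	/* window wrapped */
			memcpy(buffers, &buffers[RING_SIZE],
			       (n_slots + (head & mask) - RING_SIZE) * sizeof(void *));

		for (i = 0; i < n_slots; i++)
			printf("slot %u -> obj %td\n", (head + i) & mask,
			       (char *)buffers[(head + i) & mask] - objs);
		return 0;
	}
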
memif_dev_start(struct rte_eth_dev *dev) +@@ -1133,8 +1131,12 @@ memif_init_queues(struct rte_eth_dev *dev) + } + mq->buffers = NULL; + if (pmd->flags & ETH_MEMIF_FLAG_ZERO_COPY) { ++ /* ++ * Allocate 2x ring_size to reserve a contiguous array for ++ * rte_pktmbuf_alloc_bulk (to store allocated mbufs). ++ */ + mq->buffers = rte_zmalloc("bufs", sizeof(struct rte_mbuf *) * +- (1 << mq->log2_ring_size), 0); ++ (1 << (mq->log2_ring_size + 1)), 0); + if (mq->buffers == NULL) + return -ENOMEM; + } +@@ -1240,6 +1242,7 @@ memif_dev_start(struct rte_eth_dev *dev) { struct pmd_internals *pmd = dev->data->dev_private; int ret = 0; @@ -59022,7 +62797,7 @@ index 1b1c1a652b..86b821ac5c 100644 switch (pmd->role) { case MEMIF_ROLE_CLIENT: -@@ -1254,13 +1249,28 @@ memif_dev_start(struct rte_eth_dev *dev) +@@ -1254,13 +1257,28 @@ memif_dev_start(struct rte_eth_dev *dev) break; } @@ -59109,7 +62884,7 @@ index a54016f4a2..1389b606cc 100644 } diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr.h b/dpdk/drivers/net/mlx5/hws/mlx5dr.h -index f8de27c615..d570810e95 100644 +index f8de27c615..d5a5299a23 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr.h +++ b/dpdk/drivers/net/mlx5/hws/mlx5dr.h @@ -81,6 +81,7 @@ enum mlx5dr_action_aso_ct_flags { @@ -59120,6 +62895,18 @@ index f8de27c615..d570810e95 100644 /* Allow relaxed matching by skipping derived dependent match fields. */ MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH = 1, }; +@@ -94,8 +95,10 @@ struct mlx5dr_context_attr { + uint16_t queues; + uint16_t queue_size; + size_t initial_log_ste_memory; /* Currently not in use */ +- /* Optional PD used for allocating res ources */ ++ /* Optional PD used for allocating resources */ + struct ibv_pd *pd; ++ /* Optional the STC array size for that context */ ++ size_t initial_log_stc_memory; + }; + + struct mlx5dr_table_attr { diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c index b0ae4e7693..4fb9a03d80 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_action.c @@ -59181,10 +62968,40 @@ index 721376b8da..acad42e12e 100644 if (!devx_obj->obj) { DR_LOG(ERR, "Failed to create header_modify_pattern"); diff --git a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c -index 76ada7bb7f..d1923a8e93 100644 +index 76ada7bb7f..21047b8384 100644 --- a/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c +++ b/dpdk/drivers/net/mlx5/hws/mlx5dr_context.c -@@ -210,6 +210,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx, +@@ -4,7 +4,8 @@ + + #include "mlx5dr_internal.h" + +-static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx) ++static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx, ++ struct mlx5dr_context_attr *attr) + { + struct mlx5dr_pool_attr pool_attr = {0}; + uint8_t max_log_sz; +@@ -16,7 +17,9 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx) + /* Create an STC pool per FT type */ + pool_attr.pool_type = MLX5DR_POOL_TYPE_STC; + pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL; +- max_log_sz = RTE_MIN(MLX5DR_POOL_STC_LOG_SZ, ctx->caps->stc_alloc_log_max); ++ if (!attr->initial_log_stc_memory) ++ attr->initial_log_stc_memory = MLX5DR_POOL_STC_LOG_SZ; ++ max_log_sz = RTE_MIN(attr->initial_log_stc_memory, ctx->caps->stc_alloc_log_max); + pool_attr.alloc_log_sz = RTE_MAX(max_log_sz, ctx->caps->stc_alloc_log_gran); + + for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) { +@@ -151,7 +154,7 @@ static int mlx5dr_context_init_hws(struct mlx5dr_context *ctx, + if (ret) + return ret; + +- ret = mlx5dr_context_pools_init(ctx); 
++ ret = mlx5dr_context_pools_init(ctx, attr); + if (ret) + goto uninit_pd; + +@@ -210,6 +213,7 @@ struct mlx5dr_context *mlx5dr_context_open(struct ibv_context *ibv_ctx, free_caps: simple_free(ctx->caps); free_ctx: @@ -60195,7 +64012,7 @@ index ed71289322..3f7a94c9ee 100644 + #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */ diff --git a/dpdk/drivers/net/mlx5/linux/mlx5_os.c b/dpdk/drivers/net/mlx5/linux/mlx5_os.c -index a71474c90a..b88ae631d9 100644 +index a71474c90a..183b5e6a84 100644 --- a/dpdk/drivers/net/mlx5/linux/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/linux/mlx5_os.c @@ -455,15 +455,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv) @@ -60320,7 +64137,22 @@ index a71474c90a..b88ae631d9 100644 priv->root_drop_action = priv->sh->dr_drop_action; else priv->root_drop_action = priv->drop_queue.hrxq->action; -@@ -1508,13 +1555,6 @@ err_secondary: +@@ -1486,9 +1533,11 @@ err_secondary: + priv->ctrl_flows = 0; + rte_spinlock_init(&priv->flow_list_lock); + TAILQ_INIT(&priv->flow_meters); +- priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); +- if (!priv->mtr_profile_tbl) +- goto error; ++ if (priv->mtr_en) { ++ priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); ++ if (!priv->mtr_profile_tbl) ++ goto error; ++ } + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up", + eth_dev->data->port_id); +@@ -1508,13 +1557,6 @@ err_secondary: } /* Create context for virtual machine VLAN workaround. */ priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex); @@ -60334,7 +64166,7 @@ index a71474c90a..b88ae631d9 100644 if (mlx5_devx_obj_ops_en(sh)) { priv->obj_ops = devx_obj_ops; mlx5_queue_counter_id_prepare(eth_dev); -@@ -1565,6 +1605,13 @@ err_secondary: +@@ -1565,6 +1607,13 @@ err_secondary: goto error; } rte_rwlock_init(&priv->ind_tbls_lock); @@ -60348,7 +64180,7 @@ index a71474c90a..b88ae631d9 100644 if (priv->sh->config.dv_flow_en == 2) { #ifdef HAVE_MLX5_HWS_SUPPORT if (priv->sh->config.dv_esw_en) { -@@ -1613,6 +1660,23 @@ err_secondary: +@@ -1613,6 +1662,23 @@ err_secondary: err = EINVAL; goto error; } @@ -60372,7 +64204,7 @@ index a71474c90a..b88ae631d9 100644 return eth_dev; #else DRV_LOG(ERR, "DV support is missing for HWS."); -@@ -1631,43 +1695,6 @@ err_secondary: +@@ -1631,43 +1697,6 @@ err_secondary: err = -err; goto error; } @@ -60416,7 +64248,7 @@ index a71474c90a..b88ae631d9 100644 rte_spinlock_init(&priv->shared_act_sl); mlx5_flow_counter_mode_config(eth_dev); mlx5_flow_drop_action_config(eth_dev); -@@ -1686,8 +1713,6 @@ error: +@@ -1686,8 +1715,6 @@ error: priv->sh->config.dv_esw_en) flow_hw_destroy_vport_action(eth_dev); #endif @@ -60425,7 +64257,7 @@ index a71474c90a..b88ae631d9 100644 if (priv->sh) mlx5_os_free_shared_dr(priv); if (priv->nl_socket_route >= 0) -@@ -2145,8 +2170,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, +@@ -2145,8 +2172,7 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev, list[ns].info.master = 0; list[ns].info.representor = 0; } @@ -60435,7 +64267,7 @@ index a71474c90a..b88ae631d9 100644 break; case MLX5_PHYS_PORT_NAME_TYPE_PFHPF: /* Fallthrough */ -@@ -2665,9 +2689,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, +@@ -2665,9 +2691,15 @@ mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, if (priv->sh) { if (priv->q_counters != NULL && @@ -60453,7 +64285,7 @@ index a71474c90a..b88ae631d9 100644 priv->sh->ibdev_path, priv->dev_port, diff --git a/dpdk/drivers/net/mlx5/mlx5.c b/dpdk/drivers/net/mlx5/mlx5.c -index e55be8720e..d66254740b 100644 +index 
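
Reviewer's note on the mlx5dr change above: mlx5dr_context_pools_init() now receives the open attributes so a caller-supplied initial_log_stc_memory can size the STC pool, defaulting to MLX5DR_POOL_STC_LOG_SZ and still clamped between the device's allocation ceiling and granularity floor; the MLX5_REPR_STC_MEMORY_LOG definition added later in this patch suggests representor ports request a smaller pool. The clamp, modeled with made-up capability values:

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))
	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		unsigned int requested_log = 14;	/* attr->initial_log_stc_memory */
		unsigned int log_max = 12;	/* caps->stc_alloc_log_max (example) */
		unsigned int log_gran = 6;	/* caps->stc_alloc_log_gran (example) */
		unsigned int alloc_log_sz = MAX(MIN(requested_log, log_max), log_gran);

		printf("allocating 2^%u STC entries\n", alloc_log_sz);	/* 2^12 */
		return 0;
	}
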
e55be8720e..94873dfe89 100644 --- a/dpdk/drivers/net/mlx5/mlx5.c +++ b/dpdk/drivers/net/mlx5/mlx5.c @@ -241,7 +241,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { @@ -60470,8 +64302,12 @@ index e55be8720e..d66254740b 100644 .trunk_size = 64, .grow_trunk = 3, .grow_shift = 2, -@@ -902,6 +907,14 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh) - sizeof(struct mlx5_flow_handle) : +@@ -899,9 +904,17 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh) + */ + case MLX5_IPOOL_MLX5_FLOW: + cfg.size = sh->config.dv_flow_en ? +- sizeof(struct mlx5_flow_handle) : ++ RTE_ALIGN_MUL_CEIL(sizeof(struct mlx5_flow_handle), 8) : MLX5_FLOW_HANDLE_VERBS_SIZE; break; +#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) @@ -60505,7 +64341,25 @@ index e55be8720e..d66254740b 100644 exit: pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); return sh; -@@ -1976,8 +1993,12 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) +@@ -1944,6 +1961,7 @@ int + mlx5_proc_priv_init(struct rte_eth_dev *dev) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_proc_priv *ppriv; + size_t ppriv_size; + +@@ -1964,6 +1982,9 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) + dev->process_private = ppriv; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + priv->sh->pppriv = ppriv; ++ /* Check and try to map HCA PCI BAR to allow reading real time. */ ++ if (sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) ++ mlx5_txpp_map_hca_bar(dev); + return 0; + } + +@@ -1976,8 +1997,12 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) void mlx5_proc_priv_uninit(struct rte_eth_dev *dev) { @@ -60519,7 +64373,7 @@ index e55be8720e..d66254740b 100644 mlx5_free(dev->process_private); dev->process_private = NULL; } -@@ -2037,6 +2058,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -2037,6 +2062,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) mlx5_flex_item_port_cleanup(dev); #ifdef HAVE_MLX5_HWS_SUPPORT flow_hw_destroy_vport_action(dev); @@ -60527,7 +64381,7 @@ index e55be8720e..d66254740b 100644 flow_hw_resource_release(dev); flow_hw_clear_port_info(dev); if (priv->sh->config.dv_flow_en == 2) { -@@ -2053,7 +2075,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -2053,7 +2079,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) mlx5_free(priv->rxq_privs); priv->rxq_privs = NULL; } @@ -60536,7 +64390,7 @@ index e55be8720e..d66254740b 100644 /* XXX race condition if mlx5_tx_burst() is still running. 
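
Reviewer's note on the mlx5.c ipool hunk above: rounding the DV flow-handle entry size with RTE_ALIGN_MUL_CEIL(sizeof(struct mlx5_flow_handle), 8) keeps every indexed-pool slot 8-byte aligned no matter what the raw sizeof works out to. The macro is plain round-up-to-multiple arithmetic, reimplemented here so the example stands alone:

	#include <stdio.h>

	/* same arithmetic as DPDK's RTE_ALIGN_MUL_CEIL(v, mul) */
	#define ALIGN_MUL_CEIL(v, mul) ((((v) + (mul) - 1) / (mul)) * (mul))

	int main(void)
	{
		unsigned int sz;

		for (sz = 41; sz <= 48; sz++)	/* candidate handle sizes */
			printf("sizeof=%u -> slot=%u\n", sz, ALIGN_MUL_CEIL(sz, 8u));
		return 0;
	}
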
*/ rte_delay_us_sleep(1000); for (i = 0; (i != priv->txqs_n); ++i) -@@ -2062,16 +2084,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) +@@ -2062,16 +2088,20 @@ mlx5_dev_close(struct rte_eth_dev *dev) priv->txqs = NULL; } mlx5_proc_priv_uninit(dev); @@ -60561,7 +64415,7 @@ index e55be8720e..d66254740b 100644 if (priv->rss_conf.rss_key != NULL) mlx5_free(priv->rss_conf.rss_key); if (priv->reta_idx != NULL) -@@ -2473,6 +2499,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, +@@ -2473,6 +2503,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM; @@ -60570,9 +64424,18 @@ index e55be8720e..d66254740b 100644 config->std_delay_drop = 0; config->hp_delay_drop = 0; diff --git a/dpdk/drivers/net/mlx5/mlx5.h b/dpdk/drivers/net/mlx5/mlx5.h -index 31982002ee..5d826527b2 100644 +index 31982002ee..7053db5fa7 100644 --- a/dpdk/drivers/net/mlx5/mlx5.h +++ b/dpdk/drivers/net/mlx5/mlx5.h +@@ -60,7 +60,7 @@ + #define MLX5_ROOT_TBL_MODIFY_NUM 16 + + /* Maximal number of flex items created on the port.*/ +-#define MLX5_PORT_FLEX_ITEM_NUM 4 ++#define MLX5_PORT_FLEX_ITEM_NUM 8 + + /* Maximal number of field/field parts to map into sample registers .*/ + #define MLX5_FLEX_ITEM_MAPPING_NUM 32 @@ -234,16 +234,29 @@ struct mlx5_counter_ctrl { struct mlx5_xstats_ctrl { /* Number of device stats. */ @@ -60730,7 +64593,15 @@ index 31982002ee..5d826527b2 100644 struct mlx5_mtr_config mtr_config; /* Meter configuration */ uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */ uint8_t mtr_color_reg; /* Meter color match REG_C. */ -@@ -1768,6 +1824,8 @@ struct mlx5_priv { +@@ -1748,6 +1804,7 @@ struct mlx5_priv { + uint32_t nb_queue; /* HW steering queue number. */ + struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */ + uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */ ++ bool hws_rule_flushing; /**< Whether this port is in rules flushing stage. */ + #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) + /* Item template list. */ + LIST_HEAD(flow_hw_itt, rte_flow_pattern_template) flow_hw_itt; +@@ -1768,6 +1825,8 @@ struct mlx5_priv { struct mlx5dr_action *hw_drop[2]; /* HW steering global tag action. */ struct mlx5dr_action *hw_tag[2]; @@ -60739,7 +64610,7 @@ index 31982002ee..5d826527b2 100644 /* HW steering create ongoing rte flow table list header. */ LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo; struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. 
*/ -@@ -1925,8 +1983,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, +@@ -1925,8 +1984,9 @@ int mlx5_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info); int mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat); @@ -60751,7 +64622,7 @@ index 31982002ee..5d826527b2 100644 void mlx5_os_stats_init(struct rte_eth_dev *dev); int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); -@@ -2163,6 +2222,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, +@@ -2163,6 +2223,8 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int n, unsigned int n_used); void mlx5_txpp_interrupt_handler(void *cb_arg); @@ -60776,7 +64647,7 @@ index 02deaac612..7e0ec91328 100644 mlx5_devx_tir_destroy(hrxq); if (hrxq->ind_table->ind_table != NULL) diff --git a/dpdk/drivers/net/mlx5/mlx5_ethdev.c b/dpdk/drivers/net/mlx5/mlx5_ethdev.c -index 4a85415ff3..df7cd241a2 100644 +index 4a85415ff3..08c6b18975 100644 --- a/dpdk/drivers/net/mlx5/mlx5_ethdev.c +++ b/dpdk/drivers/net/mlx5/mlx5_ethdev.c @@ -146,6 +146,12 @@ mlx5_dev_configure(struct rte_eth_dev *dev) @@ -60792,8 +64663,19 @@ index 4a85415ff3..df7cd241a2 100644 return 0; } +@@ -345,6 +351,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) + info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK; + mlx5_set_default_params(dev, info); + mlx5_set_txlimit_params(dev, info); ++ info->rx_desc_lim.nb_max = ++ 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; ++ info->tx_desc_lim.nb_max = ++ 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz; + if (priv->sh->cdev->config.hca_attr.mem_rq_rmp && + priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new) + info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE; diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.c b/dpdk/drivers/net/mlx5/mlx5_flow.c -index a0cf677fb0..a44ccea436 100644 +index a0cf677fb0..f66edf1c64 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow.c @@ -364,7 +364,7 @@ mlx5_flow_expand_rss_skip_explicit(const struct mlx5_flow_expand_node graph[], @@ -61132,7 +65014,17 @@ index a0cf677fb0..a44ccea436 100644 /* * The following information is required by * mlx5_flow_hashfields_adjust() in advance. 
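
Reviewer's note on the mlx5_dev_infos_get() hunk above: rx_desc_lim.nb_max and tx_desc_lim.nb_max are now derived from the HCA capability log_max_wq_sz rather than left at the ethdev defaults, so applications see the device's real ring-size ceiling. The computation, with an illustrative capability value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t log_max_wq_sz = 15;	/* from hca_attr (example value) */
		uint32_t nb_max = UINT32_C(1) << log_max_wq_sz;

		printf("rx/tx desc_lim.nb_max = %u\n", nb_max);	/* 32768 */
		return 0;
	}
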
-@@ -7555,7 +7558,6 @@ flow_release_workspace(void *data) +@@ -7471,7 +7474,9 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type, + #ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (priv->sh->config.dv_flow_en == 2 && + type == MLX5_FLOW_TYPE_GEN) { ++ priv->hws_rule_flushing = true; + flow_hw_q_flow_flush(dev, NULL); ++ priv->hws_rule_flushing = false; + return; + } + #endif +@@ -7555,7 +7560,6 @@ flow_release_workspace(void *data) while (wks) { next = wks->next; @@ -61140,7 +65032,7 @@ index a0cf677fb0..a44ccea436 100644 free(wks); wks = next; } -@@ -7586,23 +7588,17 @@ mlx5_flow_get_thread_workspace(void) +@@ -7586,23 +7590,17 @@ mlx5_flow_get_thread_workspace(void) static struct mlx5_flow_workspace* flow_alloc_thread_workspace(void) { @@ -61170,7 +65062,7 @@ index a0cf677fb0..a44ccea436 100644 } /** -@@ -7623,6 +7619,7 @@ mlx5_flow_push_thread_workspace(void) +@@ -7623,6 +7621,7 @@ mlx5_flow_push_thread_workspace(void) data = flow_alloc_thread_workspace(); if (!data) return NULL; @@ -61178,7 +65070,7 @@ index a0cf677fb0..a44ccea436 100644 } else if (!curr->inuse) { data = curr; } else if (curr->next) { -@@ -7971,6 +7968,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, +@@ -7971,6 +7970,10 @@ mlx5_flow_isolate(struct rte_eth_dev *dev, "port must be stopped first"); return -rte_errno; } @@ -61189,7 +65081,7 @@ index a0cf677fb0..a44ccea436 100644 priv->isolated = !!enable; if (enable) dev->dev_ops = &mlx5_dev_ops_isolate; -@@ -9758,23 +9759,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, +@@ -9758,23 +9761,47 @@ mlx5_flow_dev_dump_sh_all(struct rte_eth_dev *dev, } i = lcore_index; @@ -61254,7 +65146,7 @@ index a0cf677fb0..a44ccea436 100644 } } -@@ -10104,9 +10129,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, +@@ -10104,9 +10131,19 @@ mlx5_action_handle_update(struct rte_eth_dev *dev, const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr)); int ret; @@ -61276,7 +65168,7 @@ index a0cf677fb0..a44ccea436 100644 if (ret) return ret; return flow_drv_action_update(dev, handle, update, fops, -@@ -10841,7 +10876,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, +@@ -10841,7 +10878,7 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev, if (!is_tunnel_offload_active(dev)) return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, @@ -61286,7 +65178,7 @@ index a0cf677fb0..a44ccea436 100644 return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, diff --git a/dpdk/drivers/net/mlx5/mlx5_flow.h b/dpdk/drivers/net/mlx5/mlx5_flow.h -index 1f57ecd6e1..4f3a216ed4 100644 +index 1f57ecd6e1..fb2eb05c06 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow.h +++ b/dpdk/drivers/net/mlx5/mlx5_flow.h @@ -75,7 +75,7 @@ enum { @@ -61298,7 +65190,17 @@ index 1f57ecd6e1..4f3a216ed4 100644 #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) /* 29-31: type, 25-28: owner port, 0-24: index */ -@@ -1437,10 +1437,10 @@ struct mlx5_flow_workspace { +@@ -117,6 +117,9 @@ struct mlx5_flow_action_copy_mreg { + /* Matches on source queue. */ + struct mlx5_rte_flow_item_sq { + uint32_t queue; /* DevX SQ number */ ++#ifdef RTE_ARCH_64 ++ uint32_t reserved; ++#endif + }; + + /* Feature name to allocate metadata register. */ +@@ -1437,10 +1440,10 @@ struct mlx5_flow_workspace { /* If creating another flow in same thread, push new as stack. 
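
Reviewer's note on the workspace rework above: flow_alloc_thread_workspace() appears to move to a single calloc'd block, with the per-workspace RSS queue array carved out of trailing bytes instead of a second allocation — one free on teardown and better locality. The single-allocation idiom with a stub struct; the real type is mlx5_flow_workspace:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct ws_sketch {
		uint16_t *queue;	/* aimed into the same allocation */
		unsigned int inuse;
	};

	int main(void)
	{
		size_t nq = 128;	/* stand-in for the RSS queue array length */
		struct ws_sketch *ws = calloc(1, sizeof(*ws) + nq * sizeof(uint16_t));

		if (ws == NULL)
			return 1;
		ws->queue = (uint16_t *)(ws + 1);	/* trailing storage */
		ws->queue[0] = 42;
		printf("queue[0]=%u\n", ws->queue[0]);
		free(ws);
		return 0;
	}
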
*/ struct mlx5_flow_workspace *prev; struct mlx5_flow_workspace *next; @@ -61310,7 +65212,7 @@ index 1f57ecd6e1..4f3a216ed4 100644 uint32_t flow_idx; /* Intermediate device flow index. */ struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */ struct mlx5_flow_meter_policy *policy; -@@ -1594,6 +1594,28 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id) +@@ -1594,6 +1597,28 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id) } } @@ -61339,7 +65241,7 @@ index 1f57ecd6e1..4f3a216ed4 100644 void flow_hw_set_port_info(struct rte_eth_dev *dev); void flow_hw_clear_port_info(struct rte_eth_dev *dev); -@@ -1926,6 +1948,7 @@ struct mlx5_flow_driver_ops { +@@ -1926,6 +1951,7 @@ struct mlx5_flow_driver_ops { struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); void mlx5_flow_pop_thread_workspace(void); struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); @@ -61347,7 +65249,25 @@ index 1f57ecd6e1..4f3a216ed4 100644 __extension__ struct flow_grp_info { uint64_t external:1; -@@ -2185,6 +2208,25 @@ struct mlx5_flow_hw_ctrl_rx { +@@ -2157,13 +2183,13 @@ enum mlx5_flow_ctrl_rx_eth_pattern_type { + + /* All types of RSS actions used in control flow rules. */ + enum mlx5_flow_ctrl_rx_expanded_rss_type { +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP = 0, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP, +- MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4, ++ MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP, + MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX, + }; + +@@ -2185,6 +2211,25 @@ struct mlx5_flow_hw_ctrl_rx { [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; }; @@ -61373,7 +65293,7 @@ index 1f57ecd6e1..4f3a216ed4 100644 #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) -@@ -2226,7 +2268,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, +@@ -2226,7 +2271,8 @@ int mlx5_validate_action_rss(struct rte_eth_dev *dev, int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, struct rte_flow_error *error); @@ -61383,7 +65303,7 @@ index 1f57ecd6e1..4f3a216ed4 100644 const struct rte_flow_attr *attr, struct rte_flow_error *error); int mlx5_flow_validate_action_flag(uint64_t action_flags, -@@ -2579,10 +2622,13 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, +@@ -2579,10 +2625,13 @@ int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev); int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, @@ -61399,10 +65319,38 @@ index 1f57ecd6e1..4f3a216ed4 100644 int mlx5_flow_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_actions_template_attr *attr, const struct rte_flow_action actions[], +@@ -2598,4 +2647,7 @@ int mlx5_flow_item_field_width(struct rte_eth_dev *dev, + enum rte_flow_field_id field, int inherit, + const struct rte_flow_attr *attr, + struct rte_flow_error *error); ++ ++#define MLX5_REPR_STC_MEMORY_LOG 11 ++ + #endif /* RTE_PMD_MLX5_FLOW_H_ */ diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c -index 29bd7ce9e8..8441be3dea 100644 +index 29bd7ce9e8..805144e27e 100644 --- 
a/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_aso.c +@@ -489,7 +489,7 @@ mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe) + int i; + + DRV_LOG(ERR, "Error cqe:"); +- for (i = 0; i < 16; i += 4) ++ for (i = 0; i < (int)sizeof(struct mlx5_error_cqe) / 4; i += 4) + DRV_LOG(ERR, "%08X %08X %08X %08X", cqe[i], cqe[i + 1], + cqe[i + 2], cqe[i + 3]); + DRV_LOG(ERR, "\nError wqe:"); +@@ -509,8 +509,8 @@ mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq) + { + struct mlx5_aso_cq *cq = &sq->cq; + uint32_t idx = cq->cq_ci & ((1 << cq->log_desc_n) - 1); +- volatile struct mlx5_err_cqe *cqe = +- (volatile struct mlx5_err_cqe *)&cq->cq_obj.cqes[idx]; ++ volatile struct mlx5_error_cqe *cqe = ++ (volatile struct mlx5_error_cqe *)&cq->cq_obj.cqes[idx]; + + cq->errors++; + idx = rte_be_to_cpu_16(cqe->wqe_counter) & (1u << sq->log_desc_n); @@ -932,7 +932,8 @@ mlx5_aso_mtr_completion_handle(struct mlx5_aso_sq *sq, bool need_lock) rte_spinlock_lock(&sq->sqsl); max = (uint16_t)(sq->head - sq->tail); @@ -61414,7 +65362,7 @@ index 29bd7ce9e8..8441be3dea 100644 } do { diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c -index 62c38b87a1..6521b5b230 100644 +index 62c38b87a1..4e0ed4415f 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_dv.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_dv.c @@ -267,21 +267,41 @@ struct field_modify_info modify_tcp[] = { @@ -61959,7 +65907,16 @@ index 62c38b87a1..6521b5b230 100644 break; case RTE_FLOW_ITEM_TYPE_GTP: ret = flow_dv_validate_item_gtp(dev, items, item_flags, -@@ -7486,6 +7554,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7435,6 +7503,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, + tunnel != 0, error); + if (ret < 0) + return ret; ++ /* Reset for next proto, it is unknown. 
*/ ++ next_protocol = 0xff; + break; + case RTE_FLOW_ITEM_TYPE_METER_COLOR: + ret = flow_dv_validate_item_meter_color(dev, items, +@@ -7486,6 +7556,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, error); if (ret) return ret; @@ -61974,7 +65931,7 @@ index 62c38b87a1..6521b5b230 100644 action_flags |= MLX5_FLOW_ACTION_PORT_ID; ++actions_n; break; -@@ -7562,7 +7638,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7562,7 +7640,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, rw_act_num += MLX5_ACT_NUM_SET_TAG; break; case RTE_FLOW_ACTION_TYPE_DROP: @@ -61983,7 +65940,7 @@ index 62c38b87a1..6521b5b230 100644 attr, error); if (ret < 0) return ret; -@@ -7985,11 +8061,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -7985,11 +8063,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, break; case RTE_FLOW_ACTION_TYPE_SAMPLE: ret = flow_dv_validate_action_sample(&action_flags, @@ -61997,7 +65954,7 @@ index 62c38b87a1..6521b5b230 100644 is_root, error); if (ret < 0) -@@ -8301,6 +8379,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, +@@ -8301,6 +8381,29 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "sample before ASO action is not supported"); @@ -62027,7 +65984,42 @@ index 62c38b87a1..6521b5b230 100644 } /* * Validation the NIC Egress flow on representor, except implicit -@@ -9223,12 +9324,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -9005,22 +9108,23 @@ flow_dv_translate_item_gre(void *key, const struct rte_flow_item *item, + } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v; + uint16_t protocol_m, protocol_v; + +- if (key_type & MLX5_SET_MATCHER_M) ++ if (key_type & MLX5_SET_MATCHER_M) { + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, 0xff); +- else ++ if (!gre_m) ++ gre_m = &rte_flow_item_gre_mask; ++ gre_v = gre_m; ++ } else { + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + IPPROTO_GRE); +- if (!gre_v) { +- gre_v = &empty_gre; +- gre_m = &empty_gre; +- } else { +- if (!gre_m) ++ if (!gre_v) { ++ gre_v = &empty_gre; ++ gre_m = &empty_gre; ++ } else if (!gre_m) { + gre_m = &rte_flow_item_gre_mask; ++ } ++ if (key_type == MLX5_SET_MATCHER_HS_V) ++ gre_m = gre_v; + } +- if (key_type & MLX5_SET_MATCHER_M) +- gre_v = gre_m; +- else if (key_type == MLX5_SET_MATCHER_HS_V) +- gre_m = gre_v; + gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver); + gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver); + MLX5_SET(fte_match_set_misc, misc_v, gre_c_present, +@@ -9223,12 +9327,10 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, { const struct rte_flow_item_vxlan *vxlan_m; const struct rte_flow_item_vxlan *vxlan_v; @@ -62040,7 +66032,7 @@ index 62c38b87a1..6521b5b230 100644 char *vni_v; uint16_t dport; int size; -@@ -9280,24 +9379,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, +@@ -9280,24 +9382,11 @@ flow_dv_translate_item_vxlan(struct rte_eth_dev *dev, vni_v[i] = vxlan_m->vni[i] & vxlan_v->vni[i]; return; } @@ -62067,7 +66059,7 @@ index 62c38b87a1..6521b5b230 100644 } /** -@@ -9559,14 +9645,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, +@@ -9559,14 +9648,13 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, { const struct rte_flow_item_geneve_opt *geneve_opt_m; const 
struct rte_flow_item_geneve_opt *geneve_opt_v; @@ -62085,7 +66077,7 @@ index 62c38b87a1..6521b5b230 100644 return -1; MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m, &rte_flow_item_geneve_opt_mask); -@@ -9579,36 +9664,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, +@@ -9579,36 +9667,15 @@ flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *key, return ret; } } @@ -62130,7 +66122,7 @@ index 62c38b87a1..6521b5b230 100644 } return ret; } -@@ -13117,6 +13181,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, +@@ -13117,6 +13184,13 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev, return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Connection is not supported"); @@ -62144,7 +66136,7 @@ index 62c38b87a1..6521b5b230 100644 idx = flow_dv_aso_ct_alloc(dev, error); if (!idx) return rte_flow_error_set(error, rte_errno, -@@ -13166,6 +13237,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev, +@@ -13166,6 +13240,8 @@ flow_dv_translate_items(struct rte_eth_dev *dev, int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; uint64_t last_item = wks->last_item; @@ -62153,7 +66145,7 @@ index 62c38b87a1..6521b5b230 100644 int ret; switch (item_type) { -@@ -13209,94 +13282,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev, +@@ -13209,94 +13285,47 @@ flow_dv_translate_items(struct rte_eth_dev *dev, MLX5_FLOW_LAYER_OUTER_VLAN); break; case RTE_FLOW_ITEM_TYPE_IPV4: @@ -62271,7 +66263,7 @@ index 62c38b87a1..6521b5b230 100644 break; case RTE_FLOW_ITEM_TYPE_TCP: flow_dv_translate_item_tcp(key, items, tunnel, key_type); -@@ -13717,7 +13743,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, +@@ -13717,7 +13746,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev, * is the suffix flow. */ dev_flow->handle->layers |= wks.item_flags; @@ -62285,7 +66277,7 @@ index 62c38b87a1..6521b5b230 100644 return 0; } -@@ -14820,7 +14851,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, +@@ -14820,7 +14854,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow, } dv->actions[n++] = priv->sh->default_miss_action; } @@ -62294,7 +66286,7 @@ index 62c38b87a1..6521b5b230 100644 __flow_dv_adjust_buf_size(&dv->value.size, misc_mask); err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object, (void *)&dv->value, n, -@@ -14853,7 +14884,8 @@ error: +@@ -14853,7 +14887,8 @@ error: SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, handle_idx, dh, next) { /* hrxq is union, don't clear it if the flag is not set. 
*/ @@ -62304,7 +66296,7 @@ index 62c38b87a1..6521b5b230 100644 mlx5_hrxq_release(dev, dh->rix_hrxq); dh->rix_hrxq = 0; } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) { -@@ -15317,9 +15349,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) +@@ -15317,9 +15352,9 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) flow_dv_aso_ct_release(dev, flow->ct, NULL); else if (flow->age) flow_dv_aso_age_release(dev, flow->age); @@ -62316,7 +66308,7 @@ index 62c38b87a1..6521b5b230 100644 } while (flow->dev_handles) { uint32_t tmp_idx = flow->dev_handles; -@@ -15781,6 +15813,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, +@@ -15781,6 +15816,8 @@ flow_dv_action_create(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_CONNTRACK: ret = flow_dv_translate_create_conntrack(dev, action->conf, err); @@ -62325,7 +66317,7 @@ index 62c38b87a1..6521b5b230 100644 idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); break; default: -@@ -17020,7 +17054,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) +@@ -17020,7 +17057,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev) static int __flow_dv_create_policy_flow(struct rte_eth_dev *dev, uint32_t color_reg_c_idx, @@ -62334,7 +66326,7 @@ index 62c38b87a1..6521b5b230 100644 int actions_n, void *actions, bool match_src_port, const struct rte_flow_item *item, void **rule, const struct rte_flow_attr *attr) -@@ -17050,9 +17084,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, +@@ -17050,9 +17087,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev, } flow_dv_match_meta_reg(value.buf, (enum modify_reg)color_reg_c_idx, rte_col_2_mlx5_col(color), UINT32_MAX); @@ -62346,7 +66338,7 @@ index 62c38b87a1..6521b5b230 100644 actions_n, actions, rule); if (ret) { DRV_LOG(ERR, "Failed to create meter policy%d flow.", color); -@@ -17106,9 +17140,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, +@@ -17106,9 +17143,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, } } tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl); @@ -62358,7 +66350,7 @@ index 62c38b87a1..6521b5b230 100644 matcher.priority = priority; matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, matcher.mask.size); -@@ -17142,7 +17175,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, +@@ -17142,7 +17178,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev, static int __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, struct mlx5_flow_meter_sub_policy *sub_policy, @@ -62367,7 +66359,7 @@ index 62c38b87a1..6521b5b230 100644 struct mlx5_meter_policy_acts acts[RTE_COLORS]) { struct mlx5_priv *priv = dev->data->dev_private; -@@ -17157,9 +17190,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, +@@ -17157,9 +17193,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, .reserved = 0, }; int i; @@ -62378,7 +66370,7 @@ index 62c38b87a1..6521b5b230 100644 struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL}; if (ret < 0) -@@ -17192,13 +17225,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, +@@ -17192,13 +17228,12 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, TAILQ_INSERT_TAIL(&sub_policy->color_rules[i], color_rule, next_port); color_rule->src_port = priv->representor_id; @@ -62396,7 +66388,7 @@ index 62c38b87a1..6521b5b230 100644 &color_rule->matcher, &flow_err)) { DRV_LOG(ERR, "Failed to create color%u matcher.", i); goto err_exit; -@@ -17206,9 +17238,9 @@ __flow_dv_create_domain_policy_rules(struct 
rte_eth_dev *dev, +@@ -17206,9 +17241,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev, /* Create flow, matching color. */ if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)i, @@ -62408,7 +66400,7 @@ index 62c38b87a1..6521b5b230 100644 &attr)) { DRV_LOG(ERR, "Failed to create color%u rule.", i); goto err_exit; -@@ -17256,7 +17288,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, +@@ -17256,7 +17291,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, uint8_t egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; uint8_t transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0; bool mtr_first = egress || (transfer && priv->representor_id != UINT16_MAX); @@ -62417,7 +66409,7 @@ index 62c38b87a1..6521b5b230 100644 int i; /* If RSS or Queue, no previous actions / rules is created. */ -@@ -17327,7 +17359,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, +@@ -17327,7 +17362,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, acts[i].dv_actions[acts[i].actions_n] = port_action->action; acts[i].actions_n++; @@ -62426,7 +66418,7 @@ index 62c38b87a1..6521b5b230 100644 break; case MLX5_FLOW_FATE_DROP: case MLX5_FLOW_FATE_JUMP: -@@ -17379,7 +17411,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, +@@ -17379,7 +17414,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, acts[i].dv_actions[acts[i].actions_n++] = tbl_data->jump.action; if (mtr_policy->act_cnt[i].modify_hdr) @@ -62435,7 +66427,7 @@ index 62c38b87a1..6521b5b230 100644 break; default: /*Queue action do nothing*/ -@@ -17393,9 +17425,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, +@@ -17393,9 +17428,9 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev, "Failed to create policy rules per domain."); goto err_exit; } @@ -62448,7 +66440,7 @@ index 62c38b87a1..6521b5b230 100644 } return 0; err_exit: -@@ -17457,6 +17489,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) +@@ -17457,6 +17492,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) uint8_t egress, transfer; struct rte_flow_error error; struct mlx5_meter_policy_acts acts[RTE_COLORS]; @@ -62456,7 +66448,7 @@ index 62c38b87a1..6521b5b230 100644 int ret; egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0; -@@ -17532,7 +17565,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) +@@ -17532,7 +17568,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain) /* Create default policy rules. 
*/ ret = __flow_dv_create_domain_policy_rules(dev, &def_policy->sub_policy, @@ -62465,7 +66457,7 @@ index 62c38b87a1..6521b5b230 100644 if (ret) { DRV_LOG(ERR, "Failed to create default policy rules."); goto def_policy_error; -@@ -17674,7 +17707,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -17674,7 +17710,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, actions[i++] = priv->sh->dr_drop_action; flow_dv_match_meta_reg_all(matcher_para.buf, value.buf, (enum modify_reg)mtr_id_reg_c, 0, 0); @@ -62474,7 +66466,7 @@ index 62c38b87a1..6521b5b230 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow (mtrmng->def_matcher[domain]->matcher_object, -@@ -17719,7 +17752,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, +@@ -17719,7 +17755,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev, fm->drop_cnt, NULL); actions[i++] = cnt->action; actions[i++] = priv->sh->dr_drop_action; @@ -62483,7 +66475,7 @@ index 62c38b87a1..6521b5b230 100644 __flow_dv_adjust_buf_size(&value.size, misc_mask); ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object, (void *)&value, i, actions, -@@ -18091,7 +18124,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18091,7 +18127,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, struct { struct mlx5_flow_meter_policy *fm_policy; struct mlx5_flow_meter_info *next_fm; @@ -62492,7 +66484,7 @@ index 62c38b87a1..6521b5b230 100644 } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} }; uint32_t fm_cnt = 0; uint32_t i, j; -@@ -18125,14 +18158,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18125,14 +18161,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, mtr_policy = fm_info[i].fm_policy; rte_spinlock_lock(&mtr_policy->sl); sub_policy = mtr_policy->sub_policys[domain][0]; @@ -62520,7 +66512,7 @@ index 62c38b87a1..6521b5b230 100644 color_rule = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_sub_policy_color_rule), 0, SOCKET_ID_ANY); -@@ -18144,9 +18185,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18144,9 +18188,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, goto err_exit; } color_rule->src_port = src_port; @@ -62531,7 +66523,7 @@ index 62c38b87a1..6521b5b230 100644 next_fm = fm_info[i].next_fm; if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) { mlx5_free(color_rule); -@@ -18173,7 +18213,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18173,7 +18216,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, } acts.dv_actions[act_n++] = tbl_data->jump.action; acts.actions_n = act_n; @@ -62540,7 +66532,7 @@ index 62c38b87a1..6521b5b230 100644 port_action = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], mtr_policy->act_cnt[j].rix_port_id_action); -@@ -18186,6 +18226,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18186,6 +18229,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, acts.dv_actions[act_n++] = modify_hdr->action; acts.dv_actions[act_n++] = port_action->action; acts.actions_n = act_n; @@ -62550,7 +66542,7 @@ index 62c38b87a1..6521b5b230 100644 } fm_info[i].tag_rule[j] = color_rule; TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port); -@@ -18199,7 +18242,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, +@@ -18199,7 +18245,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev, goto err_exit; } if (__flow_dv_create_policy_flow(dev, color_reg_c_idx, (enum rte_color)j, @@ -62559,7 +66551,7 @@ 
index 62c38b87a1..6521b5b230 100644 acts.actions_n, acts.dv_actions, true, item, &color_rule->rule, &attr)) { rte_spinlock_unlock(&mtr_policy->sl); -@@ -18217,7 +18260,7 @@ err_exit: +@@ -18217,7 +18263,7 @@ err_exit: mtr_policy = fm_info[i].fm_policy; rte_spinlock_lock(&mtr_policy->sl); sub_policy = mtr_policy->sub_policys[domain][0]; @@ -62568,7 +66560,7 @@ index 62c38b87a1..6521b5b230 100644 color_rule = fm_info[i].tag_rule[j]; if (!color_rule) continue; -@@ -18547,8 +18590,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, +@@ -18547,8 +18593,7 @@ flow_dv_get_aged_flows(struct rte_eth_dev *dev, LIST_FOREACH(act, &age_info->aged_aso, next) { nb_flows++; if (nb_contexts) { @@ -62578,7 +66570,7 @@ index 62c38b87a1..6521b5b230 100644 if (!(--nb_contexts)) break; } -@@ -18909,7 +18951,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, +@@ -18909,7 +18954,7 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, break; case RTE_FLOW_ACTION_TYPE_DROP: ret = mlx5_flow_validate_action_drop @@ -62587,7 +66579,7 @@ index 62c38b87a1..6521b5b230 100644 if (ret < 0) return -rte_mtr_error_set(error, ENOTSUP, -@@ -19104,11 +19146,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, +@@ -19104,11 +19149,13 @@ flow_dv_validate_mtr_policy_acts(struct rte_eth_dev *dev, } } if (next_mtr && *policy_mode == MLX5_MTR_POLICY_MODE_ALL) { @@ -62604,7 +66596,7 @@ index 62c38b87a1..6521b5b230 100644 } /* If both colors have RSS, the attributes should be the same. */ if (flow_dv_mtr_policy_rss_compare(rss_color[RTE_COLOR_GREEN], -@@ -19243,7 +19287,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, +@@ -19243,7 +19290,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev, break; } /* Try to apply the flow to HW. */ @@ -62613,8 +66605,317 @@ index 62c38b87a1..6521b5b230 100644 __flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask); err = mlx5_flow_os_create_flow (flow.handle->dvh.matcher->matcher_object, +diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c +index fb08910ddb..32ab45b7e0 100644 +--- a/dpdk/drivers/net/mlx5/mlx5_flow_flex.c ++++ b/dpdk/drivers/net/mlx5/mlx5_flow_flex.c +@@ -118,28 +118,32 @@ mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item, + uint32_t pos, uint32_t width, uint32_t shift) + { + const uint8_t *ptr = item->pattern + pos / CHAR_BIT; +- uint32_t val, vbits; ++ uint32_t val, vbits, skip = pos % CHAR_BIT; + + /* Proceed the bitfield start byte. 
*/ + MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width); + MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT); + if (item->length <= pos / CHAR_BIT) + return 0; +- val = *ptr++ >> (pos % CHAR_BIT); ++ /* Bits are enumerated in byte in network order: 01234567 */ ++ val = *ptr++; + vbits = CHAR_BIT - pos % CHAR_BIT; +- pos = (pos + vbits) / CHAR_BIT; ++ pos = RTE_ALIGN_CEIL(pos, CHAR_BIT) / CHAR_BIT; + vbits = RTE_MIN(vbits, width); +- val &= RTE_BIT32(vbits) - 1; ++ /* Load bytes to cover the field width, checking pattern boundary */ + while (vbits < width && pos < item->length) { + uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT); + uint32_t tmp = *ptr++; + +- pos++; +- tmp &= RTE_BIT32(part) - 1; +- val |= tmp << vbits; ++ val |= tmp << RTE_ALIGN_CEIL(vbits, CHAR_BIT); + vbits += part; ++ pos++; + } +- return rte_bswap32(val <<= shift); ++ val = rte_cpu_to_be_32(val); ++ val <<= skip; ++ val >>= shift; ++ val &= (RTE_BIT64(width) - 1) << (sizeof(uint32_t) * CHAR_BIT - shift - width); ++ return val; + } + + #define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \ +@@ -235,19 +239,21 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, + mask = item->mask; + tp = (struct mlx5_flex_item *)spec->handle; + MLX5_ASSERT(mlx5_flex_index(dev->data->dev_private, tp) >= 0); +- for (i = 0; i < tp->mapnum; i++) { ++ for (i = 0; i < tp->mapnum && pos < (spec->length * CHAR_BIT); i++) { + struct mlx5_flex_pattern_field *map = tp->map + i; + uint32_t id = map->reg_id; +- uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift; +- uint32_t val, msk; ++ uint32_t val, msk, def; + + /* Skip placeholders for DUMMY fields. */ + if (id == MLX5_INVALID_SAMPLE_REG_ID) { + pos += map->width; + continue; + } ++ def = (uint32_t)(RTE_BIT64(map->width) - 1); ++ def <<= (sizeof(uint32_t) * CHAR_BIT - map->shift - map->width); + val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift); +- msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift); ++ msk = pos < (mask->length * CHAR_BIT) ? ++ mlx5_flex_get_bitfield(mask, pos, map->width, map->shift) : def; + MLX5_ASSERT(map->width); + MLX5_ASSERT(id < tp->devx_fp->num_samples); + if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) { +@@ -258,7 +264,7 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, + id += num_samples; + } + mlx5_flex_set_match_sample(misc4_m, misc4_v, +- def, msk & def, val & msk & def, ++ def, msk, val & msk, + tp->devx_fp->sample_ids[id], id); + pos += map->width; + } +@@ -338,12 +344,14 @@ mlx5_flex_release_index(struct rte_eth_dev *dev, + * + * shift mask + * ------- --------------- +- * 0 b111100 0x3C +- * 1 b111110 0x3E +- * 2 b111111 0x3F +- * 3 b011111 0x1F +- * 4 b001111 0x0F +- * 5 b000111 0x07 ++ * 0 b11111100 0x3C ++ * 1 b01111110 0x3E ++ * 2 b00111111 0x3F ++ * 3 b00011111 0x1F ++ * 4 b00001111 0x0F ++ * 5 b00000111 0x07 ++ * 6 b00000011 0x03 ++ * 7 b00000001 0x01 + */ + static uint8_t + mlx5_flex_hdr_len_mask(uint8_t shift, +@@ -353,8 +361,7 @@ mlx5_flex_hdr_len_mask(uint8_t shift, + int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD; + + base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr); +- return diff == 0 ? base_mask : +- diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff; ++ return diff < 0 ? 
base_mask << -diff : base_mask >> diff; + } + + static int +@@ -365,7 +372,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + { + const struct rte_flow_item_flex_field *field = &conf->next_header; + struct mlx5_devx_graph_node_attr *node = &devx->devx_conf; +- uint32_t len_width, mask; + + if (field->field_base % CHAR_BIT) + return rte_flow_error_set +@@ -393,49 +399,90 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + "negative header length field base (FIXED)"); + node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED; + break; +- case FIELD_MODE_OFFSET: ++ case FIELD_MODE_OFFSET: { ++ uint32_t msb, lsb; ++ int32_t shift = field->offset_shift; ++ uint32_t offset = field->offset_base; ++ uint32_t mask = field->offset_mask; ++ uint32_t wmax = attr->header_length_mask_width + ++ MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD; ++ + if (!(attr->header_length_mode & + RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD))) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "unsupported header length field mode (OFFSET)"); +- node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD; +- if (field->offset_mask == 0 || +- !rte_is_power_of_2(field->offset_mask + 1)) ++ if (!field->field_size) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "field size is a must for offset mode"); ++ if ((offset ^ (field->field_size + offset)) >> 5) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "field crosses the 32-bit word boundary"); ++ /* Hardware counts in dwords, all shifts done by offset within mask */ ++ if (shift < 0 || (uint32_t)shift >= wmax) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "header length field shift exceeds limits (OFFSET)"); ++ if (!mask) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "invalid length field offset mask (OFFSET)"); +- len_width = rte_fls_u32(field->offset_mask); +- if (len_width > attr->header_length_mask_width) ++ "zero length field offset mask (OFFSET)"); ++ msb = rte_fls_u32(mask) - 1; ++ lsb = rte_bsf32(mask); ++ if (!rte_is_power_of_2((mask >> lsb) + 1)) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field offset mask too wide (OFFSET)"); +- mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr); +- if (mask < field->offset_mask) ++ "length field offset mask not contiguous (OFFSET)"); ++ if (msb >= field->field_size) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field shift too big (OFFSET)"); +- node->header_length_field_mask = RTE_MIN(mask, +- field->offset_mask); ++ "length field offset mask exceeds field size (OFFSET)"); ++ if (msb >= wmax) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "length field offset mask exceeds supported width (OFFSET)"); ++ if (mask & ~mlx5_flex_hdr_len_mask(shift, attr)) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "mask and shift combination not supported (OFFSET)"); ++ msb++; ++ offset += field->field_size - msb; ++ if (msb < attr->header_length_mask_width) { ++ if (attr->header_length_mask_width - msb > offset) ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, ++ "field size plus offset_base is too small"); ++ offset += msb; ++ /* ++ * Here we can move to preceding dword. Hardware does ++ * cyclic left shift so we should avoid this and stay ++ * at current dword offset. 
++ */ ++ offset = (offset & ~0x1Fu) | ++ ((offset - attr->header_length_mask_width) & 0x1F); ++ } ++ node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD; ++ node->header_length_field_mask = mask; ++ node->header_length_field_shift = shift; ++ node->header_length_field_offset = offset; + break; ++ } + case FIELD_MODE_BITMASK: + if (!(attr->header_length_mode & + RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK))) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "unsupported header length field mode (BITMASK)"); +- if (attr->header_length_mask_width < field->field_size) ++ if (field->offset_shift > 15 || field->offset_shift < 0) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "header length field width exceeds limit"); ++ "header length field shift exceeds limit (BITMASK)"); + node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK; +- mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr); +- if (mask < field->offset_mask) +- return rte_flow_error_set +- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "length field shift too big (BITMASK)"); +- node->header_length_field_mask = RTE_MIN(mask, +- field->offset_mask); ++ node->header_length_field_mask = field->offset_mask; ++ node->header_length_field_shift = field->offset_shift; ++ node->header_length_field_offset = field->offset_base; + break; + default: + return rte_flow_error_set +@@ -448,15 +495,6 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "header length field base exceeds limit"); + node->header_length_base_value = field->field_base / CHAR_BIT; +- if (field->field_mode == FIELD_MODE_OFFSET || +- field->field_mode == FIELD_MODE_BITMASK) { +- if (field->offset_shift > 15 || field->offset_shift < 0) +- return rte_flow_error_set +- (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, +- "header length field shift exceeds limit"); +- node->header_length_field_shift = field->offset_shift; +- node->header_length_field_offset = field->offset_base; +- } + return 0; + } + +@@ -1006,6 +1044,8 @@ mlx5_flex_arc_type(enum rte_flow_item_type type, int in) + return MLX5_GRAPH_ARC_NODE_GENEVE; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + return MLX5_GRAPH_ARC_NODE_VXLAN_GPE; ++ case RTE_FLOW_ITEM_TYPE_ESP: ++ return MLX5_GRAPH_ARC_NODE_IPSEC_ESP; + default: + return -EINVAL; + } +@@ -1043,6 +1083,38 @@ mlx5_flex_arc_in_udp(const struct rte_flow_item *item, + return rte_be_to_cpu_16(spec->hdr.dst_port); + } + ++static int ++mlx5_flex_arc_in_ipv4(const struct rte_flow_item *item, ++ struct rte_flow_error *error) ++{ ++ const struct rte_flow_item_ipv4 *spec = item->spec; ++ const struct rte_flow_item_ipv4 *mask = item->mask; ++ struct rte_flow_item_ipv4 ip = { .hdr.next_proto_id = 0xff }; ++ ++ if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv4))) { ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, ++ "invalid ipv4 item mask, full mask is desired"); ++ } ++ return spec->hdr.next_proto_id; ++} ++ ++static int ++mlx5_flex_arc_in_ipv6(const struct rte_flow_item *item, ++ struct rte_flow_error *error) ++{ ++ const struct rte_flow_item_ipv6 *spec = item->spec; ++ const struct rte_flow_item_ipv6 *mask = item->mask; ++ struct rte_flow_item_ipv6 ip = { .hdr.proto = 0xff }; ++ ++ if (memcmp(mask, &ip, sizeof(struct rte_flow_item_ipv6))) { ++ return rte_flow_error_set ++ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, ++ "invalid ipv6 item mask, full mask is desired"); ++ } ++ return spec->hdr.proto; ++} ++ + static 
int + mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr, + const struct rte_flow_item_flex_conf *conf, +@@ -1089,6 +1161,12 @@ mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr, + case RTE_FLOW_ITEM_TYPE_UDP: + ret = mlx5_flex_arc_in_udp(rte_item, error); + break; ++ case RTE_FLOW_ITEM_TYPE_IPV4: ++ ret = mlx5_flex_arc_in_ipv4(rte_item, error); ++ break; ++ case RTE_FLOW_ITEM_TYPE_IPV6: ++ ret = mlx5_flex_arc_in_ipv6(rte_item, error); ++ break; + default: + MLX5_ASSERT(false); + return rte_flow_error_set diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c -index a3c8056515..aa315c054d 100644 +index a3c8056515..047af4a0c9 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_hw.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_hw.c @@ -56,6 +56,20 @@ @@ -62869,21 +67170,25 @@ index a3c8056515..aa315c054d 100644 job->flow->age_idx = age_idx; if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) /* -@@ -2309,10 +2375,10 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2309,10 +2375,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, break; /* Fall-through. */ case RTE_FLOW_ACTION_TYPE_COUNT: - ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue, - &cnt_id, age_idx); +- if (ret != 0) +- return ret; + cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); + ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx); - if (ret != 0) -- return ret; ++ if (ret != 0) { ++ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION, ++ action, "Failed to allocate flow counter"); + goto error; ++ } ret = mlx5_hws_cnt_pool_get_action_offset (priv->hws_cpool, cnt_id, -@@ -2320,7 +2386,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2320,7 +2389,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, &rule_acts[act_data->action_dst].counter.offset ); if (ret != 0) @@ -62892,7 +67197,7 @@ index a3c8056515..aa315c054d 100644 job->flow->cnt_id = cnt_id; break; case MLX5_RTE_FLOW_ACTION_TYPE_COUNT: -@@ -2331,7 +2397,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2331,7 +2400,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, &rule_acts[act_data->action_dst].counter.offset ); if (ret != 0) @@ -62901,7 +67206,7 @@ index a3c8056515..aa315c054d 100644 job->flow->cnt_id = act_data->shared_counter.id; break; case RTE_FLOW_ACTION_TYPE_CONNTRACK: -@@ -2339,7 +2405,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2339,7 +2408,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, ((uint32_t)(uintptr_t)action->conf); if (flow_hw_ct_compile(dev, queue, ct_idx, &rule_acts[act_data->action_dst])) @@ -62910,7 +67215,7 @@ index a3c8056515..aa315c054d 100644 break; case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK: mtr_id = act_data->shared_meter.id & -@@ -2347,7 +2413,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2347,7 +2416,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, /* Find ASO object. 
*/ aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id); if (!aso_mtr) @@ -62919,7 +67224,7 @@ index a3c8056515..aa315c054d 100644 rule_acts[act_data->action_dst].action = pool->action; rule_acts[act_data->action_dst].aso_meter.offset = -@@ -2365,7 +2431,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2365,7 +2434,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, act_data->action_dst, action, rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE); if (ret != 0) @@ -62928,7 +67233,7 @@ index a3c8056515..aa315c054d 100644 break; default: break; -@@ -2398,6 +2464,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, +@@ -2398,6 +2467,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev, if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) job->flow->cnt_id = hw_acts->cnt_id; return 0; @@ -62940,8 +67245,11 @@ index a3c8056515..aa315c054d 100644 } static const struct rte_flow_item * -@@ -2502,10 +2573,6 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, +@@ -2500,12 +2574,9 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, + struct mlx5_hw_q_job *job; + const struct rte_flow_item *rule_items; uint32_t flow_idx; ++ struct rte_flow_error sub_error = { 0 }; int ret; - if (unlikely((!dev->data->dev_started))) { @@ -62951,19 +67259,35 @@ index a3c8056515..aa315c054d 100644 if (unlikely(!priv->hw_q[queue].job_idx)) { rte_errno = ENOMEM; goto error; -@@ -2544,10 +2611,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, +@@ -2544,10 +2615,8 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev, if (flow_hw_actions_construct(dev, job, &table->ats[action_template_index], pattern_template_index, actions, - rule_acts, queue, error)) { - rte_errno = EINVAL; -+ rule_acts, queue, error)) ++ rule_acts, queue, &sub_error)) goto free; - } rule_items = flow_hw_get_rule_items(dev, table, items, pattern_template_index, job); if (!rule_items) -@@ -2646,6 +2711,8 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, +@@ -2563,9 +2632,12 @@ free: + mlx5_ipool_free(table->flow, flow_idx); + priv->hw_q[queue].job_idx++; + error: +- rte_flow_error_set(error, rte_errno, +- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, +- "fail to create rte flow"); ++ if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL) ++ *error = sub_error; ++ else ++ rte_flow_error_set(error, rte_errno, ++ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, ++ "fail to create rte flow"); + return NULL; + } + +@@ -2646,6 +2718,8 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow, struct rte_flow_error *error) { @@ -62972,7 +67296,7 @@ index a3c8056515..aa315c054d 100644 if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) { if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) { /* Remove this AGE parameter from indirect counter. 
*/ -@@ -2656,8 +2723,9 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, +@@ -2656,8 +2730,9 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, } return; } @@ -62983,7 +67307,7 @@ index a3c8056515..aa315c054d 100644 flow->cnt_id = 0; if (flow->age_idx) { if (mlx5_hws_age_is_indirect(flow->age_idx)) { -@@ -3252,14 +3320,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, +@@ -3252,14 +3327,18 @@ flow_hw_translate_group(struct rte_eth_dev *dev, "group index not supported"); *table_group = group + 1; } else if (config->dv_esw_en && @@ -63007,7 +67331,7 @@ index a3c8056515..aa315c054d 100644 */ if (group > MLX5_HW_MAX_EGRESS_GROUP) return rte_flow_error_set(error, EINVAL, -@@ -3349,7 +3421,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, +@@ -3349,7 +3428,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev, return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -63016,7 +67340,7 @@ index a3c8056515..aa315c054d 100644 } LIST_REMOVE(table, next); for (i = 0; i < table->nb_item_templates; i++) -@@ -3863,6 +3935,34 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev, +@@ -3863,6 +3942,34 @@ flow_hw_validate_action_push_vlan(struct rte_eth_dev *dev, #undef X_FIELD } @@ -63051,7 +67375,7 @@ index a3c8056515..aa315c054d 100644 static int mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_actions_template_attr *attr, -@@ -3879,6 +3979,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -3879,6 +3986,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, bool actions_end = false; int ret; @@ -63060,7 +67384,7 @@ index a3c8056515..aa315c054d 100644 /* FDB actions are only valid to proxy port. */ if (attr->transfer && (!priv->sh->config.dv_esw_en || !priv->master)) return rte_flow_error_set(error, EINVAL, -@@ -3896,7 +3998,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -3896,7 +4005,7 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_ACTION, action, "mask type does not match action type"); @@ -63069,7 +67393,7 @@ index a3c8056515..aa315c054d 100644 case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_INDIRECT: -@@ -4022,6 +4124,13 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -4022,6 +4131,13 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_END: actions_end = true; break; @@ -63083,7 +67407,7 @@ index a3c8056515..aa315c054d 100644 default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, -@@ -4041,8 +4150,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev, +@@ -4041,8 +4157,7 @@ flow_hw_actions_validate(struct rte_eth_dev *dev, const struct rte_flow_action masks[], struct rte_flow_error *error) { @@ -63093,7 +67417,7 @@ index a3c8056515..aa315c054d 100644 } -@@ -4143,7 +4251,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) +@@ -4143,7 +4258,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) if (curr_off >= MLX5_HW_MAX_ACTS) goto err_actions_num; @@ -63102,7 +67426,7 @@ index a3c8056515..aa315c054d 100644 case RTE_FLOW_ACTION_TYPE_VOID: break; case RTE_FLOW_ACTION_TYPE_INDIRECT: -@@ -4221,6 +4329,10 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) +@@ -4221,6 +4336,10 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at) } at->actions_off[i] = cnt_off; break; @@ -63113,7 +67437,7 @@ index a3c8056515..aa315c054d 100644 default: type = 
mlx5_hw_dr_action_types[at->actions[i].type]; at->actions_off[i] = curr_off; -@@ -4262,7 +4374,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, +@@ -4262,7 +4381,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, rm[set_vlan_vid_ix].conf)->vlan_vid != 0); const struct rte_flow_action_of_set_vlan_vid *conf = ra[set_vlan_vid_ix].conf; @@ -63121,7 +67445,7 @@ index a3c8056515..aa315c054d 100644 int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0, NULL, &error); *spec = (typeof(*spec)) { -@@ -4273,8 +4384,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, +@@ -4273,8 +4391,6 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, }, .src = { .field = RTE_FLOW_FIELD_VALUE, @@ -63130,7 +67454,7 @@ index a3c8056515..aa315c054d 100644 }, .width = width, }; -@@ -4286,11 +4395,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, +@@ -4286,11 +4402,15 @@ flow_hw_set_vlan_vid(struct rte_eth_dev *dev, }, .src = { .field = RTE_FLOW_FIELD_VALUE, @@ -63148,7 +67472,7 @@ index a3c8056515..aa315c054d 100644 ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; ra[set_vlan_vid_ix].conf = spec; rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; -@@ -4317,8 +4430,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, +@@ -4317,8 +4437,6 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, }, .src = { .field = RTE_FLOW_FIELD_VALUE, @@ -63157,7 +67481,7 @@ index a3c8056515..aa315c054d 100644 }, .width = width, }; -@@ -4327,6 +4438,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, +@@ -4327,6 +4445,7 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev, .conf = &conf }; @@ -63165,7 +67489,7 @@ index a3c8056515..aa315c054d 100644 return flow_hw_modify_field_construct(job, act_data, hw_acts, &modify_action); } -@@ -4534,6 +4646,9 @@ error: +@@ -4534,6 +4653,9 @@ error: mlx5dr_action_template_destroy(at->tmpl); mlx5_free(at); } @@ -63175,7 +67499,7 @@ index a3c8056515..aa315c054d 100644 return NULL; } -@@ -4561,7 +4676,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused, +@@ -4561,7 +4683,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev __rte_unused, return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -63184,7 +67508,7 @@ index a3c8056515..aa315c054d 100644 } LIST_REMOVE(template, next); if (template->tmpl) -@@ -4614,9 +4729,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4614,9 +4736,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; @@ -63198,7 +67522,7 @@ index a3c8056515..aa315c054d 100644 if (!attr->ingress && !attr->egress && !attr->transfer) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR, NULL, "at least one of the direction attributes" -@@ -4657,16 +4775,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4657,16 +4782,26 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, switch (type) { case RTE_FLOW_ITEM_TYPE_TAG: { @@ -63228,7 +67552,7 @@ index a3c8056515..aa315c054d 100644 break; } case MLX5_RTE_FLOW_ITEM_TYPE_TAG: -@@ -4680,6 +4808,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, +@@ -4680,6 +4815,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Unsupported internal tag index"); @@ -63241,7 +67565,7 @@ index a3c8056515..aa315c054d 100644 break; } case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: -@@ -4790,7 +4924,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev, +@@ -4790,7 +4931,7 @@ 
flow_hw_pattern_template_create(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it; struct rte_flow_item *copied_items = NULL; const struct rte_flow_item *tmpl_items; @@ -63250,7 +67574,7 @@ index a3c8056515..aa315c054d 100644 struct rte_flow_item port = { .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, .mask = &rte_flow_item_ethdev_mask, -@@ -4897,7 +5031,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused, +@@ -4897,7 +5038,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused, return rte_flow_error_set(error, EBUSY, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, @@ -63259,7 +67583,7 @@ index a3c8056515..aa315c054d 100644 } LIST_REMOVE(template, next); claim_zero(mlx5dr_match_template_destroy(template->mt)); -@@ -5271,12 +5405,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) +@@ -5271,12 +5412,14 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv) * * @param dev * Pointer to Ethernet device. @@ -63275,7 +67599,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5295,7 +5431,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) +@@ -5295,7 +5438,7 @@ flow_hw_create_tx_repr_sq_pattern_tmpl(struct rte_eth_dev *dev) }, }; @@ -63284,7 +67608,7 @@ index a3c8056515..aa315c054d 100644 } static __rte_always_inline uint32_t -@@ -5353,12 +5489,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, +@@ -5353,12 +5496,15 @@ flow_hw_update_action_mask(struct rte_flow_action *action, * * @param dev * Pointer to Ethernet device. @@ -63301,7 +67625,7 @@ index a3c8056515..aa315c054d 100644 { uint32_t tag_mask = flow_hw_tx_tag_regc_mask(dev); uint32_t tag_value = flow_hw_tx_tag_regc_value(dev); -@@ -5444,7 +5583,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) +@@ -5444,7 +5590,7 @@ flow_hw_create_tx_repr_tag_jump_acts_tmpl(struct rte_eth_dev *dev) NULL, NULL); idx++; MLX5_ASSERT(idx <= RTE_DIM(actions_v)); @@ -63310,7 +67634,7 @@ index a3c8056515..aa315c054d 100644 } static void -@@ -5473,12 +5612,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5473,12 +5619,14 @@ flow_hw_cleanup_tx_repr_tagging(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63326,7 +67650,7 @@ index a3c8056515..aa315c054d 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_template_table_attr attr = { -@@ -5496,20 +5637,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) +@@ -5496,20 +5644,22 @@ flow_hw_setup_tx_repr_tagging(struct rte_eth_dev *dev) MLX5_ASSERT(priv->sh->config.dv_esw_en); MLX5_ASSERT(priv->sh->config.repr_matching); @@ -63356,7 +67680,7 @@ index a3c8056515..aa315c054d 100644 flow_hw_cleanup_tx_repr_tagging(dev); return -rte_errno; } -@@ -5540,12 +5683,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) +@@ -5540,12 +5690,15 @@ flow_hw_esw_mgr_regc_marker(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. 
@@ -63373,7 +67697,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5575,7 +5721,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5575,7 +5728,7 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) }, }; @@ -63382,7 +67706,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5588,12 +5734,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) +@@ -5588,12 +5741,15 @@ flow_hw_create_ctrl_esw_mgr_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63399,7 +67723,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5626,7 +5775,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5626,7 +5782,7 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) }, }; @@ -63408,7 +67732,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5636,12 +5785,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) +@@ -5636,12 +5792,15 @@ flow_hw_create_ctrl_regc_sq_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63425,7 +67749,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_pattern_template_attr attr = { .relaxed_matching = 0, -@@ -5660,7 +5812,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5660,7 +5819,7 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) }, }; @@ -63434,7 +67758,7 @@ index a3c8056515..aa315c054d 100644 } /* -@@ -5670,12 +5822,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) +@@ -5670,12 +5829,15 @@ flow_hw_create_ctrl_port_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63451,7 +67775,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_pattern_template_attr tx_pa_attr = { .relaxed_matching = 0, -@@ -5696,10 +5851,44 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5696,10 +5858,44 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ITEM_TYPE_END, }, }; @@ -63499,7 +67823,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5710,12 +5899,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) +@@ -5710,12 +5906,15 @@ flow_hw_create_tx_default_mreg_copy_pattern_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63516,7 +67840,7 @@ index a3c8056515..aa315c054d 100644 { uint32_t marker_mask = flow_hw_esw_mgr_regc_marker_mask(dev); uint32_t marker_bits = flow_hw_esw_mgr_regc_marker(dev); -@@ -5781,7 +5973,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5781,7 +5980,7 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) set_reg_v.dst.offset = rte_bsf32(marker_mask); rte_memcpy(set_reg_v.src.value, &marker_bits, sizeof(marker_bits)); rte_memcpy(set_reg_m.src.value, &marker_mask, sizeof(marker_mask)); @@ -63525,7 +67849,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5793,13 +5985,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) +@@ -5793,13 +5992,16 @@ flow_hw_create_ctrl_regc_jump_actions_template(struct rte_eth_dev *dev) * Pointer to Ethernet device. * @param group * Destination group for this action template. 
@@ -63543,7 +67867,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5829,8 +6024,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5829,8 +6031,8 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, } }; @@ -63554,7 +67878,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5839,12 +6034,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, +@@ -5839,12 +6041,15 @@ flow_hw_create_ctrl_jump_actions_template(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. @@ -63571,7 +67895,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_actions_template_attr attr = { .transfer = 1, -@@ -5874,8 +6072,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5874,8 +6079,7 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) } }; @@ -63581,7 +67905,7 @@ index a3c8056515..aa315c054d 100644 } /* -@@ -5884,12 +6081,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) +@@ -5884,12 +6088,15 @@ flow_hw_create_ctrl_port_actions_template(struct rte_eth_dev *dev) * * @param dev * Pointer to Ethernet device. @@ -63598,7 +67922,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_actions_template_attr tx_act_attr = { .egress = 1, -@@ -5952,11 +6152,41 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5952,11 +6159,41 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -63643,7 +67967,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -5969,6 +6199,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5969,6 +6206,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -63652,7 +67976,7 @@ index a3c8056515..aa315c054d 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -5976,7 +6208,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) +@@ -5976,7 +6215,8 @@ flow_hw_create_tx_default_mreg_copy_actions_template(struct rte_eth_dev *dev) static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -63662,7 +67986,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -5993,7 +6226,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -5993,7 +6233,7 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, .external = false, }; @@ -63671,7 +67995,7 @@ index a3c8056515..aa315c054d 100644 } -@@ -6007,6 +6240,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6007,6 +6247,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -63680,7 +68004,7 @@ index a3c8056515..aa315c054d 100644 * * @return * Pointer to flow table on success, NULL otherwise. 
-@@ -6014,7 +6249,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, +@@ -6014,7 +6256,8 @@ flow_hw_create_ctrl_sq_miss_root_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -63690,7 +68014,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6031,7 +6267,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6031,7 +6274,7 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, .external = false, }; @@ -63699,7 +68023,7 @@ index a3c8056515..aa315c054d 100644 } /* -@@ -6043,6 +6279,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6043,6 +6286,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -63708,7 +68032,7 @@ index a3c8056515..aa315c054d 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6050,7 +6288,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, +@@ -6050,7 +6295,8 @@ flow_hw_create_ctrl_sq_miss_table(struct rte_eth_dev *dev, static struct rte_flow_template_table* flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *pt, @@ -63718,7 +68042,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_template_table_attr tx_tbl_attr = { .flow_attr = { -@@ -6064,14 +6303,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6064,14 +6310,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, .attr = tx_tbl_attr, .external = false, }; @@ -63734,7 +68058,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -6084,6 +6317,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6084,6 +6324,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, * Pointer to flow pattern template. * @param at * Pointer to flow actions template. @@ -63743,7 +68067,7 @@ index a3c8056515..aa315c054d 100644 * * @return * Pointer to flow table on success, NULL otherwise. -@@ -6091,7 +6326,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, +@@ -6091,7 +6333,8 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev, static struct rte_flow_template_table * flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, struct rte_flow_pattern_template *it, @@ -63753,7 +68077,7 @@ index a3c8056515..aa315c054d 100644 { struct rte_flow_template_table_attr attr = { .flow_attr = { -@@ -6108,7 +6344,110 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6108,7 +6351,110 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, .external = false, }; @@ -63865,7 +68189,7 @@ index a3c8056515..aa315c054d 100644 } /** -@@ -6117,142 +6456,159 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, +@@ -6117,142 +6463,159 @@ flow_hw_create_ctrl_jump_table(struct rte_eth_dev *dev, * * @param dev * Pointer to Ethernet device. 
@@ -64138,7 +68462,7 @@ index a3c8056515..aa315c054d 100644 return -EINVAL; } -@@ -6376,27 +6732,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) +@@ -6376,27 +6739,28 @@ flow_hw_create_vlan(struct rte_eth_dev *dev) MLX5DR_ACTION_FLAG_HWS_FDB }; @@ -64171,7 +68495,7 @@ index a3c8056515..aa315c054d 100644 } return 0; } -@@ -6760,6 +7117,38 @@ mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev) +@@ -6760,6 +7124,38 @@ mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev) } } @@ -64210,7 +68534,7 @@ index a3c8056515..aa315c054d 100644 /** * Configure port HWS resources. * -@@ -6807,11 +7196,10 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6807,11 +7203,10 @@ flow_hw_configure(struct rte_eth_dev *dev, struct rte_flow_queue_attr ctrl_queue_attr = {0}; bool is_proxy = !!(priv->sh->config.dv_esw_en && priv->master); int ret = 0; @@ -64225,7 +68549,7 @@ index a3c8056515..aa315c054d 100644 /* In case re-configuring, release existing context at first. */ if (priv->dr_ctx) { /* */ -@@ -6836,8 +7224,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6836,8 +7231,7 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } @@ -64235,7 +68559,7 @@ index a3c8056515..aa315c054d 100644 _queue_attr[nb_queue] = &ctrl_queue_attr; priv->acts_ipool = mlx5_ipool_create(&cfg); if (!priv->acts_ipool) -@@ -6845,14 +7232,6 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6845,14 +7239,6 @@ flow_hw_configure(struct rte_eth_dev *dev, /* Allocate the queue job descriptor LIFO. */ mem_size = sizeof(priv->hw_q[0]) * nb_q_updated; for (i = 0; i < nb_q_updated; i++) { @@ -64250,7 +68574,17 @@ index a3c8056515..aa315c054d 100644 mem_size += (sizeof(struct mlx5_hw_q_job *) + sizeof(struct mlx5_hw_q_job) + sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + -@@ -6926,6 +7305,7 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6916,6 +7302,9 @@ flow_hw_configure(struct rte_eth_dev *dev, + } + dr_ctx_attr.pd = priv->sh->cdev->pd; + dr_ctx_attr.queues = nb_q_updated; ++ /* Assign initial value of STC numbers for representors. */ ++ if (priv->representor) ++ dr_ctx_attr.initial_log_stc_memory = MLX5_REPR_STC_MEMORY_LOG; + /* Queue size should all be the same. Take the first one. 
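Right above, the DR context attributes gain an initial_log_stc_memory hint when the port is a representor, so lightly loaded representors start with a smaller STC arena. A sketch of that role-based sizing; the struct, the helper, and the constant value are illustrative, not the driver's definitions:

#include <stdint.h>

#define REPR_STC_LOG 7 /* made-up log2 arena size for representors */

struct ctx_attr {
        uint32_t queues;
        uint8_t initial_log_stc_memory; /* 0 = keep the library default */
};

static void
fill_ctx_attr(struct ctx_attr *attr, int is_representor, uint32_t queues)
{
        attr->queues = queues;
        /* Representors host few rules; start them with a smaller arena. */
        attr->initial_log_stc_memory = is_representor ? REPR_STC_LOG : 0;
}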
*/ + dr_ctx_attr.queue_size = _queue_attr[0]->size; + dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr); +@@ -6926,6 +7315,7 @@ flow_hw_configure(struct rte_eth_dev *dev, priv->nb_queue = nb_q_updated; rte_spinlock_init(&priv->hw_ctrl_lock); LIST_INIT(&priv->hw_ctrl_flows); @@ -64258,7 +68592,7 @@ index a3c8056515..aa315c054d 100644 ret = flow_hw_create_ctrl_rx_tables(dev); if (ret) { rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, -@@ -6952,23 +7332,34 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -6952,23 +7342,34 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } if (priv->sh->config.dv_esw_en && priv->sh->config.repr_matching) { @@ -64302,7 +68636,7 @@ index a3c8056515..aa315c054d 100644 } if (port_attr->nb_conn_tracks) { mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated + -@@ -7005,18 +7396,32 @@ flow_hw_configure(struct rte_eth_dev *dev, +@@ -7005,18 +7406,32 @@ flow_hw_configure(struct rte_eth_dev *dev, goto err; } ret = mlx5_hws_age_pool_init(dev, port_attr, nb_queue); @@ -64337,7 +68671,7 @@ index a3c8056515..aa315c054d 100644 if (priv->hws_ctpool) { flow_hw_ct_pool_destroy(dev, priv->hws_ctpool); priv->hws_ctpool = NULL; -@@ -7025,34 +7430,44 @@ err: +@@ -7025,34 +7440,44 @@ err: flow_hw_ct_mng_destroy(dev, priv->ct_mng); priv->ct_mng = NULL; } @@ -64397,7 +68731,7 @@ index a3c8056515..aa315c054d 100644 /* Do not overwrite the internal errno information. */ if (ret) return ret; -@@ -7071,32 +7486,41 @@ void +@@ -7071,32 +7496,41 @@ void flow_hw_resource_release(struct rte_eth_dev *dev) { struct mlx5_priv *priv = dev->data->dev_private; @@ -64457,7 +68791,7 @@ index a3c8056515..aa315c054d 100644 } for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { if (priv->hw_drop[i]) -@@ -7104,6 +7528,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev) +@@ -7104,6 +7538,8 @@ flow_hw_resource_release(struct rte_eth_dev *dev) if (priv->hw_tag[i]) mlx5dr_action_destroy(priv->hw_tag[i]); } @@ -64466,7 +68800,7 @@ index a3c8056515..aa315c054d 100644 flow_hw_destroy_vlan(dev); flow_hw_free_vport_actions(priv); if (priv->acts_ipool) { -@@ -7130,8 +7556,6 @@ flow_hw_resource_release(struct rte_eth_dev *dev) +@@ -7130,8 +7566,6 @@ flow_hw_resource_release(struct rte_eth_dev *dev) } mlx5_free(priv->hw_q); priv->hw_q = NULL; @@ -64475,7 +68809,7 @@ index a3c8056515..aa315c054d 100644 priv->nb_queue = 0; } -@@ -7178,9 +7602,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) +@@ -7178,9 +7612,9 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) uint32_t meta_mode = priv->sh->config.dv_xmeta_en; uint8_t masks = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c; uint32_t i, j; @@ -64487,7 +68821,7 @@ index a3c8056515..aa315c054d 100644 /* * The CAPA is global for common device but only used in net. -@@ -7195,29 +7619,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) +@@ -7195,29 +7629,35 @@ void flow_hw_init_tags_set(struct rte_eth_dev *dev) if (meta_mode == MLX5_XMETA_MODE_META32_HWS) unset |= 1 << (REG_C_1 - REG_C_0); masks &= ~unset; @@ -64528,12 +68862,12 @@ index a3c8056515..aa315c054d 100644 + masks = common_masks; + else + goto after_avl_tags; - } ++ } + j = 0; + for (i = 0; i < MLX5_FLOW_HW_TAGS_MAX; i++) { + if ((1 << i) & masks) + mlx5_flow_hw_avl_tags[j++] = (enum modify_reg)(i + (uint32_t)REG_C_0); -+ } + } + /* Clear the rest of unusable tag indexes. 
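flow_hw_init_tags_set() above intersects the per-port REG_C capability masks and expands the surviving bitmask into the dense mlx5_flow_hw_avl_tags[] array; the loop that follows pads the unused tail with REG_NON. A self-contained sketch of that expansion, with TAGS_MAX and REG_NONE as illustrative substitutes for the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define TAGS_MAX 8    /* hypothetical size of the tag array */
#define REG_NONE (-1) /* hypothetical "unusable" sentinel */

/* Expand a capability bitmask into a dense array of register indexes and
 * clear the tail, so later lookups can stop at the first sentinel. */
static void
expand_tag_mask(uint8_t mask, int tags[TAGS_MAX])
{
        unsigned int i, j = 0;

        for (i = 0; i < TAGS_MAX; i++)
                if (mask & (1u << i))
                        tags[j++] = (int)i; /* i-th REG_C is usable */
        for (; j < TAGS_MAX; j++)
                tags[j] = REG_NONE;
}

int
main(void)
{
        int tags[TAGS_MAX];
        unsigned int i;

        expand_tag_mask(0xb4, tags); /* bits 2, 4, 5 and 7 set */
        for (i = 0; i < TAGS_MAX && tags[i] != REG_NONE; i++)
                printf("tag[%u] -> REG_C_%d\n", i, tags[i]);
        return 0;
}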
*/ + for (; j < MLX5_FLOW_HW_TAGS_MAX; j++) + mlx5_flow_hw_avl_tags[j] = REG_NON; @@ -64541,7 +68875,7 @@ index a3c8056515..aa315c054d 100644 priv->sh->hws_tags = 1; mlx5_flow_hw_aso_tag = (enum modify_reg)priv->mtr_color_reg; mlx5_flow_hw_avl_tags_init_cnt++; -@@ -7425,6 +7855,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue, +@@ -7425,6 +7865,13 @@ flow_hw_conntrack_create(struct rte_eth_dev *dev, uint32_t queue, "CT is not enabled"); return 0; } @@ -64555,7 +68889,7 @@ index a3c8056515..aa315c054d 100644 ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx); if (!ct) { rte_flow_error_set(error, rte_errno, -@@ -7566,6 +8003,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, +@@ -7566,6 +8013,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue, bool push = true; bool aso = false; @@ -64565,7 +68899,7 @@ index a3c8056515..aa315c054d 100644 if (attr) { MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE); if (unlikely(!priv->hw_q[queue].job_idx)) { -@@ -8243,6 +8683,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id, +@@ -8243,6 +8693,10 @@ flow_hw_get_q_aged_flows(struct rte_eth_dev *dev, uint32_t queue_id, return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "empty context"); @@ -64576,7 +68910,7 @@ index a3c8056515..aa315c054d 100644 if (priv->hws_strict_queue) { if (queue_id >= age_info->hw_q_age->nb_rings) return rte_flow_error_set(error, EINVAL, -@@ -8355,6 +8799,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { +@@ -8355,6 +8809,10 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = { * Pointer to flow rule actions. * @param action_template_idx * Index of an action template associated with @p table. @@ -64587,7 +68921,7 @@ index a3c8056515..aa315c054d 100644 * * @return * 0 on success, negative errno value otherwise and rte_errno set. 
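The new info argument documented above lets each control rule be recorded together with what it is for (its kind plus, for SQ miss rules, the queue number), which is what makes the selective teardown further below possible. A minimal sketch of that pattern with hypothetical names; the real driver keeps equivalent fields in its own list entries:

#include <stdint.h>
#include <stdlib.h>
#include <sys/queue.h>

enum ctrl_flow_kind { /* illustrative kinds, not the driver's enum */
        CTRL_KIND_SQ_MISS,
        CTRL_KIND_DEFAULT_JUMP,
};

struct ctrl_flow_entry {
        LIST_ENTRY(ctrl_flow_entry) next;
        enum ctrl_flow_kind kind;
        uint32_t sqn; /* meaningful for SQ miss rules only */
        void *flow;   /* opaque handle to the installed rule */
};

LIST_HEAD(ctrl_flow_list, ctrl_flow_entry);

/* Remove only the SQ miss rules matching one queue number, leaving every
 * other control rule installed. */
static void
drop_sq_miss_rules(struct ctrl_flow_list *head, uint32_t sqn)
{
        struct ctrl_flow_entry *e = LIST_FIRST(head), *nxt;

        while (e != NULL) {
                nxt = LIST_NEXT(e, next);
                if (e->kind == CTRL_KIND_SQ_MISS && e->sqn == sqn) {
                        LIST_REMOVE(e, next);
                        /* destroy e->flow via the driver API here */
                        free(e);
                }
                e = nxt;
        }
}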
-@@ -8366,7 +8814,9 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, +@@ -8366,7 +8824,9 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, struct rte_flow_item items[], uint8_t item_template_idx, struct rte_flow_action actions[], @@ -64598,7 +68932,7 @@ index a3c8056515..aa315c054d 100644 { struct mlx5_priv *priv = proxy_dev->data->dev_private; uint32_t queue = CTRL_QUEUE_ID(priv); -@@ -8413,7 +8863,14 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, +@@ -8413,7 +8873,14 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev, } entry->owner_dev = owner_dev; entry->flow = flow; @@ -64614,7 +68948,7 @@ index a3c8056515..aa315c054d 100644 rte_spinlock_unlock(&priv->hw_ctrl_lock); return 0; error: -@@ -8587,11 +9044,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) +@@ -8587,11 +9054,23 @@ flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev) mlx5_free(cf); cf = cf_next; } @@ -64639,7 +68973,7 @@ index a3c8056515..aa315c054d 100644 { uint16_t port_id = dev->data->port_id; struct rte_flow_item_ethdev esw_mgr_spec = { -@@ -8616,6 +9085,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8616,6 +9095,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) }; struct rte_flow_item items[3] = { { 0 } }; struct rte_flow_action actions[3] = { { 0 } }; @@ -64650,7 +68984,7 @@ index a3c8056515..aa315c054d 100644 struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; uint16_t proxy_port_id = dev->data->port_id; -@@ -8637,8 +9110,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8637,8 +9120,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) proxy_port_id, port_id); return 0; } @@ -64662,7 +68996,7 @@ index a3c8056515..aa315c054d 100644 DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " "default flow tables were not created.", proxy_port_id, port_id); -@@ -8670,8 +9144,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8670,8 +9154,9 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) actions[2] = (struct rte_flow_action) { .type = RTE_FLOW_ACTION_TYPE_END, }; @@ -64674,7 +69008,7 @@ index a3c8056515..aa315c054d 100644 if (ret) { DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", port_id, sqn, ret); -@@ -8700,8 +9175,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8700,8 +9185,10 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END, }; @@ -64687,7 +69021,7 @@ index a3c8056515..aa315c054d 100644 if (ret) { DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d", port_id, sqn, ret); -@@ -8710,6 +9187,61 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8710,6 +9197,61 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn) return 0; } @@ -64749,7 +69083,7 @@ index a3c8056515..aa315c054d 100644 int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) { -@@ -8738,6 +9270,9 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8738,6 +9280,9 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, } }; @@ -64759,7 +69093,7 @@ index a3c8056515..aa315c054d 100644 struct rte_eth_dev *proxy_dev; struct mlx5_priv *proxy_priv; 
uint16_t proxy_port_id = dev->data->port_id; -@@ -8752,6 +9287,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8752,6 +9297,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) } proxy_dev = &rte_eth_devices[proxy_port_id]; proxy_priv = proxy_dev->data->dev_private; @@ -64768,7 +69102,7 @@ index a3c8056515..aa315c054d 100644 if (!proxy_priv->dr_ctx) { DRV_LOG(DEBUG, "Transfer proxy port (port %u) of port %u must be configured " "for HWS to create default FDB jump rule. Default rule will " -@@ -8759,7 +9296,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8759,7 +9306,7 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) proxy_port_id, port_id); return 0; } @@ -64777,7 +69111,7 @@ index a3c8056515..aa315c054d 100644 DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " "default flow tables were not created.", proxy_port_id, port_id); -@@ -8767,8 +9304,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) +@@ -8767,8 +9314,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev) return -rte_errno; } return flow_hw_create_ctrl_flow(dev, proxy_dev, @@ -64788,7 +69122,7 @@ index a3c8056515..aa315c054d 100644 } int -@@ -8814,17 +9351,22 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) +@@ -8814,17 +9361,22 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev) .type = RTE_FLOW_ACTION_TYPE_END, }, }; @@ -64815,7 +69149,7 @@ index a3c8056515..aa315c054d 100644 { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rte_flow_item_sq sq_spec = { -@@ -8849,6 +9391,10 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8849,6 +9401,10 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) { .type = RTE_FLOW_ACTION_TYPE_END }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -64826,7 +69160,7 @@ index a3c8056515..aa315c054d 100644 /* It is assumed that caller checked for representor matching. */ MLX5_ASSERT(priv->sh->config.repr_matching); -@@ -8874,7 +9420,44 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) +@@ -8874,7 +9430,44 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn) actions[2].type = RTE_FLOW_ACTION_TYPE_JUMP; } return flow_hw_create_ctrl_flow(dev, dev, priv->hw_tx_repr_tagging_tbl, @@ -64872,7 +69206,7 @@ index a3c8056515..aa315c054d 100644 } static uint32_t -@@ -8989,6 +9572,9 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, +@@ -8989,6 +9582,9 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -64882,7 +69216,7 @@ index a3c8056515..aa315c054d 100644 if (!eth_spec) return -EINVAL; -@@ -9002,7 +9588,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, +@@ -9002,7 +9598,7 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev, items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type); items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END }; /* Without VLAN filtering, only a single flow rule must be created. 
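When VLAN filtering is enabled, the helpers just below install one rule per configured VLAN ID instead of the single rule mentioned above. A small sketch of that per-VLAN iteration; make_rule() is a placeholder for the driver's rule constructor:

#include <stdint.h>

static int
make_vlan_rules(const uint16_t *vlan_ids, unsigned int n,
                int (*make_rule)(uint16_t vlan_id))
{
        unsigned int i;
        int ret;

        for (i = 0; i < n; i++) {
                ret = make_rule(vlan_ids[i]);
                if (ret != 0)
                        return ret; /* earlier rules stay installed */
        }
        return 0;
}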
*/ @@ -64891,7 +69225,7 @@ index a3c8056515..aa315c054d 100644 } static int -@@ -9018,6 +9604,9 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, +@@ -9018,6 +9614,9 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -64901,7 +69235,7 @@ index a3c8056515..aa315c054d 100644 unsigned int i; if (!eth_spec) -@@ -9040,7 +9629,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, +@@ -9040,7 +9639,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev, }; items[1].spec = &vlan_spec; @@ -64911,7 +69245,7 @@ index a3c8056515..aa315c054d 100644 return -rte_errno; } return 0; -@@ -9058,6 +9648,9 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, +@@ -9058,6 +9658,9 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -64921,7 +69255,7 @@ index a3c8056515..aa315c054d 100644 const struct rte_ether_addr cmp = { .addr_bytes = "\x00\x00\x00\x00\x00\x00", }; -@@ -9081,7 +9674,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, +@@ -9081,7 +9684,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev, if (!memcmp(mac, &cmp, sizeof(*mac))) continue; memcpy(ð_spec.dst.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN); @@ -64931,7 +69265,7 @@ index a3c8056515..aa315c054d 100644 return -rte_errno; } return 0; -@@ -9100,6 +9694,9 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, +@@ -9100,6 +9704,9 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, { .type = RTE_FLOW_ACTION_TYPE_RSS }, { .type = RTE_FLOW_ACTION_TYPE_END }, }; @@ -64941,7 +69275,7 @@ index a3c8056515..aa315c054d 100644 const struct rte_ether_addr cmp = { .addr_bytes = "\x00\x00\x00\x00\x00\x00", }; -@@ -9131,7 +9728,8 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, +@@ -9131,7 +9738,8 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev, }; items[1].spec = &vlan_spec; @@ -64951,10 +69285,30 @@ index a3c8056515..aa315c054d 100644 return -rte_errno; } } +@@ -9174,7 +9782,7 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags) + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx; + unsigned int i; +- unsigned int j; ++ int j; + int ret = 0; + + RTE_SET_USED(priv); diff --git a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c -index 08f8aad70a..bcaf518227 100644 +index 08f8aad70a..0ffe738ec5 100644 --- a/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +++ b/dpdk/drivers/net/mlx5/mlx5_flow_meter.c +@@ -100,8 +100,8 @@ mlx5_flow_meter_profile_find(struct mlx5_priv *priv, uint32_t meter_profile_id) + + if (priv->mtr_profile_arr) + return &priv->mtr_profile_arr[meter_profile_id]; +- if (mlx5_l3t_get_entry(priv->mtr_profile_tbl, +- meter_profile_id, &data) || !data.ptr) ++ if (!priv->mtr_profile_tbl || ++ mlx5_l3t_get_entry(priv->mtr_profile_tbl, meter_profile_id, &data) || !data.ptr) + return NULL; + fmp = data.ptr; + /* Remove reference taken by the mlx5_l3t_get_entry. */ @@ -618,6 +618,7 @@ mlx5_flow_meter_profile_get(struct rte_eth_dev *dev, meter_profile_id); } @@ -65004,7 +69358,23 @@ index 08f8aad70a..bcaf518227 100644 /** * Check meter validation. 
* -@@ -1915,6 +1920,7 @@ error: +@@ -1631,6 +1636,7 @@ mlx5_flow_meter_action_modify(struct mlx5_priv *priv, + if (priv->sh->meter_aso_en) { + fm->is_enable = !!is_enable; + aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); ++ aso_mtr->state = ASO_METER_WAIT; + ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, + aso_mtr, &priv->mtr_bulk, NULL, true); + if (ret) +@@ -1881,6 +1887,7 @@ mlx5_flow_meter_create(struct rte_eth_dev *dev, uint32_t meter_id, + /* If ASO meter supported, update ASO flow meter by wqe. */ + if (priv->sh->meter_aso_en) { + aso_mtr = container_of(fm, struct mlx5_aso_mtr, fm); ++ aso_mtr->state = ASO_METER_WAIT; + ret = mlx5_aso_meter_update_by_wqe(priv->sh, MLX5_HW_INV_QUEUE, + aso_mtr, &priv->mtr_bulk, NULL, true); + if (ret) +@@ -1915,6 +1922,7 @@ error: NULL, "Failed to create devx meter."); } @@ -65012,7 +69382,7 @@ index 08f8aad70a..bcaf518227 100644 /** * Create meter rules. * -@@ -1998,6 +2004,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, +@@ -1998,6 +2006,7 @@ mlx5_flow_meter_hws_create(struct rte_eth_dev *dev, uint32_t meter_id, __atomic_add_fetch(&policy->ref_cnt, 1, __ATOMIC_RELAXED); return 0; } @@ -65020,7 +69390,7 @@ index 08f8aad70a..bcaf518227 100644 static int mlx5_flow_meter_params_flush(struct rte_eth_dev *dev, -@@ -2482,6 +2489,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = { +@@ -2482,6 +2491,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_ops = { .stats_read = mlx5_flow_meter_stats_read, }; @@ -65028,7 +69398,7 @@ index 08f8aad70a..bcaf518227 100644 static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { .capabilities_get = mlx5_flow_mtr_cap_get, .meter_profile_add = mlx5_flow_meter_profile_hws_add, -@@ -2500,6 +2508,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { +@@ -2500,6 +2510,7 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { .stats_update = NULL, .stats_read = NULL, }; @@ -65036,7 +69406,7 @@ index 08f8aad70a..bcaf518227 100644 /** * Get meter operations. 
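With the HWS meter ops table above now compiled only under the HWS feature guard, the getter below picks a vtable at runtime from the configured flow engine. A generic sketch of that compile-time guard plus runtime dispatch; the macro and names are illustrative stand-ins:

#include <stddef.h>

struct mtr_ops {
        int (*create)(int id);
};

static int legacy_create(int id) { (void)id; return 0; }
static const struct mtr_ops legacy_ops = { .create = legacy_create };

#ifdef HAVE_HWS /* stand-in for the real feature macro */
static int hws_create(int id) { (void)id; return 0; }
static const struct mtr_ops hws_ops = { .create = hws_create };
#endif

static const struct mtr_ops *
get_mtr_ops(int flow_engine)
{
#ifdef HAVE_HWS
        if (flow_engine == 2) /* 2 selects the HW steering engine */
                return &hws_ops;
#endif
        (void)flow_engine;
        return &legacy_ops;
}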
-@@ -2515,12 +2524,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { +@@ -2515,12 +2526,16 @@ static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) { @@ -65053,7 +69423,7 @@ index 08f8aad70a..bcaf518227 100644 return 0; } -@@ -2899,7 +2912,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) +@@ -2899,7 +2914,6 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) struct mlx5_flow_meter_profile *fmp; struct mlx5_legacy_flow_meter *legacy_fm; struct mlx5_flow_meter_info *fm; @@ -65061,7 +69431,7 @@ index 08f8aad70a..bcaf518227 100644 struct mlx5_flow_meter_sub_policy *sub_policy; void *tmp; uint32_t i, mtr_idx, policy_idx; -@@ -2967,15 +2979,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) +@@ -2967,15 +2981,20 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) mlx5_l3t_destroy(priv->policy_idx_tbl); priv->policy_idx_tbl = NULL; } @@ -65083,7 +69453,7 @@ index 08f8aad70a..bcaf518227 100644 if (priv->mtr_profile_tbl) { MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) { fmp = entry; -@@ -2989,14 +3006,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) +@@ -2989,14 +3008,17 @@ mlx5_flow_meter_flush(struct rte_eth_dev *dev, struct rte_mtr_error *error) mlx5_l3t_destroy(priv->mtr_profile_tbl); priv->mtr_profile_tbl = NULL; } @@ -65134,7 +69504,7 @@ index 28ea28bfbe..1e9c7cf7c5 100644 error); if (ret < 0) diff --git a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c -index 51704ef754..8415aa411f 100644 +index 51704ef754..3250255727 100644 --- a/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c +++ b/dpdk/drivers/net/mlx5/mlx5_hws_cnt.c @@ -25,28 +25,32 @@ static void @@ -65187,7 +69557,56 @@ index 51704ef754..8415aa411f 100644 } } for (; iidx < cnt_num; iidx++) { -@@ -306,26 +310,25 @@ mlx5_hws_cnt_svc(void *opaque) +@@ -68,26 +72,29 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh, + uint32_t ret __rte_unused; + + reset_cnt_num = rte_ring_count(reset_list); +- do { +- cpool->query_gen++; +- mlx5_aso_cnt_query(sh, cpool); +- zcdr.n1 = 0; +- zcdu.n1 = 0; +- ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list, +- sizeof(cnt_id_t), +- reset_cnt_num, &zcdu, +- NULL); +- MLX5_ASSERT(ret == reset_cnt_num); +- ret = rte_ring_dequeue_zc_burst_elem_start(reset_list, +- sizeof(cnt_id_t), +- reset_cnt_num, &zcdr, +- NULL); +- MLX5_ASSERT(ret == reset_cnt_num); +- __hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num); +- rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num); +- rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num); ++ cpool->query_gen++; ++ mlx5_aso_cnt_query(sh, cpool); ++ zcdr.n1 = 0; ++ zcdu.n1 = 0; ++ ret = rte_ring_enqueue_zc_burst_elem_start(reuse_list, ++ sizeof(cnt_id_t), ++ reset_cnt_num, &zcdu, ++ NULL); ++ MLX5_ASSERT(ret == reset_cnt_num); ++ ret = rte_ring_dequeue_zc_burst_elem_start(reset_list, ++ sizeof(cnt_id_t), ++ reset_cnt_num, &zcdr, ++ NULL); ++ MLX5_ASSERT(ret == reset_cnt_num); ++ __hws_cnt_r2rcpy(&zcdu, &zcdr, reset_cnt_num); ++ rte_ring_dequeue_zc_elem_finish(reset_list, reset_cnt_num); ++ rte_ring_enqueue_zc_elem_finish(reuse_list, reset_cnt_num); ++ ++ if (rte_log_can_log(mlx5_logtype, RTE_LOG_DEBUG)) { + reset_cnt_num = rte_ring_count(reset_list); +- } while (reset_cnt_num > 0); ++ DRV_LOG(DEBUG, "ibdev %s cpool %p wait_reset_cnt=%" PRIu32, ++ sh->ibdev_name, (void *)cpool, reset_cnt_num); ++ } + } + + /** 
+@@ -306,34 +313,87 @@ mlx5_hws_cnt_svc(void *opaque) (struct mlx5_dev_ctx_shared *)opaque; uint64_t interval = (uint64_t)sh->cnt_svc->query_interval * (US_PER_S / MS_PER_S); @@ -65224,8 +69643,14 @@ index 51704ef754..8415aa411f 100644 + rte_spinlock_unlock(&sh->cpool_lock); query_us = query_cycle / (rte_get_timer_hz() / US_PER_S); sleep_us = interval - query_us; ++ DRV_LOG(DEBUG, "ibdev %s counter service thread: " ++ "interval_us=%" PRIu64 " query_us=%" PRIu64 " " ++ "sleep_us=%" PRIu64, ++ sh->ibdev_name, interval, query_us, ++ interval > query_us ? sleep_us : 0); if (interval > query_us) -@@ -334,6 +337,55 @@ mlx5_hws_cnt_svc(void *opaque) + rte_delay_us_sleep(sleep_us); + } return NULL; } @@ -65281,7 +69706,7 @@ index 51704ef754..8415aa411f 100644 struct mlx5_hws_cnt_pool * mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, const struct mlx5_hws_cnt_pool_cfg *pcfg, -@@ -342,7 +394,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, +@@ -342,7 +402,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, char mz_name[RTE_MEMZONE_NAMESIZE]; struct mlx5_hws_cnt_pool *cntp; uint64_t cnt_num = 0; @@ -65289,7 +69714,7 @@ index 51704ef754..8415aa411f 100644 MLX5_ASSERT(pcfg); MLX5_ASSERT(ccfg); -@@ -352,17 +403,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, +@@ -352,17 +411,6 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, return NULL; cntp->cfg = *pcfg; @@ -65307,7 +69732,7 @@ index 51704ef754..8415aa411f 100644 if (pcfg->request_num > sh->hws_max_nb_counters) { DRV_LOG(ERR, "Counter number %u " "is greater than the maximum supported (%u).", -@@ -409,14 +449,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, +@@ -409,14 +457,10 @@ mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, DRV_LOG(ERR, "failed to create reuse list ring"); goto error; } @@ -65326,7 +69751,7 @@ index 51704ef754..8415aa411f 100644 goto error; } /* Initialize the time for aging-out calculation. 
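The reworked service thread above polls every counter pool under a lock, then sleeps only for what is left of the configured interval. A skeleton of that pacing logic in portable C; now_us() and the callback are stand-ins for the driver's timer and per-pool query:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint64_t
now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

/* Poll, measure how long the work took, and sleep only the remainder so
 * the period stays close to interval_us even when queries are slow. */
static void
service_loop(volatile bool *running, uint64_t interval_us,
             void (*poll_all_pools)(void))
{
        while (*running) {
                uint64_t start = now_us();
                uint64_t spent;

                poll_all_pools(); /* done under a list lock in the driver */
                spent = now_us() - start;
                if (interval_us > spent)
                        usleep(interval_us - spent);
        }
}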
*/ -@@ -634,7 +670,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, +@@ -634,7 +678,7 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, SOCKET_ID_ANY); if (mp_name == NULL) goto error; @@ -65335,7 +69760,7 @@ index 51704ef754..8415aa411f 100644 dev->data->port_id); pcfg.name = mp_name; pcfg.request_num = pattr->nb_counters; -@@ -660,6 +696,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, +@@ -660,6 +704,10 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev, if (ret != 0) goto error; priv->sh->cnt_svc->refcnt++; @@ -65346,7 +69771,7 @@ index 51704ef754..8415aa411f 100644 return cpool; error: mlx5_hws_cnt_pool_destroy(priv->sh, cpool); -@@ -672,6 +712,15 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, +@@ -672,6 +720,15 @@ mlx5_hws_cnt_pool_destroy(struct mlx5_dev_ctx_shared *sh, { if (cpool == NULL) return; @@ -65362,7 +69787,7 @@ index 51704ef754..8415aa411f 100644 if (--sh->cnt_svc->refcnt == 0) mlx5_hws_cnt_svc_deinit(sh); mlx5_hws_cnt_pool_action_destroy(cpool); -@@ -1229,11 +1278,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv) +@@ -1229,11 +1286,13 @@ mlx5_hws_age_pool_destroy(struct mlx5_priv *priv) { struct mlx5_age_info *age_info = GET_PORT_AGE_INFO(priv); @@ -65430,7 +69855,7 @@ index 030dcead86..72751f3330 100644 mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool) { diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.c b/dpdk/drivers/net/mlx5/mlx5_rx.c -index 917c517b83..56e5568f33 100644 +index 917c517b83..0762782217 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.c +++ b/dpdk/drivers/net/mlx5/mlx5_rx.c @@ -39,7 +39,8 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, @@ -65480,9 +69905,12 @@ index 917c517b83..56e5568f33 100644 { const uint16_t cqe_n = 1 << rxq->cqe_n; const uint16_t cqe_mask = cqe_n - 1; -@@ -442,13 +453,39 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) +@@ -440,15 +451,41 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + union { volatile struct mlx5_cqe *cqe; - volatile struct mlx5_err_cqe *err_cqe; +- volatile struct mlx5_err_cqe *err_cqe; ++ volatile struct mlx5_error_cqe *err_cqe; } u = { - .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask], + .cqe = &(*rxq->cqes)[(rxq->cq_ci - vec) & cqe_mask], @@ -65701,7 +70129,7 @@ index 917c517b83..56e5568f33 100644 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; MLX5_ASSERT((int)len >= (rxq->crc_present << 2)); diff --git a/dpdk/drivers/net/mlx5/mlx5_rx.h b/dpdk/drivers/net/mlx5/mlx5_rx.h -index e078aaf3dc..6b42e27c89 100644 +index e078aaf3dc..7d0d76123f 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rx.h +++ b/dpdk/drivers/net/mlx5/mlx5_rx.h @@ -62,6 +62,7 @@ enum mlx5_rxq_err_state { @@ -65712,7 +70140,15 @@ index e078aaf3dc..6b42e27c89 100644 }; enum mlx5_rqx_code { -@@ -286,7 +287,8 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx, +@@ -169,6 +170,7 @@ struct mlx5_rxq_ctrl { + struct mlx5_rxq_priv { + uint16_t idx; /* Queue index. */ + uint32_t refcnt; /* Reference counter. */ ++ bool possessor; /* Shared rxq_ctrl allocated for the 1st time. */ + struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */ + LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */ + struct mlx5_priv *priv; /* Back pointer to private data. 
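The new possessor flag in the Rx queue structure above marks the one queue that allocated (and listed) the shared rxq_ctrl, so teardown unlinks it exactly once even when several ports reference it. A reduced model of that ownership rule, simplified to the device-close path where the possessor releases last; the structs are illustrative:

#include <stdbool.h>
#include <stdlib.h>

struct shared_ctrl {
        unsigned int refcnt;
        bool on_list; /* registered in some global list */
};

struct queue {
        bool possessor; /* true only for the queue that created ctrl */
        struct shared_ctrl *ctrl;
};

static void
queue_release(struct queue *q)
{
        struct shared_ctrl *ctrl = q->ctrl;

        q->ctrl = NULL;
        if (--ctrl->refcnt > 0)
                return;
        /* Only the creator unlinks; other owners never touch the list. */
        if (q->possessor)
                ctrl->on_list = false; /* LIST_REMOVE() in the driver */
        free(ctrl);
}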
*/ +@@ -286,7 +288,8 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx, uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq); @@ -65723,7 +70159,7 @@ index e078aaf3dc..6b42e27c89 100644 uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n); diff --git a/dpdk/drivers/net/mlx5/mlx5_rxq.c b/dpdk/drivers/net/mlx5/mlx5_rxq.c -index 81aa3f074a..9179b9d9d7 100644 +index 81aa3f074a..fcf6ab54b6 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxq.c +++ b/dpdk/drivers/net/mlx5/mlx5_rxq.c @@ -528,12 +528,12 @@ mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx) @@ -65744,7 +70180,30 @@ index 81aa3f074a..9179b9d9d7 100644 rte_errno = EINVAL; return -EINVAL; } -@@ -1601,10 +1601,10 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, +@@ -652,6 +652,14 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc, + struct mlx5_rxq_priv *rxq; + bool empty; + ++ if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { ++ DRV_LOG(ERR, ++ "port %u number of descriptors requested for Rx queue" ++ " %u is more than supported", ++ dev->data->port_id, idx); ++ rte_errno = EINVAL; ++ return -EINVAL; ++ } + if (!rte_is_power_of_2(*desc)) { + *desc = 1 << log2above(*desc); + DRV_LOG(WARNING, +@@ -935,6 +943,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + rte_errno = ENOMEM; + return -rte_errno; + } ++ rxq->possessor = true; + } + rxq->priv = priv; + rxq->idx = idx; +@@ -1601,10 +1610,10 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } else { *actual_log_stride_num = config->mprq.log_stride_num; } @@ -65758,7 +70217,7 @@ index 81aa3f074a..9179b9d9d7 100644 *actual_log_stride_size = log_def_stride_size; DRV_LOG(WARNING, "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)", -@@ -1614,10 +1614,26 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, +@@ -1614,10 +1623,26 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, *actual_log_stride_size = config->mprq.log_stride_size; } } else { @@ -65787,7 +70246,7 @@ index 81aa3f074a..9179b9d9d7 100644 } log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size; /* Check if WQE buffer size is supported by hardware. */ -@@ -1657,6 +1673,8 @@ unsupport: +@@ -1657,6 +1682,8 @@ unsupport: " min_stride_sz = %u, max_stride_sz = %u).\n" "Rx segment is %senabled. External mempool is %sused.", dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n, @@ -65796,15 +70255,24 @@ index 81aa3f074a..9179b9d9d7 100644 RTE_BIT32(config->mprq.log_stride_size), RTE_BIT32(config->mprq.log_stride_num), config->mprq.min_rxqs_num, -@@ -2262,6 +2280,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) +@@ -1995,6 +2022,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, + tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 }; + tmpl->rxq.idx = idx; + rxq->hairpin_conf = *hairpin_conf; ++ rxq->possessor = true; + mlx5_rxq_ref(dev, idx); + LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); + return tmpl; +@@ -2262,6 +2290,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) RTE_ETH_QUEUE_STATE_STOPPED; } } else { /* Refcnt zero, closing device. 
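Among the hunks above, the Rx queue pre-setup now rejects descriptor counts beyond the device's advertised limit before the usual power-of-two rounding. The bound is a plain log2 comparison; a sketch with an errno-style return and simplified names:

#include <errno.h>
#include <stdint.h>

static uint32_t
round_up_pow2(uint32_t v)
{
        uint32_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

/* Validate against the device bound first, then round up, mirroring the
 * order used in the setup path above. */
static int
normalize_desc(uint32_t *desc, uint8_t log_max_wq_sz)
{
        if (*desc > (1u << log_max_wq_sz))
                return -EINVAL;
        *desc = round_up_pow2(*desc);
        return 0;
}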
*/ -+ LIST_REMOVE(rxq_ctrl, next); ++ if (rxq->possessor) ++ LIST_REMOVE(rxq_ctrl, next); LIST_REMOVE(rxq, owner_entry); if (LIST_EMPTY(&rxq_ctrl->owners)) { if (!rxq_ctrl->is_hairpin) -@@ -2269,7 +2288,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) +@@ -2269,7 +2299,6 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) (&rxq_ctrl->rxq.mr_ctrl.cache_bh); if (rxq_ctrl->rxq.shared) LIST_REMOVE(rxq_ctrl, share_entry); @@ -65812,6 +70280,28 @@ index 81aa3f074a..9179b9d9d7 100644 mlx5_free(rxq_ctrl); } dev->data->rx_queues[idx] = NULL; +@@ -2862,6 +2891,7 @@ static void + __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) + { + struct mlx5_priv *priv = dev->data->dev_private; ++ bool deref_rxqs = true; + + #ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (hrxq->hws_flags) +@@ -2871,9 +2901,10 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) + #endif + priv->obj_ops.hrxq_destroy(hrxq); + if (!hrxq->standalone) { +- mlx5_ind_table_obj_release(dev, hrxq->ind_table, +- hrxq->hws_flags ? +- (!!dev->data->dev_started) : true); ++ if (!dev->data->dev_started && hrxq->hws_flags && ++ !priv->hws_rule_flushing) ++ deref_rxqs = false; ++ mlx5_ind_table_obj_release(dev, hrxq->ind_table, deref_rxqs); + } + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); + } diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c index 0e2eab068a..667475a93e 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec.c @@ -65886,10 +70376,20 @@ index 0e2eab068a..667475a93e 100644 return tn; } diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h -index 683a8f9a6c..204d17a8f2 100644 +index 683a8f9a6c..886314b086 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h -@@ -783,7 +783,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -93,8 +93,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + -1, -1, /* skip vlan_tci */ + 11, 10, 9, 8}; /* bswap32, rss */ + /* Restore the compressed count. Must be 16 bits. */ +- const uint16_t mcqe_n = t_pkt->data_len + +- (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ const uint16_t mcqe_n = rte_be_to_cpu_32(cq->byte_cnt); + const __vector unsigned char rearm = + (__vector unsigned char)vec_vsx_ld(0, + (signed int const *)&t_pkt->rearm_data); +@@ -783,7 +782,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, { const uint16_t q_n = 1 << rxq->cqe_n; const uint16_t q_mask = q_n - 1; @@ -65898,7 +70398,7 @@ index 683a8f9a6c..204d17a8f2 100644 uint64_t n = 0; uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; uint16_t nocmp_n = 0; -@@ -866,7 +866,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -866,7 +865,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, __vector unsigned char pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; __vector unsigned char op_own, op_own_tmp1, op_own_tmp2; __vector unsigned char opcode, owner_mask, invalid_mask; @@ -65907,7 +70407,7 @@ index 683a8f9a6c..204d17a8f2 100644 __vector unsigned char mask; #ifdef MLX5_PMD_SOFT_COUNTERS const __vector unsigned char lower_half = { -@@ -1174,6 +1174,16 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -1174,6 +1173,16 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, (__vector unsigned long)mask); /* D.3 check error in opcode. 
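The vectorized fix that follows builds a mask of compressed (mini-CQE) slots and strips them from the error comparison, since a compressed entry reuses those bytes for payload rather than a real opcode. A scalar equivalent of that lane masking; the two constants are illustrative, not the hardware's encoding:

#include <stdint.h>

#define RESP_ERR   0x1d /* illustrative "error" opcode value */
#define MINI_ARRAY 0x03 /* illustrative "compressed" format value */

/* Return a bitmap of lanes whose opcode signals an error, skipping lanes
 * that carry compressed entries. */
static uint64_t
error_lanes(const uint8_t *op, const uint8_t *fmt, unsigned int n)
{
        uint64_t err = 0;
        unsigned int i;

        for (i = 0; i < n && i < 64; i++)
                if (op[i] == RESP_ERR && fmt[i] != MINI_ARRAY)
                        err |= UINT64_C(1) << i;
        return err;
}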
*/ @@ -65924,7 +70424,7 @@ index 683a8f9a6c..204d17a8f2 100644 opcode = (__vector unsigned char) vec_cmpeq((__vector unsigned int)resp_err_check, (__vector unsigned int)opcode); -@@ -1182,7 +1192,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -1182,7 +1191,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, (__vector unsigned int)zero); opcode = (__vector unsigned char) vec_andc((__vector unsigned long)opcode, @@ -65933,11 +70433,55 @@ index 683a8f9a6c..204d17a8f2 100644 /* D.4 mark if any error is set */ *err |= ((__vector unsigned long)opcode)[0]; +@@ -1191,9 +1200,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (unlikely(rxq->shared)) { + pkts[pos]->port = cq[pos].user_index_low; +- pkts[pos + p1]->port = cq[pos + p1].user_index_low; +- pkts[pos + p2]->port = cq[pos + p2].user_index_low; +- pkts[pos + p3]->port = cq[pos + p3].user_index_low; ++ pkts[pos + 1]->port = cq[pos + p1].user_index_low; ++ pkts[pos + 2]->port = cq[pos + p2].user_index_low; ++ pkts[pos + 3]->port = cq[pos + p3].user_index_low; + } + if (rxq->hw_timestamp) { + int offset = rxq->timestamp_offset; +@@ -1237,17 +1246,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + metadata; + pkts[pos]->ol_flags |= metadata ? flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 1].flow_table_metadata) & mask; ++ (cq[pos + p1].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) = + metadata; + pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 2].flow_table_metadata) & mask; ++ (cq[pos + p2].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) = + metadata; + pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL; + metadata = rte_be_to_cpu_32 +- (cq[pos + 3].flow_table_metadata) & mask; ++ (cq[pos + p3].flow_table_metadata) & mask; + *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) = + metadata; + pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL; diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h -index f7bbde4e0e..6d3c594e56 100644 +index f7bbde4e0e..ce647239fe 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_neon.h -@@ -524,7 +524,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -92,8 +92,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + 11, 10, 9, 8 /* hash.rss, bswap32 */ + }; + /* Restore the compressed count. Must be 16 bits. 
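The replacement just below derives the compressed-session count from the CQE's big-endian byte_cnt field instead of inferring it from the title packet's length, which breaks when CRC stripping changes data_len. A tiny sketch of that read, assuming a raw CQE buffer and a known field offset:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Fetch a 32-bit big-endian counter from a raw CQE and truncate it to the
 * 16 bits the session count is specified to fit in. */
static uint16_t
mcqe_count(const uint8_t *cqe, size_t byte_cnt_off)
{
        uint32_t be;

        memcpy(&be, cqe + byte_cnt_off, sizeof(be)); /* unaligned-safe */
        return (uint16_t)ntohl(be);
}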
*/ +- const uint16_t mcqe_n = t_pkt->data_len + +- (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ const uint16_t mcqe_n = rte_be_to_cpu_32(cq->byte_cnt); + const uint64x2_t rearm = + vld1q_u64((void *)&t_pkt->rearm_data); + const uint32x4_t rxdf_mask = { +@@ -524,7 +523,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, { const uint16_t q_n = 1 << rxq->cqe_n; const uint16_t q_mask = q_n - 1; @@ -65946,7 +70490,7 @@ index f7bbde4e0e..6d3c594e56 100644 uint64_t n = 0; uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; uint16_t nocmp_n = 0; -@@ -616,7 +616,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -616,7 +615,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, pos += MLX5_VPMD_DESCS_PER_LOOP) { uint16x4_t op_own; uint16x4_t opcode, owner_mask, invalid_mask; @@ -65955,7 +70499,7 @@ index f7bbde4e0e..6d3c594e56 100644 uint16x4_t mask; uint16x4_t byte_cnt; uint32x4_t ptype_info, flow_tag; -@@ -647,6 +647,14 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -647,6 +646,14 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, c0 = vld1q_u64((uint64_t *)(p0 + 48)); /* Synchronize for loading the rest of blocks. */ rte_io_rmb(); @@ -65970,7 +70514,7 @@ index f7bbde4e0e..6d3c594e56 100644 /* Prefetch next 4 CQEs. */ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP; -@@ -780,8 +788,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -780,21 +787,25 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, -1UL >> (n * sizeof(uint16_t) * 8) : 0); invalid_mask = vorr_u16(invalid_mask, mask); /* D.3 check error in opcode. */ @@ -65984,11 +70528,81 @@ index f7bbde4e0e..6d3c594e56 100644 /* D.4 mark if any error is set */ *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0); /* C.4 fill in mbuf - rearm_data and packet_type. 
*/ + rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag, + opcode, &elts[pos]); + if (unlikely(rxq->shared)) { +- elts[pos]->port = container_of(p0, struct mlx5_cqe, ++ pkts[pos]->port = container_of(p0, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 1]->port = container_of(p1, struct mlx5_cqe, ++ pkts[pos + 1]->port = container_of(p1, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 2]->port = container_of(p2, struct mlx5_cqe, ++ pkts[pos + 2]->port = container_of(p2, struct mlx5_cqe, + pkt_info)->user_index_low; +- elts[pos + 3]->port = container_of(p3, struct mlx5_cqe, ++ pkts[pos + 3]->port = container_of(p3, struct mlx5_cqe, + pkt_info)->user_index_low; + } + if (unlikely(rxq->hw_timestamp)) { +@@ -806,34 +817,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + ts = rte_be_to_cpu_64 + (container_of(p0, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos], offset, ++ mlx5_timestamp_set(pkts[pos], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p1, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos + 1], offset, ++ mlx5_timestamp_set(pkts[pos + 1], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p2, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos + 2], offset, ++ mlx5_timestamp_set(pkts[pos + 2], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + ts = rte_be_to_cpu_64 + (container_of(p3, struct mlx5_cqe, + pkt_info)->timestamp); +- mlx5_timestamp_set(elts[pos + 3], offset, ++ mlx5_timestamp_set(pkts[pos + 3], offset, + mlx5_txpp_convert_rx_ts(sh, ts)); + } else { +- mlx5_timestamp_set(elts[pos], offset, ++ mlx5_timestamp_set(pkts[pos], offset, + rte_be_to_cpu_64(container_of(p0, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 1], offset, ++ mlx5_timestamp_set(pkts[pos + 1], offset, + rte_be_to_cpu_64(container_of(p1, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 2], offset, ++ mlx5_timestamp_set(pkts[pos + 2], offset, + rte_be_to_cpu_64(container_of(p2, + struct mlx5_cqe, pkt_info)->timestamp)); +- mlx5_timestamp_set(elts[pos + 3], offset, ++ mlx5_timestamp_set(pkts[pos + 3], offset, + rte_be_to_cpu_64(container_of(p3, + struct mlx5_cqe, pkt_info)->timestamp)); + } diff --git a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h -index 185d2695db..ab69af0c55 100644 +index 185d2695db..10afdcf816 100644 --- a/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h +++ b/dpdk/drivers/net/mlx5/mlx5_rxtx_vec_sse.h -@@ -523,7 +523,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -92,8 +92,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, + -1, -1, 14, 15, /* pkt_len, bswap16 */ + -1, -1, -1, -1 /* skip packet_type */); + /* Restore the compressed count. Must be 16 bits. 
*/ +- const uint16_t mcqe_n = t_pkt->data_len + +- (rxq->crc_present * RTE_ETHER_CRC_LEN); ++ const uint16_t mcqe_n = rte_be_to_cpu_32(cq->byte_cnt); + const __m128i rearm = + _mm_loadu_si128((__m128i *)&t_pkt->rearm_data); + const __m128i rxdf = +@@ -523,7 +522,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, { const uint16_t q_n = 1 << rxq->cqe_n; const uint16_t q_mask = q_n - 1; @@ -65997,7 +70611,7 @@ index 185d2695db..ab69af0c55 100644 uint64_t n = 0; uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP; uint16_t nocmp_n = 0; -@@ -591,7 +591,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -591,7 +590,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3; __m128i op_own, op_own_tmp1, op_own_tmp2; __m128i opcode, owner_mask, invalid_mask; @@ -66006,7 +70620,7 @@ index 185d2695db..ab69af0c55 100644 __m128i mask; #ifdef MLX5_PMD_SOFT_COUNTERS __m128i byte_cnt; -@@ -729,9 +729,12 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, +@@ -729,18 +728,21 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, mask = _mm_sll_epi64(ones, mask); invalid_mask = _mm_or_si128(invalid_mask, mask); /* D.3 check error in opcode. */ @@ -66020,6 +70634,18 @@ index 185d2695db..ab69af0c55 100644 /* D.4 mark if any error is set */ *err |= _mm_cvtsi128_si64(opcode); /* D.5 fill in mbuf - rearm_data and packet_type. */ + rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]); + if (unlikely(rxq->shared)) { + pkts[pos]->port = cq[pos].user_index_low; +- pkts[pos + p1]->port = cq[pos + p1].user_index_low; +- pkts[pos + p2]->port = cq[pos + p2].user_index_low; +- pkts[pos + p3]->port = cq[pos + p3].user_index_low; ++ pkts[pos + 1]->port = cq[pos + p1].user_index_low; ++ pkts[pos + 2]->port = cq[pos + p2].user_index_low; ++ pkts[pos + 3]->port = cq[pos + p3].user_index_low; + } + if (unlikely(rxq->hw_timestamp)) { + int offset = rxq->timestamp_offset; diff --git a/dpdk/drivers/net/mlx5/mlx5_stats.c b/dpdk/drivers/net/mlx5/mlx5_stats.c index f64fa3587b..f4ac58e2f9 100644 --- a/dpdk/drivers/net/mlx5/mlx5_stats.c @@ -66269,23 +70895,94 @@ index f54443ed1a..1cb0b56ae1 100644 if (priv->isolated) return 0; diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.c b/dpdk/drivers/net/mlx5/mlx5_tx.c -index a13c7e937c..14e1487e59 100644 +index a13c7e937c..8b1a0ca3d3 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.c +++ b/dpdk/drivers/net/mlx5/mlx5_tx.c +@@ -55,7 +55,7 @@ tx_recover_qp(struct mlx5_txq_ctrl *txq_ctrl) + + /* Return 1 if the error CQE is signed otherwise, sign it and return 0. 
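As the comment above says, the error CQE is "signed" in place so a second pass over the same queue does not handle it twice. A standalone sketch of that mark-once idiom, stamping a 4-byte magic into spare padding bytes:

#include <string.h>

/* Returns 1 when the buffer was already stamped; otherwise stamps it and
 * returns 0, so exactly one caller performs the recovery work. */
static int
seen_before(unsigned char *pad, size_t len)
{
        static const unsigned char magic[4] = "seen";

        if (len < sizeof(magic))
                return 0;
        if (memcmp(pad, magic, sizeof(magic)) == 0)
                return 1;
        memcpy(pad, magic, sizeof(magic));
        return 0;
}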
*/ + static int +-check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) ++check_err_cqe_seen(volatile struct mlx5_error_cqe *err_cqe) + { + static const uint8_t magic[] = "seen"; + int ret = 1; +@@ -83,7 +83,7 @@ check_err_cqe_seen(volatile struct mlx5_err_cqe *err_cqe) + */ + static int + mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, +- volatile struct mlx5_err_cqe *err_cqe) ++ volatile struct mlx5_error_cqe *err_cqe) + { + if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) { + const uint16_t wqe_m = ((1 << txq->wqe_n) - 1); @@ -107,7 +107,7 @@ mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq, mlx5_dump_debug_information(name, "MLX5 Error CQ:", (const void *)((uintptr_t) txq->cqes), - sizeof(*err_cqe) * -+ sizeof(struct mlx5_cqe) * ++ sizeof(struct mlx5_error_cqe) * (1 << txq->cqe_n)); mlx5_dump_debug_information(name, "MLX5 Error SQ:", (const void *)((uintptr_t) +@@ -206,7 +206,7 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq, + */ + rte_wmb(); + ret = mlx5_tx_error_cqe_handle +- (txq, (volatile struct mlx5_err_cqe *)cqe); ++ (txq, (volatile struct mlx5_error_cqe *)cqe); + if (unlikely(ret < 0)) { + /* + * Some error occurred on queue error diff --git a/dpdk/drivers/net/mlx5/mlx5_tx.h b/dpdk/drivers/net/mlx5/mlx5_tx.h -index a44050a1ce..ff23d87b8a 100644 +index a44050a1ce..6b796b97e1 100644 --- a/dpdk/drivers/net/mlx5/mlx5_tx.h +++ b/dpdk/drivers/net/mlx5/mlx5_tx.h -@@ -817,7 +817,7 @@ mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq, +@@ -18,6 +18,7 @@ + #include + + #include "mlx5.h" ++#include "mlx5_rx.h" + #include "mlx5_autoconf.h" + + /* TX burst subroutines return codes. */ +@@ -361,6 +362,35 @@ mlx5_txpp_convert_tx_ts(struct mlx5_dev_ctx_shared *sh, uint64_t mts) + return ci; + } + ++/** ++ * Read real time clock counter directly from the device PCI BAR area. ++ * The PCI BAR must be mapped to the process memory space at initialization. ++ * ++ * @param dev ++ * Device to read clock counter from ++ * ++ * @return ++ * 0 - if HCA BAR is not supported or not mapped. ++ * !=0 - read 64-bit value of real-time in UTC formatv (nanoseconds) ++ */ ++static __rte_always_inline uint64_t mlx5_read_pcibar_clock(struct rte_eth_dev *dev) ++{ ++ struct mlx5_proc_priv *ppriv = dev->process_private; ++ ++ if (ppriv && ppriv->hca_bar) { ++ struct mlx5_priv *priv = dev->data->dev_private; ++ struct mlx5_dev_ctx_shared *sh = priv->sh; ++ uint64_t *hca_ptr = (uint64_t *)(ppriv->hca_bar) + ++ __mlx5_64_off(initial_seg, real_time); ++ uint64_t ts = __atomic_load_n(hca_ptr, __ATOMIC_SEQ_CST); ++ ++ ts = rte_be_to_cpu_64(ts); ++ ts = mlx5_txpp_convert_rx_ts(sh, ts); ++ return ts; ++ } ++ return 0; ++} ++ + /** + * Set Software Parser flags and offsets in Ethernet Segment of WQE. + * Flags must be preliminary initialized to zero. +@@ -817,7 +847,7 @@ mlx5_tx_wseg_init(struct mlx5_txq_data *restrict txq, struct mlx5_wqe_wseg *ws; ws = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE); @@ -66294,7 +70991,7 @@ index a44050a1ce..ff23d87b8a 100644 ws->lkey = RTE_BE32(0); ws->va_high = RTE_BE32(0); ws->va_low = RTE_BE32(0); -@@ -1975,7 +1975,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, +@@ -1975,7 +2005,7 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq, uintptr_t start; mbuf = loc->mbuf; @@ -66304,19 +71001,18 @@ index a44050a1ce..ff23d87b8a 100644 * Packet length exceeds the allowed inline data length, * check whether the minimal inlining is required. 
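The PCI BAR clock helper added above reads the device-maintained 64-bit real-time counter through the mapped BAR with a single atomic load, then byte-swaps it from the device's big-endian layout, so the two 32-bit halves can never tear. The core of that access pattern, sketched for GCC/Clang builtins:

#include <stdint.h>
#include <endian.h>

static uint64_t
read_bar_clock(const uint64_t *bar_clock)
{
        /* One 8-byte atomic load: no torn high/low word pairs. */
        uint64_t raw = __atomic_load_n(bar_clock, __ATOMIC_SEQ_CST);

        return be64toh(raw); /* device stores the counter big-endian */
}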
diff --git a/dpdk/drivers/net/mlx5/mlx5_txpp.c b/dpdk/drivers/net/mlx5/mlx5_txpp.c -index f853a67f58..0e1da1d5f5 100644 +index f853a67f58..c43a5a7927 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txpp.c +++ b/dpdk/drivers/net/mlx5/mlx5_txpp.c -@@ -969,6 +969,8 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) +@@ -969,6 +969,7 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_ctx_shared *sh = priv->sh; -+ struct mlx5_proc_priv *ppriv; + uint64_t ts; int ret; if (sh->txpp.refcnt) { -@@ -979,7 +981,6 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) +@@ -979,7 +980,6 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) rte_int128_t u128; struct mlx5_cqe_ts cts; } to; @@ -66324,26 +71020,20 @@ index f853a67f58..0e1da1d5f5 100644 mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128); if (to.cts.op_own >> 4) { -@@ -994,6 +995,18 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) +@@ -994,6 +994,12 @@ mlx5_txpp_read_clock(struct rte_eth_dev *dev, uint64_t *timestamp) *timestamp = ts; return 0; } -+ /* Check and try to map HCA PIC BAR to allow reading real time. */ -+ ppriv = dev->process_private; -+ if (ppriv && !ppriv->hca_bar && -+ sh->dev_cap.rt_timestamp && mlx5_dev_is_pci(dev->device)) -+ mlx5_txpp_map_hca_bar(dev); + /* Check if we can read timestamp directly from hardware. */ -+ if (ppriv && ppriv->hca_bar) { -+ ts = MLX5_GET64(initial_seg, ppriv->hca_bar, real_time); -+ ts = mlx5_txpp_convert_rx_ts(sh, ts); ++ ts = mlx5_read_pcibar_clock(dev); ++ if (ts != 0) { + *timestamp = ts; + return 0; + } /* Not supported in isolated mode - kernel does not see the CQEs. */ if (priv->isolated || rte_eal_process_type() != RTE_PROC_PRIMARY) return -ENOTSUP; -@@ -1050,11 +1063,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused, +@@ -1050,11 +1056,9 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused, if (n >= n_used + n_txpp && xstats_names) { for (i = 0; i < n_txpp; ++i) { @@ -66357,10 +71047,25 @@ index f853a67f58..0e1da1d5f5 100644 } return n_used + n_txpp; diff --git a/dpdk/drivers/net/mlx5/mlx5_txq.c b/dpdk/drivers/net/mlx5/mlx5_txq.c -index 5543f2c570..46badcd0cc 100644 +index 5543f2c570..cdc9755fe0 100644 --- a/dpdk/drivers/net/mlx5/mlx5_txq.c +++ b/dpdk/drivers/net/mlx5/mlx5_txq.c -@@ -1310,8 +1310,23 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) +@@ -332,6 +332,14 @@ mlx5_tx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) + { + struct mlx5_priv *priv = dev->data->dev_private; + ++ if (*desc > 1 << priv->sh->cdev->config.hca_attr.log_max_wq_sz) { ++ DRV_LOG(ERR, ++ "port %u number of descriptors requested for Tx queue" ++ " %u is more than supported", ++ dev->data->port_id, idx); ++ rte_errno = EINVAL; ++ return -EINVAL; ++ } + if (*desc <= MLX5_TX_COMP_THRESH) { + DRV_LOG(WARNING, + "port %u number of descriptors requested for Tx queue" +@@ -1310,8 +1318,23 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num) return -rte_errno; } #ifdef HAVE_MLX5_HWS_SUPPORT @@ -66528,7 +71233,7 @@ index 1c1c17fc41..856d8ba948 100644 + #endif /* RTE_PMD_MLX5_FLOW_OS_H_ */ diff --git a/dpdk/drivers/net/mlx5/windows/mlx5_os.c b/dpdk/drivers/net/mlx5/windows/mlx5_os.c -index 77f04cc931..f401264b61 100644 +index 77f04cc931..d35b949b34 100644 --- a/dpdk/drivers/net/mlx5/windows/mlx5_os.c +++ b/dpdk/drivers/net/mlx5/windows/mlx5_os.c @@ -193,8 
+193,8 @@ mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh) @@ -66542,11 +71247,39 @@ index 77f04cc931..f401264b61 100644 DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u", sh->dev_cap.ind_table_max_size); } +@@ -474,9 +474,11 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev, + claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); + priv->ctrl_flows = 0; + TAILQ_INIT(&priv->flow_meters); +- priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); +- if (!priv->mtr_profile_tbl) +- goto error; ++ if (priv->mtr_en) { ++ priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); ++ if (!priv->mtr_profile_tbl) ++ goto error; ++ } + /* Bring Ethernet device up. */ + DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", + eth_dev->data->port_id); diff --git a/dpdk/drivers/net/mvneta/mvneta_ethdev.c b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -index c4355a3f64..f281d1d7f8 100644 +index c4355a3f64..6821f8e559 100644 --- a/dpdk/drivers/net/mvneta/mvneta_ethdev.c +++ b/dpdk/drivers/net/mvneta/mvneta_ethdev.c -@@ -198,7 +198,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +@@ -91,6 +91,12 @@ mvneta_ifnames_get(const char *key __rte_unused, const char *value, + { + struct mvneta_ifnames *ifnames = extra_args; + ++ if (ifnames->idx >= NETA_NUM_ETH_PPIO) { ++ MVNETA_LOG(ERR, "Too many ifnames specified (max %u)", ++ NETA_NUM_ETH_PPIO); ++ return -EINVAL; ++ } ++ + ifnames->names[ifnames->idx++] = value; + + return 0; +@@ -198,7 +204,8 @@ mvneta_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, RTE_PTYPE_L4_TCP, @@ -66556,7 +71289,7 @@ index c4355a3f64..f281d1d7f8 100644 }; return ptypes; -@@ -376,6 +377,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) +@@ -376,6 +383,10 @@ mvneta_dev_start(struct rte_eth_dev *dev) goto out; } @@ -66567,7 +71300,7 @@ index c4355a3f64..f281d1d7f8 100644 /* start tx queues */ for (i = 0; i < dev->data->nb_tx_queues; i++) dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED; -@@ -400,6 +405,7 @@ static int +@@ -400,6 +411,7 @@ static int mvneta_dev_stop(struct rte_eth_dev *dev) { struct mvneta_priv *priv = dev->data->dev_private; @@ -66575,7 +71308,7 @@ index c4355a3f64..f281d1d7f8 100644 dev->data->dev_started = 0; -@@ -412,6 +418,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) +@@ -412,6 +424,14 @@ mvneta_dev_stop(struct rte_eth_dev *dev) priv->ppio = NULL; @@ -66628,6 +71361,26 @@ index 8fd3211283..15083d249c 100644 }; return ptypes; +diff --git a/dpdk/drivers/net/netvsc/hn_ethdev.c b/dpdk/drivers/net/netvsc/hn_ethdev.c +index d0bbc0a4c0..73db9d38d9 100644 +--- a/dpdk/drivers/net/netvsc/hn_ethdev.c ++++ b/dpdk/drivers/net/netvsc/hn_ethdev.c +@@ -313,6 +313,15 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev, + + if (reta_conf[idx].mask & mask) + hv->rss_ind[i] = reta_conf[idx].reta[shift]; ++ ++ /* ++ * Ensure we don't allow config that directs traffic to an Rx ++ * queue that we aren't going to poll ++ */ ++ if (hv->rss_ind[i] >= dev->data->nb_rx_queues) { ++ PMD_DRV_LOG(ERR, "RSS distributing traffic to invalid Rx queue"); ++ return -EINVAL; ++ } + } + + err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE); diff --git a/dpdk/drivers/net/netvsc/hn_rndis.c b/dpdk/drivers/net/netvsc/hn_rndis.c index e6f1f28768..fe36274df8 100644 --- a/dpdk/drivers/net/netvsc/hn_rndis.c @@ -66652,10 +71405,108 @@ index e6f1f28768..fe36274df8 100644 "missing RNDIS header %u", len); return; diff --git a/dpdk/drivers/net/netvsc/hn_rxtx.c b/dpdk/drivers/net/netvsc/hn_rxtx.c -index 
bc6f60c64a..6496979f28 100644 +index bc6f60c64a..080cab4464 100644 --- a/dpdk/drivers/net/netvsc/hn_rxtx.c +++ b/dpdk/drivers/net/netvsc/hn_rxtx.c -@@ -612,7 +612,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, +@@ -234,6 +234,17 @@ static void hn_reset_txagg(struct hn_tx_queue *txq) + txq->agg_prevpkt = NULL; + } + ++static void ++hn_rx_queue_free_common(struct hn_rx_queue *rxq) ++{ ++ if (!rxq) ++ return; ++ ++ rte_free(rxq->rxbuf_info); ++ rte_free(rxq->event_buf); ++ rte_free(rxq); ++} ++ + int + hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t nb_desc, +@@ -243,6 +254,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + { + struct hn_data *hv = dev->data->dev_private; + struct hn_tx_queue *txq; ++ struct hn_rx_queue *rxq = NULL; + char name[RTE_MEMPOOL_NAMESIZE]; + uint32_t tx_free_thresh; + int err = -ENOMEM; +@@ -301,6 +313,27 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + goto error; + } + ++ /* ++ * If there are more Tx queues than Rx queues, allocate rx_queues ++ * with event buffer so that Tx completion messages can still be ++ * received ++ */ ++ if (queue_idx >= dev->data->nb_rx_queues) { ++ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ ++ if (!rxq) { ++ err = -ENOMEM; ++ goto error; ++ } ++ ++ /* ++ * Don't allocate mbuf pool or rx ring. RSS is always configured ++ * to ensure packets aren't received by this Rx queue. ++ */ ++ rxq->mb_pool = NULL; ++ rxq->rx_ring = NULL; ++ } ++ + txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size); + txq->agg_pktmax = hv->rndis_agg_pkts; + txq->agg_align = hv->rndis_agg_align; +@@ -311,12 +344,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev, + socket_id, tx_conf); + if (err == 0) { + dev->data->tx_queues[queue_idx] = txq; ++ if (rxq != NULL) ++ dev->data->rx_queues[queue_idx] = rxq; + return 0; + } + + error: + rte_mempool_free(txq->txdesc_pool); + rte_memzone_free(txq->tx_rndis_mz); ++ hn_rx_queue_free_common(rxq); + rte_free(txq); + return err; + } +@@ -363,6 +399,12 @@ hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + if (!txq) + return; ++ /* ++ * Free any Rx queues allocated for a Tx queue without a corresponding ++ * Rx queue ++ */ ++ if (qid >= dev->data->nb_rx_queues) ++ hn_rx_queue_free_common(dev->data->rx_queues[qid]); + + rte_mempool_free(txq->txdesc_pool); + +@@ -552,10 +594,12 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, + const struct hn_rxinfo *info) + { + struct hn_data *hv = rxq->hv; +- struct rte_mbuf *m; ++ struct rte_mbuf *m = NULL; + bool use_extbuf = false; + +- m = rte_pktmbuf_alloc(rxq->mb_pool); ++ if (likely(rxq->mb_pool != NULL)) ++ m = rte_pktmbuf_alloc(rxq->mb_pool); ++ + if (unlikely(!m)) { + struct rte_eth_dev *dev = + &rte_eth_devices[rxq->port_id]; +@@ -612,7 +656,9 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb, RTE_PTYPE_L4_MASK); if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { @@ -66666,7 +71517,49 @@ index bc6f60c64a..6496979f28 100644 m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; /* NDIS always strips tag, put it back if necessary */ -@@ -1332,7 +1334,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, +@@ -940,7 +986,15 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + if (queue_idx == 0) { + rxq = hv->primary; + } else { +- rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ /* ++ * If the number of Tx queues was previously greater than the ++ * number of Rx queues, we may already have allocated an rxq. 
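The netvsc hunks above create stub Rx queues for Tx-only channels and, conversely, reuse such a stub when the real Rx queue is configured later, freeing on error only what the current call allocated. The reuse-or-allocate shape, reduced to its essentials with hypothetical types:

#include <stdlib.h>

struct rxq {
        void *ring; /* NULL for a completion-only stub */
};

/* Reuse the queue already sitting in the slot (allocated earlier as a
 * stub), otherwise allocate; on failure free only what we created. */
static struct rxq *
rxq_setup(struct rxq **slot, int want_ring)
{
        int created = (*slot == NULL);
        struct rxq *q = created ? calloc(1, sizeof(*q)) : *slot;

        if (q == NULL)
                return NULL;
        if (want_ring) {
                q->ring = malloc(64);
                if (q->ring == NULL) {
                        if (created)
                                free(q);
                        return NULL;
                }
        }
        *slot = q;
        return q;
}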
++ */ ++ if (!dev->data->rx_queues[queue_idx]) ++ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id); ++ else ++ rxq = dev->data->rx_queues[queue_idx]; ++ + if (!rxq) + return -ENOMEM; + } +@@ -973,9 +1027,10 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev, + + fail: + rte_ring_free(rxq->rx_ring); +- rte_free(rxq->rxbuf_info); +- rte_free(rxq->event_buf); +- rte_free(rxq); ++ /* Only free rxq if it was created in this function. */ ++ if (!dev->data->rx_queues[queue_idx]) ++ hn_rx_queue_free_common(rxq); ++ + return error; + } + +@@ -996,9 +1051,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary) + if (keep_primary && rxq == rxq->hv->primary) + return; + +- rte_free(rxq->rxbuf_info); +- rte_free(rxq->event_buf); +- rte_free(rxq); ++ hn_rx_queue_free_common(rxq); + } + + void +@@ -1332,7 +1385,9 @@ static void hn_encap(struct rndis_packet_msg *pkt, if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN); @@ -66677,6 +71570,66 @@ index bc6f60c64a..6496979f28 100644 } if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { +@@ -1514,14 +1569,32 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *m = tx_pkts[nb_tx]; +- uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; + struct rndis_packet_msg *pkt; + struct hn_txdesc *txd; ++ uint32_t pkt_size; + + txd = hn_txd_get(txq); + if (txd == NULL) + break; + ++ if (!(m->ol_flags & RTE_MBUF_F_TX_VLAN)) { ++ struct rte_ether_hdr *eh = ++ rte_pktmbuf_mtod(m, struct rte_ether_hdr *); ++ struct rte_vlan_hdr *vh; ++ ++ /* Force TX vlan offloading for 801.2Q packet */ ++ if (eh->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) { ++ vh = (struct rte_vlan_hdr *)(eh + 1); ++ m->ol_flags |= RTE_MBUF_F_TX_VLAN; ++ m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); ++ ++ /* Copy ether header over */ ++ memmove(rte_pktmbuf_adj(m, sizeof(struct rte_vlan_hdr)), ++ eh, 2 * RTE_ETHER_ADDR_LEN); ++ } ++ } ++ pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN; ++ + /* For small packets aggregate them in chimney buffer */ + if (m->pkt_len <= hv->tx_copybreak && + pkt_size <= txq->agg_szmax) { +diff --git a/dpdk/drivers/net/nfb/nfb_rx.c b/dpdk/drivers/net/nfb/nfb_rx.c +index 8a9b232305..7941197b77 100644 +--- a/dpdk/drivers/net/nfb/nfb_rx.c ++++ b/dpdk/drivers/net/nfb/nfb_rx.c +@@ -129,7 +129,7 @@ nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + if (rxq->queue != NULL) { + ndp_close_rx_queue(rxq->queue); +- rte_free(rxq); + rxq->queue = NULL; ++ rte_free(rxq); + } + } +diff --git a/dpdk/drivers/net/nfb/nfb_tx.c b/dpdk/drivers/net/nfb/nfb_tx.c +index d49fc324e7..5c38d69934 100644 +--- a/dpdk/drivers/net/nfb/nfb_tx.c ++++ b/dpdk/drivers/net/nfb/nfb_tx.c +@@ -108,7 +108,7 @@ nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) + + if (txq->queue != NULL) { + ndp_close_tx_queue(txq->queue); +- rte_free(txq); + txq->queue = NULL; ++ rte_free(txq); + } + } diff --git a/dpdk/drivers/net/nfp/flower/nfp_flower.c b/dpdk/drivers/net/nfp/flower/nfp_flower.c index e447258d97..360345c50b 100644 --- a/dpdk/drivers/net/nfp/flower/nfp_flower.c @@ -67586,7 +72539,7 @@ index 36c19b47e4..5b5c0aa7d3 100644 #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ (&((struct nfp_net_adapter *)adapter)->hw) diff --git a/dpdk/drivers/net/nfp/nfp_ethdev.c b/dpdk/drivers/net/nfp/nfp_ethdev.c -index 0956ea81df..68fd67a024 100644 +index 0956ea81df..d795025004 100644 --- a/dpdk/drivers/net/nfp/nfp_ethdev.c +++ 
b/dpdk/drivers/net/nfp/nfp_ethdev.c @@ -70,6 +70,7 @@ nfp_net_start(struct rte_eth_dev *dev) @@ -67618,10 +72571,77 @@ index 0956ea81df..68fd67a024 100644 return 0; -@@ -258,6 +264,45 @@ nfp_net_set_link_down(struct rte_eth_dev *dev) - hw->nfp_idx, 0); +@@ -199,6 +205,7 @@ error: + static int + nfp_net_stop(struct rte_eth_dev *dev) + { ++ int ret; + struct nfp_net_hw *hw; + + PMD_INIT_LOG(DEBUG, "Stop"); +@@ -214,10 +221,12 @@ nfp_net_stop(struct rte_eth_dev *dev) + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ +- nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); ++ ret = nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); + else +- nfp_eth_set_configured(dev->process_private, ++ ret = nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 0); ++ if (ret < 0) ++ return ret; + + return 0; + } +@@ -226,6 +235,7 @@ nfp_net_stop(struct rte_eth_dev *dev) + static int + nfp_net_set_link_up(struct rte_eth_dev *dev) + { ++ int ret; + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link up"); +@@ -234,16 +244,21 @@ nfp_net_set_link_up(struct rte_eth_dev *dev) + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ +- return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); ++ ret = nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 1); + else +- return nfp_eth_set_configured(dev->process_private, ++ ret = nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 1); ++ if (ret < 0) ++ return ret; ++ ++ return 0; } + /* Set the link down. */ + static int + nfp_net_set_link_down(struct rte_eth_dev *dev) + { ++ int ret; + struct nfp_net_hw *hw; + + PMD_DRV_LOG(DEBUG, "Set link down"); +@@ -252,10 +267,53 @@ nfp_net_set_link_down(struct rte_eth_dev *dev) + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + /* Configure the physical port down */ +- return nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); ++ ret = nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0); + else +- return nfp_eth_set_configured(dev->process_private, ++ ret = nfp_eth_set_configured(dev->process_private, + hw->nfp_idx, 0); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ +static void +nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, + uint8_t id) @@ -67659,12 +72679,10 @@ index 0956ea81df..68fd67a024 100644 + rte_free(pf_dev); + + return 0; -+} -+ + } + /* Reset and stop device. The device can not be restarted. 
*/ - static int - nfp_net_close(struct rte_eth_dev *dev) -@@ -268,8 +313,19 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -268,8 +326,19 @@ nfp_net_close(struct rte_eth_dev *dev) struct nfp_app_fw_nic *app_fw_nic; int i; @@ -67685,7 +72703,7 @@ index 0956ea81df..68fd67a024 100644 PMD_INIT_LOG(DEBUG, "Close"); -@@ -297,8 +353,11 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -297,8 +366,11 @@ nfp_net_close(struct rte_eth_dev *dev) /* Only free PF resources after all physical ports have been closed */ /* Mark this port as unused and free device priv resources*/ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); @@ -67699,7 +72717,7 @@ index 0956ea81df..68fd67a024 100644 for (i = 0; i < app_fw_nic->total_phyports; i++) { /* Check to see if ports are still in use */ -@@ -306,26 +365,15 @@ nfp_net_close(struct rte_eth_dev *dev) +@@ -306,26 +378,15 @@ nfp_net_close(struct rte_eth_dev *dev) return 0; } @@ -67730,7 +72748,7 @@ index 0956ea81df..68fd67a024 100644 return 0; } -@@ -517,14 +565,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -517,14 +578,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev) /* Use backpointer to the CoreNIC app struct */ app_fw_nic = NFP_PRIV_TO_APP_FW_NIC(pf_dev->app_fw_priv); @@ -67745,7 +72763,7 @@ index 0956ea81df..68fd67a024 100644 port = ((struct nfp_net_hw *)eth_dev->data->dev_private)->idx; if (port < 0 || port > 7) { PMD_DRV_LOG(ERR, "Port value is wrong"); -@@ -572,6 +612,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -572,6 +625,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION); @@ -67755,7 +72773,7 @@ index 0956ea81df..68fd67a024 100644 if (nfp_net_ethdev_ops_mount(hw, eth_dev)) return -EINVAL; -@@ -609,6 +652,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -609,6 +665,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev) hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP); hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU); hw->mtu = RTE_ETHER_MTU; @@ -67763,7 +72781,7 @@ index 0956ea81df..68fd67a024 100644 /* VLAN insertion is incompatible with LSOv2 */ if (hw->cap & NFP_NET_CFG_CTRL_LSO2) -@@ -690,6 +734,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) +@@ -690,6 +747,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev) nfp_net_dev_interrupt_handler, (void *)eth_dev); /* Telling the firmware about the LSC interrupt entry */ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); @@ -67772,7 +72790,7 @@ index 0956ea81df..68fd67a024 100644 /* Recording current stats counters values */ nfp_net_stats_reset(eth_dev); -@@ -724,7 +770,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) +@@ -724,7 +783,7 @@ nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card) goto load_fw; /* Then try the PCI name */ snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH, @@ -67781,7 +72799,7 @@ index 0956ea81df..68fd67a024 100644 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name); if (rte_firmware_read(fw_name, &fw_buf, &fsize) == 0) -@@ -917,10 +963,9 @@ port_cleanup: +@@ -917,10 +976,9 @@ port_cleanup: struct rte_eth_dev *tmp_dev; tmp_dev = app_fw_nic->ports[i]->eth_dev; rte_eth_dev_release_port(tmp_dev); @@ -67793,7 +72811,7 @@ index 0956ea81df..68fd67a024 100644 app_cleanup: rte_free(app_fw_nic); -@@ -930,9 +975,11 @@ app_cleanup: +@@ -930,9 +988,11 @@ app_cleanup: static int nfp_pf_init(struct rte_pci_device *pci_dev) { @@ -67806,7 +72824,7 @@ index 0956ea81df..68fd67a024 100644 struct nfp_cpp *cpp; enum nfp_app_fw_id app_fw_id; struct nfp_pf_dev *pf_dev; -@@ -976,6 +1023,10 @@ 
nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -976,6 +1036,10 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto hwinfo_cleanup; } @@ -67817,7 +72835,7 @@ index 0956ea81df..68fd67a024 100644 if (nfp_fw_setup(pci_dev, cpp, nfp_eth_table, hwinfo)) { PMD_INIT_LOG(ERR, "Error when uploading firmware"); ret = -EIO; -@@ -1032,7 +1083,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -1032,7 +1096,8 @@ nfp_pf_init(struct rte_pci_device *pci_dev) goto pf_cleanup; } @@ -67827,7 +72845,7 @@ index 0956ea81df..68fd67a024 100644 addr, NFP_QCP_QUEUE_AREA_SZ, &pf_dev->hwqueues_area); if (pf_dev->hw_queues == NULL) { -@@ -1078,7 +1130,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) +@@ -1078,7 +1143,7 @@ nfp_pf_init(struct rte_pci_device *pci_dev) return 0; hwqueues_cleanup: @@ -67921,7 +72939,7 @@ index d1427b63bc..60e90cd5cb 100644 nfp_net_stats_reset(eth_dev); } diff --git a/dpdk/drivers/net/nfp/nfp_flow.c b/dpdk/drivers/net/nfp/nfp_flow.c -index 6f79d950db..17c091ffa0 100644 +index 6f79d950db..3746aa2d0f 100644 --- a/dpdk/drivers/net/nfp/nfp_flow.c +++ b/dpdk/drivers/net/nfp/nfp_flow.c @@ -285,7 +285,7 @@ nfp_check_mask_remove(struct nfp_flow_priv *priv, @@ -68273,7 +73291,15 @@ index 6f79d950db..17c091ffa0 100644 set_tun->tun_flags = vxlan->hdr.vx_flags; /* Send the tunnel neighbor cmsg to fw */ -@@ -3094,7 +3145,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower, +@@ -2968,7 +3019,6 @@ nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr, + goto free_entry; + } + +- rte_free(entry); + rte_free(find_entry); + priv->pre_tun_cnt--; + +@@ -3094,7 +3144,7 @@ nfp_flow_action_geneve_encap_v6(struct nfp_app_fw_flower *app_fw_flower, set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); memset(set_tun, 0, act_set_size); @@ -68282,7 +73308,7 @@ index 6f79d950db..17c091ffa0 100644 tun_id = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2]; nfp_flow_set_tun_process(set_tun, NFP_FL_TUN_GENEVE, tun_id, ipv6->hdr.hop_limits, tos); -@@ -3113,6 +3164,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, +@@ -3113,6 +3163,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, struct nfp_fl_rule_metadata *nfp_flow_meta, struct nfp_fl_tun *tun) { @@ -68290,7 +73316,7 @@ index 6f79d950db..17c091ffa0 100644 const struct rte_ether_hdr *eth; const struct rte_flow_item_ipv4 *ipv4; const struct rte_flow_item_gre *gre; -@@ -3124,6 +3176,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, +@@ -3124,6 +3175,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, eth = (const struct rte_ether_hdr *)raw_encap->data; ipv4 = (const struct rte_flow_item_ipv4 *)(eth + 1); gre = (const struct rte_flow_item_gre *)(ipv4 + 1); @@ -68298,7 +73324,7 @@ index 6f79d950db..17c091ffa0 100644 pre_tun = (struct nfp_fl_act_pre_tun *)actions; memset(pre_tun, 0, act_pre_size); -@@ -3131,7 +3184,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, +@@ -3131,7 +3183,7 @@ nfp_flow_action_nvgre_encap_v4(struct nfp_app_fw_flower *app_fw_flower, set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); memset(set_tun, 0, act_set_size); @@ -68307,7 +73333,7 @@ index 6f79d950db..17c091ffa0 100644 ipv4->hdr.time_to_live, ipv4->hdr.type_of_service); set_tun->tun_proto = gre->protocol; -@@ -3149,6 +3202,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, +@@ -3149,6 +3201,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower 
*app_fw_flower, struct nfp_fl_tun *tun) { uint8_t tos; @@ -68315,7 +73341,7 @@ index 6f79d950db..17c091ffa0 100644 const struct rte_ether_hdr *eth; const struct rte_flow_item_ipv6 *ipv6; const struct rte_flow_item_gre *gre; -@@ -3160,6 +3214,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, +@@ -3160,6 +3213,7 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, eth = (const struct rte_ether_hdr *)raw_encap->data; ipv6 = (const struct rte_flow_item_ipv6 *)(eth + 1); gre = (const struct rte_flow_item_gre *)(ipv6 + 1); @@ -68323,7 +73349,7 @@ index 6f79d950db..17c091ffa0 100644 pre_tun = (struct nfp_fl_act_pre_tun *)actions; memset(pre_tun, 0, act_pre_size); -@@ -3167,8 +3222,8 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, +@@ -3167,8 +3221,8 @@ nfp_flow_action_nvgre_encap_v6(struct nfp_app_fw_flower *app_fw_flower, set_tun = (struct nfp_fl_act_set_tun *)(act_data + act_pre_size); memset(set_tun, 0, act_set_size); @@ -68334,7 +73360,7 @@ index 6f79d950db..17c091ffa0 100644 ipv6->hdr.hop_limits, tos); set_tun->tun_proto = gre->protocol; -@@ -3232,12 +3287,27 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower, +@@ -3232,12 +3286,27 @@ nfp_flow_action_raw_encap(struct nfp_app_fw_flower *app_fw_flower, return ret; } @@ -68362,7 +73388,7 @@ index 6f79d950db..17c091ffa0 100644 char *position; char *action_data; bool ttl_tos_flag = false; -@@ -3256,6 +3326,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3256,6 +3325,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, position = action_data; meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data; @@ -68371,7 +73397,7 @@ index 6f79d950db..17c091ffa0 100644 for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; ++action) { switch (action->type) { case RTE_FLOW_ACTION_TYPE_VOID: -@@ -3272,7 +3344,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3272,7 +3343,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, break; case RTE_FLOW_ACTION_TYPE_PORT_ID: PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_PORT_ID"); @@ -68381,7 +73407,7 @@ index 6f79d950db..17c091ffa0 100644 if (ret != 0) { PMD_DRV_LOG(ERR, "Failed when process" " RTE_FLOW_ACTION_TYPE_PORT_ID"); -@@ -3347,7 +3420,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3347,7 +3419,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, break; case RTE_FLOW_ACTION_TYPE_SET_TP_SRC: PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_TP_SRC"); @@ -68391,7 +73417,7 @@ index 6f79d950db..17c091ffa0 100644 if (!tp_set_flag) { position += sizeof(struct nfp_fl_act_set_tport); tp_set_flag = true; -@@ -3355,7 +3429,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3355,7 +3428,8 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, break; case RTE_FLOW_ACTION_TYPE_SET_TP_DST: PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_TP_DST"); @@ -68401,7 +73427,7 @@ index 6f79d950db..17c091ffa0 100644 if (!tp_set_flag) { position += sizeof(struct nfp_fl_act_set_tport); tp_set_flag = true; -@@ -3370,7 +3445,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3370,7 +3444,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, ttl_tos_flag = true; } } else { @@ -68410,7 +73436,7 @@ index 6f79d950db..17c091ffa0 100644 if (!tc_hl_flag) { position += sizeof(struct 
nfp_fl_act_set_ipv6_tc_hl_fl); tc_hl_flag = true; -@@ -3387,7 +3462,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3387,7 +3461,7 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, break; case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP: PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP"); @@ -68419,7 +73445,7 @@ index 6f79d950db..17c091ffa0 100644 if (!tc_hl_flag) { position += sizeof(struct nfp_fl_act_set_ipv6_tc_hl_fl); tc_hl_flag = true; -@@ -3442,6 +3517,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, +@@ -3442,6 +3516,11 @@ nfp_flow_compile_action(struct nfp_flower_representor *representor, total_actions++; } @@ -68431,7 +73457,7 @@ index 6f79d950db..17c091ffa0 100644 if (drop_flag) nfp_flow_meta->shortcut = rte_cpu_to_be_32(NFP_FL_SC_ACT_DROP); else if (total_actions > 1) -@@ -3484,7 +3564,7 @@ nfp_flow_process(struct nfp_flower_representor *representor, +@@ -3484,7 +3563,7 @@ nfp_flow_process(struct nfp_flower_representor *representor, return NULL; } @@ -68440,7 +73466,7 @@ index 6f79d950db..17c091ffa0 100644 if (nfp_flow == NULL) { PMD_DRV_LOG(ERR, "Alloc nfp flow failed."); goto free_stats; -@@ -3592,6 +3672,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv, +@@ -3592,6 +3671,7 @@ nfp_flow_teardown(struct nfp_flow_priv *priv, nfp_flow_meta = nfp_flow->payload.meta; mask_data = nfp_flow->payload.mask_data; mask_len = nfp_flow_meta->mask_len << NFP_FL_LW_SIZ; @@ -68448,7 +73474,7 @@ index 6f79d950db..17c091ffa0 100644 if (!nfp_check_mask_remove(priv, mask_data, mask_len, &nfp_flow_meta->flags)) { PMD_DRV_LOG(ERR, "nfp mask del check failed."); -@@ -3791,14 +3872,21 @@ nfp_flow_flush(struct rte_eth_dev *dev, +@@ -3791,14 +3871,21 @@ nfp_flow_flush(struct rte_eth_dev *dev, void *next_data; uint32_t iter = 0; const void *next_key; @@ -68473,7 +73499,7 @@ index 6f79d950db..17c091ffa0 100644 } return ret; -@@ -3809,6 +3897,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3809,6 +3896,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, struct rte_flow *nfp_flow, void *data) { @@ -68481,7 +73507,7 @@ index 6f79d950db..17c091ffa0 100644 uint32_t ctx_id; struct rte_flow *flow; struct nfp_flow_priv *priv; -@@ -3823,6 +3912,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3823,6 +3911,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, } query = (struct rte_flow_query_count *)data; @@ -68489,7 +73515,7 @@ index 6f79d950db..17c091ffa0 100644 memset(query, 0, sizeof(*query)); ctx_id = rte_be_to_cpu_32(nfp_flow->payload.meta->host_ctx_id); -@@ -3834,7 +3924,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, +@@ -3834,7 +3923,7 @@ nfp_flow_stats_get(struct rte_eth_dev *dev, query->bytes = stats->bytes; query->hits_set = 1; query->bytes_set = 1; @@ -68498,7 +73524,7 @@ index 6f79d950db..17c091ffa0 100644 stats->pkts = 0; stats->bytes = 0; } -@@ -3981,11 +4071,21 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -3981,11 +4070,21 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) size_t stats_size; uint64_t ctx_count; uint64_t ctx_split; @@ -68521,7 +73547,7 @@ index 6f79d950db..17c091ffa0 100644 .entries = NFP_MASK_TABLE_ENTRIES, .hash_func = rte_jhash, .socket_id = rte_socket_id(), -@@ -3994,7 +4094,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -3994,7 +4093,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) }; struct rte_hash_parameters flow_hash_params = { @@ -68530,7 +73556,7 @@ index 6f79d950db..17c091ffa0 100644 .hash_func = rte_jhash, .socket_id = rte_socket_id(), .key_len = sizeof(uint32_t), -@@ 
-4002,7 +4102,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) +@@ -4002,7 +4101,7 @@ nfp_flow_priv_init(struct nfp_pf_dev *pf_dev) }; struct rte_hash_parameters pre_tun_hash_params = { @@ -69415,6 +74441,19 @@ index ddf992e79a..c45bec7ce7 100644 /* SDS EXT */ #define YT_AUTO 0xA5 +diff --git a/dpdk/drivers/net/ngbe/base/ngbe_regs.h b/dpdk/drivers/net/ngbe/base/ngbe_regs.h +index c0e79a2ba7..0d820f4079 100644 +--- a/dpdk/drivers/net/ngbe/base/ngbe_regs.h ++++ b/dpdk/drivers/net/ngbe/base/ngbe_regs.h +@@ -712,6 +712,8 @@ enum ngbe_5tuple_protocol { + #define NGBE_MACRXFLT_CTL_PASS LS(3, 6, 0x3) + #define NGBE_MACRXFLT_RXALL MS(31, 0x1) + ++#define NGBE_MAC_WDG_TIMEOUT 0x01100C ++ + /****************************************************************************** + * Statistic Registers + ******************************************************************************/ diff --git a/dpdk/drivers/net/ngbe/base/ngbe_type.h b/dpdk/drivers/net/ngbe/base/ngbe_type.h index aa5c41146c..8a7d2cd331 100644 --- a/dpdk/drivers/net/ngbe/base/ngbe_type.h @@ -69478,7 +74517,7 @@ index aa5c41146c..8a7d2cd331 100644 u64 rx_qp_packets; u64 tx_qp_packets; diff --git a/dpdk/drivers/net/ngbe/ngbe_ethdev.c b/dpdk/drivers/net/ngbe/ngbe_ethdev.c -index afdb3ad41f..443bd9fef9 100644 +index afdb3ad41f..9d6ae6f2ef 100644 --- a/dpdk/drivers/net/ngbe/ngbe_ethdev.c +++ b/dpdk/drivers/net/ngbe/ngbe_ethdev.c @@ -90,6 +90,7 @@ static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev); @@ -69499,7 +74538,16 @@ index afdb3ad41f..443bd9fef9 100644 HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), -@@ -543,7 +546,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) +@@ -260,6 +263,8 @@ ngbe_pf_reset_hw(struct ngbe_hw *hw) + status = hw->mac.reset_hw(hw); + + ctrl_ext = rd32(hw, NGBE_PORTCTL); ++ /* let hardware know driver is loaded */ ++ ctrl_ext |= NGBE_PORTCTL_DRVLOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= NGBE_PORTCTL_RSTDONE; + wr32(hw, NGBE_PORTCTL, ctrl_ext); +@@ -543,7 +548,7 @@ static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev) if (ethdev == NULL) return 0; @@ -69508,7 +74556,86 @@ index afdb3ad41f..443bd9fef9 100644 } static struct rte_pci_driver rte_ngbe_pmd = { -@@ -972,9 +975,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -579,41 +584,25 @@ ngbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) + } + + static void +-ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++ngbe_vlan_strip_q_set(struct rte_eth_dev *dev, uint16_t queue, int on) + { +- struct ngbe_hw *hw = ngbe_dev_hw(dev); +- struct ngbe_rx_queue *rxq; +- bool restart; +- uint32_t rxcfg, rxbal, rxbah; +- + if (on) + ngbe_vlan_hw_strip_enable(dev, queue); + else + ngbe_vlan_hw_strip_disable(dev, queue); ++} + +- rxq = dev->data->rx_queues[queue]; +- rxbal = rd32(hw, NGBE_RXBAL(rxq->reg_idx)); +- rxbah = rd32(hw, NGBE_RXBAH(rxq->reg_idx)); +- rxcfg = rd32(hw, NGBE_RXCFG(rxq->reg_idx)); +- if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) { +- restart = (rxcfg & NGBE_RXCFG_ENA) && +- !(rxcfg & NGBE_RXCFG_VLAN); +- rxcfg |= NGBE_RXCFG_VLAN; +- } else { +- restart = (rxcfg & NGBE_RXCFG_ENA) && +- (rxcfg & NGBE_RXCFG_VLAN); +- rxcfg &= ~NGBE_RXCFG_VLAN; +- } +- rxcfg &= ~NGBE_RXCFG_ENA; ++static void ++ngbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ++{ ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); + +- if (restart) { +- /* set vlan strip for ring */ +- ngbe_dev_rx_queue_stop(dev, 
queue); +- wr32(hw, NGBE_RXBAL(rxq->reg_idx), rxbal); +- wr32(hw, NGBE_RXBAH(rxq->reg_idx), rxbah); +- wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxcfg); +- ngbe_dev_rx_queue_start(dev, queue); ++ if (!hw->adapter_stopped) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return; + } ++ ++ ngbe_vlan_strip_q_set(dev, queue, on); + } + + static int +@@ -839,9 +828,9 @@ ngbe_vlan_hw_strip_config(struct rte_eth_dev *dev) + rxq = dev->data->rx_queues[i]; + + if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) +- ngbe_vlan_hw_strip_enable(dev, i); ++ ngbe_vlan_strip_q_set(dev, i, 1); + else +- ngbe_vlan_hw_strip_disable(dev, i); ++ ngbe_vlan_strip_q_set(dev, i, 0); + } + } + +@@ -903,6 +892,13 @@ ngbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) + static int + ngbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) + { ++ struct ngbe_hw *hw = ngbe_dev_hw(dev); ++ ++ if (!hw->adapter_stopped && (mask & RTE_ETH_VLAN_STRIP_MASK)) { ++ PMD_DRV_LOG(ERR, "Please stop port first"); ++ return -EPERM; ++ } ++ + ngbe_config_vlan_strip_on_all_queues(dev, mask); + + ngbe_vlan_offload_config(dev, mask); +@@ -972,9 +968,6 @@ ngbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -69518,7 +74645,7 @@ index afdb3ad41f..443bd9fef9 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -1038,6 +1038,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -1038,6 +1031,7 @@ ngbe_dev_start(struct rte_eth_dev *dev) } hw->mac.setup_pba(hw); @@ -69526,7 +74653,7 @@ index afdb3ad41f..443bd9fef9 100644 ngbe_configure_port(dev); err = ngbe_dev_rxtx_start(dev); -@@ -1050,6 +1051,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) +@@ -1050,6 +1044,8 @@ ngbe_dev_start(struct rte_eth_dev *dev) if (hw->is_pf && dev->data->dev_conf.lpbk_mode) goto skip_link_setup; @@ -69535,7 +74662,7 @@ index afdb3ad41f..443bd9fef9 100644 err = hw->mac.check_link(hw, &speed, &link_up, 0); if (err != 0) goto error; -@@ -1164,12 +1167,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1164,12 +1160,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) int vf; if (hw->adapter_stopped) @@ -69549,7 +74676,7 @@ index afdb3ad41f..443bd9fef9 100644 if (hw->gpio_ctl) { /* gpio0 is used to power on/off control*/ wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0); -@@ -1188,8 +1189,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1188,8 +1182,6 @@ ngbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; @@ -69558,7 +74685,7 @@ index afdb3ad41f..443bd9fef9 100644 ngbe_dev_clear_queues(dev); /* Clear stored conf */ -@@ -1216,6 +1215,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) +@@ -1216,6 +1208,10 @@ ngbe_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = true; dev->data->dev_started = 0; @@ -69569,7 +74696,7 @@ index afdb3ad41f..443bd9fef9 100644 return 0; } -@@ -1259,6 +1262,9 @@ ngbe_dev_close(struct rte_eth_dev *dev) +@@ -1259,10 +1255,16 @@ ngbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -69579,7 +74706,14 @@ index afdb3ad41f..443bd9fef9 100644 ngbe_pf_reset_hw(hw); ngbe_dev_stop(dev); -@@ -1805,7 +1811,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) + ++ /* Let firmware take over control of hardware */ ++ wr32m(hw, NGBE_PORTCTL, NGBE_PORTCTL_DRVLOAD, 0); ++ + ngbe_dev_free_queues(dev); + + ngbe_set_pcie_master(hw, false); +@@ -1805,7 +1807,9 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = 
(uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; @@ -69590,7 +74724,7 @@ index afdb3ad41f..443bd9fef9 100644 dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = NGBE_VMDQ_NUM_UC_MAC; dev_info->max_vfs = pci_dev->max_vfs; -@@ -1869,24 +1877,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +@@ -1869,24 +1873,6 @@ ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) return NULL; } @@ -69615,7 +74749,7 @@ index afdb3ad41f..443bd9fef9 100644 /* return 0 means link status changed, -1 means not changed */ int ngbe_dev_link_update_share(struct rte_eth_dev *dev, -@@ -1896,7 +1886,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1896,10 +1882,10 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, struct rte_eth_link link; u32 link_speed = NGBE_LINK_SPEED_UNKNOWN; u32 lan_speed = 0; @@ -69623,7 +74757,11 @@ index afdb3ad41f..443bd9fef9 100644 bool link_up; int err; int wait = 1; -@@ -1910,9 +1899,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, ++ u32 reg; + + memset(&link, 0, sizeof(link)); + link.link_status = RTE_ETH_LINK_DOWN; +@@ -1910,9 +1896,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, hw->mac.get_link_status = true; @@ -69633,7 +74771,7 @@ index afdb3ad41f..443bd9fef9 100644 /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) wait = 0; -@@ -1927,7 +1913,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1927,7 +1910,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, if (!link_up) return rte_eth_linkstatus_set(dev, &link); @@ -69641,16 +74779,41 @@ index afdb3ad41f..443bd9fef9 100644 link.link_status = RTE_ETH_LINK_UP; link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; -@@ -1961,6 +1946,8 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -1961,6 +1943,13 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev, wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK, NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE); } ++ /* Re configure MAC RX */ ++ reg = rd32(hw, NGBE_MACRXCFG); ++ wr32(hw, NGBE_MACRXCFG, reg); + wr32m(hw, NGBE_MACRXFLT, NGBE_MACRXFLT_PROMISC, + NGBE_MACRXFLT_PROMISC); ++ reg = rd32(hw, NGBE_MAC_WDG_TIMEOUT); ++ wr32(hw, NGBE_MAC_WDG_TIMEOUT, reg); } return rte_eth_linkstatus_set(dev, &link); -@@ -2380,6 +2367,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +@@ -2148,6 +2137,19 @@ ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev) + struct ngbe_hw *hw = ngbe_dev_hw(dev); + struct ngbe_interrupt *intr = ngbe_dev_intr(dev); + ++ eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0]; ++ if (!eicr) { ++ /* ++ * shared interrupt alert! ++ * make sure interrupts are enabled because the read will ++ * have disabled interrupts. 
++ */ ++ if (!hw->adapter_stopped) ++ ngbe_enable_intr(dev); ++ return 0; ++ } ++ ((u32 *)hw->isb_mem)[NGBE_ISB_VEC0] = 0; ++ + /* read-on-clear nic registers here */ + eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC]; + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); +@@ -2380,6 +2382,93 @@ ngbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return -EIO; } @@ -70171,6 +75334,92 @@ index d52a3e73d5..2b97f0163e 100644 return 0; } +diff --git a/dpdk/drivers/net/pcap/pcap_ethdev.c b/dpdk/drivers/net/pcap/pcap_ethdev.c +index 5780f704b6..29664164b4 100644 +--- a/dpdk/drivers/net/pcap/pcap_ethdev.c ++++ b/dpdk/drivers/net/pcap/pcap_ethdev.c +@@ -274,7 +274,7 @@ static uint16_t + eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + { + unsigned int i; +- struct pcap_pkthdr header; ++ struct pcap_pkthdr *header; + struct pmd_process_private *pp; + const u_char *packet; + struct rte_mbuf *mbuf; +@@ -294,9 +294,13 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + */ + for (i = 0; i < nb_pkts; i++) { + /* Get the next PCAP packet */ +- packet = pcap_next(pcap, &header); +- if (unlikely(packet == NULL)) ++ int ret = pcap_next_ex(pcap, &header, &packet); ++ if (ret != 1) { ++ if (ret == PCAP_ERROR) ++ pcap_q->rx_stat.err_pkts++; ++ + break; ++ } + + mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool); + if (unlikely(mbuf == NULL)) { +@@ -304,33 +308,30 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) + break; + } + +- if (header.caplen <= rte_pktmbuf_tailroom(mbuf)) { ++ uint32_t len = header->caplen; ++ if (len <= rte_pktmbuf_tailroom(mbuf)) { + /* pcap packet will fit in the mbuf, can copy it */ +- rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, +- header.caplen); +- mbuf->data_len = (uint16_t)header.caplen; ++ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet, len); ++ mbuf->data_len = len; + } else { + /* Try read jumbo frame into multi mbufs. 
*/ + if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool, +- mbuf, +- packet, +- header.caplen) == -1)) { ++ mbuf, packet, len) == -1)) { + pcap_q->rx_stat.err_pkts++; + rte_pktmbuf_free(mbuf); + break; + } + } + +- mbuf->pkt_len = (uint16_t)header.caplen; +- *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset, +- rte_mbuf_timestamp_t *) = +- (uint64_t)header.ts.tv_sec * 1000000 + +- header.ts.tv_usec; ++ mbuf->pkt_len = len; ++ uint64_t us = (uint64_t)header->ts.tv_sec * US_PER_S + header->ts.tv_usec; ++ ++ *RTE_MBUF_DYNFIELD(mbuf, timestamp_dynfield_offset, rte_mbuf_timestamp_t *) = us; + mbuf->ol_flags |= timestamp_rx_dynflag; + mbuf->port = pcap_q->port_id; + bufs[num_rx] = mbuf; + num_rx++; +- rx_bytes += header.caplen; ++ rx_bytes += len; + } + pcap_q->rx_stat.pkts += num_rx; + pcap_q->rx_stat.bytes += rx_bytes; +@@ -522,6 +523,12 @@ open_iface_live(const char *iface, pcap_t **pcap) { + return -1; + } + ++ if (pcap_setnonblock(*pcap, 1, errbuf)) { ++ PMD_LOG(ERR, "Couldn't set non-blocking on %s: %s", iface, errbuf); ++ pcap_close(*pcap); ++ return -1; ++ } ++ + return 0; + } + diff --git a/dpdk/drivers/net/pfe/pfe_ethdev.c b/dpdk/drivers/net/pfe/pfe_ethdev.c index 0352a57950..0073dd7405 100644 --- a/dpdk/drivers/net/pfe/pfe_ethdev.c @@ -70346,8 +75595,24 @@ index 2ec743ebce..170ee57931 100644 return EINVAL; } +diff --git a/dpdk/drivers/net/sfc/sfc_flow_rss.c b/dpdk/drivers/net/sfc/sfc_flow_rss.c +index e28c943335..8e2749833b 100644 +--- a/dpdk/drivers/net/sfc/sfc_flow_rss.c ++++ b/dpdk/drivers/net/sfc/sfc_flow_rss.c +@@ -303,9 +303,9 @@ sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx) + + TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries); + rte_free(ctx->qid_offsets); +- rte_free(ctx); +- + sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx); ++ ++ rte_free(ctx); + } + + static int diff --git a/dpdk/drivers/net/sfc/sfc_mae.c b/dpdk/drivers/net/sfc/sfc_mae.c -index 421bb6da95..b61b9658e3 100644 +index 421bb6da95..4775953b8d 100644 --- a/dpdk/drivers/net/sfc/sfc_mae.c +++ b/dpdk/drivers/net/sfc/sfc_mae.c @@ -281,8 +281,10 @@ sfc_mae_attach(struct sfc_adapter *sa) @@ -70362,7 +75627,53 @@ index 421bb6da95..b61b9658e3 100644 mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios; mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios; -@@ -1180,6 +1182,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, +@@ -419,9 +421,8 @@ sfc_mae_outer_rule_del(struct sfc_adapter *sa, + efx_mae_match_spec_fini(sa->nic, rule->match_spec); + + TAILQ_REMOVE(&mae->outer_rules, rule, entries); +- rte_free(rule); +- + sfc_dbg(sa, "deleted outer_rule=%p", rule); ++ rte_free(rule); + } + + static int +@@ -580,9 +581,8 @@ sfc_mae_mac_addr_del(struct sfc_adapter *sa, struct sfc_mae_mac_addr *mac_addr) + } + + TAILQ_REMOVE(&mae->mac_addrs, mac_addr, entries); +- rte_free(mac_addr); +- + sfc_dbg(sa, "deleted mac_addr=%p", mac_addr); ++ rte_free(mac_addr); + } + + enum sfc_mae_mac_addr_type { +@@ -777,10 +777,10 @@ sfc_mae_encap_header_del(struct sfc_adapter *sa, + } + + TAILQ_REMOVE(&mae->encap_headers, encap_header, entries); ++ sfc_dbg(sa, "deleted encap_header=%p", encap_header); ++ + rte_free(encap_header->buf); + rte_free(encap_header); +- +- sfc_dbg(sa, "deleted encap_header=%p", encap_header); + } + + static int +@@ -1083,9 +1083,8 @@ sfc_mae_action_set_del(struct sfc_adapter *sa, + rte_free(action_set->counters); + } + TAILQ_REMOVE(&mae->action_sets, action_set, entries); +- rte_free(action_set); +- + sfc_dbg(sa, "deleted action_set=%p", action_set); ++ rte_free(action_set); + } 
+ + static int +@@ -1180,6 +1179,8 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa, } if (fw_rsrc->refcnt == 1) { @@ -70371,7 +75682,7 @@ index 421bb6da95..b61b9658e3 100644 rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id); if (rc == 0) { sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x", -@@ -3896,12 +3900,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3896,12 +3897,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, break; case SFC_FT_RULE_SWITCH: /* @@ -70387,7 +75698,7 @@ index 421bb6da95..b61b9658e3 100644 ctx.ft_switch_hit_counter = &spec_mae->ft_ctx->switch_hit_counter; -@@ -3910,8 +3912,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3910,8 +3909,25 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, SFC_ASSERT(B_FALSE); } @@ -70413,7 +75724,7 @@ index 421bb6da95..b61b9658e3 100644 sfc_mae_encap_header_del(sa, ctx.encap_header); efx_mae_action_set_spec_fini(sa->nic, ctx.spec); return 0; -@@ -3924,6 +3943,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, +@@ -3924,6 +3940,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, return 0; fail_action_set_add: @@ -70618,7 +75929,7 @@ index bcf6664460..1b90cf7a21 100644 } diff --git a/dpdk/drivers/net/tap/rte_eth_tap.c b/dpdk/drivers/net/tap/rte_eth_tap.c -index f2a6c33a19..a6bf6eec55 100644 +index f2a6c33a19..68f9a5ce34 100644 --- a/dpdk/drivers/net/tap/rte_eth_tap.c +++ b/dpdk/drivers/net/tap/rte_eth_tap.c @@ -559,7 +559,7 @@ tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len, @@ -70724,6 +76035,15 @@ index f2a6c33a19..a6bf6eec55 100644 goto error; success: TAP_LOG(DEBUG, "TAP user MAC param (%s)", value); +@@ -2466,7 +2453,7 @@ tap_mp_sync_queues(const struct rte_mp_msg *request, const void *peer) + reply.num_fds = 0; + reply_param->rxq_count = 0; + if (dev->data->nb_rx_queues + dev->data->nb_tx_queues > +- RTE_MP_MAX_FD_NUM){ ++ RTE_PMD_TAP_MAX_QUEUES){ + TAP_LOG(ERR, "Number of rx/tx queues exceeds max number of fds"); + return -1; + } diff --git a/dpdk/drivers/net/tap/tap_bpf_insns.h b/dpdk/drivers/net/tap/tap_bpf_insns.h index 1a91bbad13..53fa76c4e6 100644 --- a/dpdk/drivers/net/tap/tap_bpf_insns.h @@ -74101,7 +79421,7 @@ index efe66fe059..7468c3f0ea 100644 if (set) { struct rte_flow *remote_flow; diff --git a/dpdk/drivers/net/tap/tap_netlink.c b/dpdk/drivers/net/tap/tap_netlink.c -index 75af3404b0..d9c260127d 100644 +index 75af3404b0..35c491ac37 100644 --- a/dpdk/drivers/net/tap/tap_netlink.c +++ b/dpdk/drivers/net/tap/tap_netlink.c @@ -72,7 +72,8 @@ tap_nl_init(uint32_t nl_groups) @@ -74114,6 +79434,16 @@ index 75af3404b0..d9c260127d 100644 #endif if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { +@@ -301,7 +302,8 @@ tap_nlattr_add(struct nlmsghdr *nh, unsigned short type, + rta = (struct rtattr *)NLMSG_TAIL(nh); + rta->rta_len = RTA_LENGTH(data_len); + rta->rta_type = type; +- memcpy(RTA_DATA(rta), data, data_len); ++ if (data_len > 0) ++ memcpy(RTA_DATA(rta), data, data_len); + nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len); + } + diff --git a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c b/dpdk/drivers/net/thunderx/base/nicvf_mbox.c index 5993eec4e6..0e0176974d 100644 --- a/dpdk/drivers/net/thunderx/base/nicvf_mbox.c @@ -74425,10 +79755,18 @@ index 7031589f7c..4bf9da2d4c 100644 s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr); diff --git a/dpdk/drivers/net/txgbe/base/txgbe_mng.c b/dpdk/drivers/net/txgbe/base/txgbe_mng.c -index 
df7145094f..029a0a1fe1 100644 +index df7145094f..9770c88bc8 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_mng.c +++ b/dpdk/drivers/net/txgbe/base/txgbe_mng.c -@@ -141,21 +141,7 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, +@@ -58,6 +58,7 @@ txgbe_hic_unlocked(struct txgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) + + dword_len = length >> 2; + ++ txgbe_flush(hw); + /* The device driver writes the relevant command block + * into the ram area. + */ +@@ -141,21 +142,7 @@ txgbe_host_interface_command(struct txgbe_hw *hw, u32 *buffer, for (bi = 0; bi < dword_len; bi++) buffer[bi] = rd32a(hw, TXGBE_MNGMBX, bi); @@ -74557,7 +79895,7 @@ index 9f46d5bdb0..a7c11c50df 100644 void txgbe_bp_down_event(struct txgbe_hw *hw) diff --git a/dpdk/drivers/net/txgbe/base/txgbe_regs.h b/dpdk/drivers/net/txgbe/base/txgbe_regs.h -index 911bb6e04e..a2984f1106 100644 +index 911bb6e04e..db02b1b81b 100644 --- a/dpdk/drivers/net/txgbe/base/txgbe_regs.h +++ b/dpdk/drivers/net/txgbe/base/txgbe_regs.h @@ -1022,6 +1022,8 @@ enum txgbe_5tuple_protocol { @@ -74569,6 +79907,15 @@ index 911bb6e04e..a2984f1106 100644 /****************************************************************************** * Statistic Registers ******************************************************************************/ +@@ -1195,7 +1197,7 @@ enum txgbe_5tuple_protocol { + #define TXGBE_ICRMISC_ANDONE MS(19, 0x1) /* link auto-nego done */ + #define TXGBE_ICRMISC_ERRIG MS(20, 0x1) /* integrity error */ + #define TXGBE_ICRMISC_SPI MS(21, 0x1) /* SPI interface */ +-#define TXGBE_ICRMISC_VFMBX MS(22, 0x1) /* VF-PF message box */ ++#define TXGBE_ICRMISC_VFMBX MS(23, 0x1) /* VF-PF message box */ + #define TXGBE_ICRMISC_GPIO MS(26, 0x1) /* GPIO interrupt */ + #define TXGBE_ICRMISC_ERRPCI MS(27, 0x1) /* pcie request error */ + #define TXGBE_ICRMISC_HEAT MS(28, 0x1) /* overheat detection */ @@ -1236,6 +1238,9 @@ enum txgbe_5tuple_protocol { #define TXGBE_TCPTMR 0x000170 #define TXGBE_ITRSEL 0x000180 @@ -74629,7 +79976,7 @@ index c3486b472f..f52736cae9 100644 uint64_t isb_dma; diff --git a/dpdk/drivers/net/txgbe/txgbe_ethdev.c b/dpdk/drivers/net/txgbe/txgbe_ethdev.c -index 86ef979b29..2ed5ee683f 100644 +index 86ef979b29..784b66a579 100644 --- a/dpdk/drivers/net/txgbe/txgbe_ethdev.c +++ b/dpdk/drivers/net/txgbe/txgbe_ethdev.c @@ -179,7 +179,9 @@ static const struct rte_txgbe_xstats_name_off rte_txgbe_stats_strings[] = { @@ -74642,7 +79989,16 @@ index 86ef979b29..2ed5ee683f 100644 HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), -@@ -543,6 +545,7 @@ null: +@@ -328,6 +330,8 @@ txgbe_pf_reset_hw(struct txgbe_hw *hw) + status = hw->mac.reset_hw(hw); + + ctrl_ext = rd32(hw, TXGBE_PORTCTL); ++ /* let hardware know driver is loaded */ ++ ctrl_ext |= TXGBE_PORTCTL_DRVLOAD; + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext |= TXGBE_PORTCTL_RSTDONE; + wr32(hw, TXGBE_PORTCTL, ctrl_ext); +@@ -543,6 +547,7 @@ null: static int eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { @@ -74650,7 +80006,7 @@ index 86ef979b29..2ed5ee683f 100644 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev); struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev); -@@ -591,11 +594,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -591,11 +596,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) return 0; } @@ -74664,7 +80020,7 @@ index 
86ef979b29..2ed5ee683f 100644 hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; if (pci_dev->id.subsystem_vendor_id == PCI_VENDOR_ID_WANGXUN) { -@@ -729,6 +734,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) +@@ -729,6 +736,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses", RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC); @@ -74673,7 +80029,7 @@ index 86ef979b29..2ed5ee683f 100644 return -ENOMEM; } -@@ -896,6 +903,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +@@ -896,6 +905,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) if (!fdir_info->hash_map) { PMD_INIT_LOG(ERR, "Failed to allocate memory for fdir hash map!"); @@ -74681,7 +80037,7 @@ index 86ef979b29..2ed5ee683f 100644 return -ENOMEM; } fdir_info->mask_added = FALSE; -@@ -931,6 +939,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +@@ -931,6 +941,7 @@ static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) if (!l2_tn_info->hash_map) { PMD_INIT_LOG(ERR, "Failed to allocate memory for L2 TN hash map!"); @@ -74689,7 +80045,7 @@ index 86ef979b29..2ed5ee683f 100644 return -ENOMEM; } l2_tn_info->e_tag_en = FALSE; -@@ -958,7 +967,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) +@@ -958,7 +969,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev) if (!ethdev) return 0; @@ -74698,7 +80054,7 @@ index 86ef979b29..2ed5ee683f 100644 } static struct rte_pci_driver rte_txgbe_pmd = { -@@ -994,41 +1003,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +@@ -994,41 +1005,25 @@ txgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } static void @@ -74751,7 +80107,7 @@ index 86ef979b29..2ed5ee683f 100644 } static int -@@ -1253,9 +1246,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +@@ -1253,9 +1248,9 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) @@ -74763,7 +80119,7 @@ index 86ef979b29..2ed5ee683f 100644 } } -@@ -1317,6 +1310,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +@@ -1317,6 +1312,13 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) static int txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { @@ -74777,7 +80133,7 @@ index 86ef979b29..2ed5ee683f 100644 txgbe_config_vlan_strip_on_all_queues(dev, mask); txgbe_vlan_offload_config(dev, mask); -@@ -1494,6 +1494,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) +@@ -1494,6 +1496,19 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } @@ -74797,7 +80153,7 @@ index 86ef979b29..2ed5ee683f 100644 } return 0; } -@@ -1530,6 +1543,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) +@@ -1530,6 +1545,25 @@ txgbe_dev_configure(struct rte_eth_dev *dev) return 0; } @@ -74823,7 +80179,7 @@ index 86ef979b29..2ed5ee683f 100644 static void txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) { -@@ -1647,7 +1679,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1647,7 +1681,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Stop the link setup handler before resetting the HW. 
*/ @@ -74832,7 +80188,7 @@ index 86ef979b29..2ed5ee683f 100644 /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); -@@ -1668,6 +1700,12 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1668,6 +1702,12 @@ txgbe_dev_start(struct rte_eth_dev *dev) hw->mac.get_link_status = true; hw->dev_start = true; @@ -74845,7 +80201,7 @@ index 86ef979b29..2ed5ee683f 100644 /* configure PF module if SRIOV enabled */ txgbe_pf_host_configure(dev); -@@ -1786,6 +1824,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1786,6 +1826,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed = (TXGBE_LINK_SPEED_100M_FULL | TXGBE_LINK_SPEED_1GB_FULL | TXGBE_LINK_SPEED_10GB_FULL); @@ -74853,7 +80209,7 @@ index 86ef979b29..2ed5ee683f 100644 } else { if (*link_speeds & RTE_ETH_LINK_SPEED_10G) speed |= TXGBE_LINK_SPEED_10GB_FULL; -@@ -1797,6 +1836,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) +@@ -1797,6 +1838,7 @@ txgbe_dev_start(struct rte_eth_dev *dev) speed |= TXGBE_LINK_SPEED_1GB_FULL; if (*link_speeds & RTE_ETH_LINK_SPEED_100M) speed |= TXGBE_LINK_SPEED_100M_FULL; @@ -74861,7 +80217,7 @@ index 86ef979b29..2ed5ee683f 100644 } err = hw->mac.setup_link(hw, speed, link_up); -@@ -1875,15 +1915,19 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1875,15 +1917,19 @@ txgbe_dev_stop(struct rte_eth_dev *dev) struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev); if (hw->adapter_stopped) @@ -74883,7 +80239,7 @@ index 86ef979b29..2ed5ee683f 100644 /* reset the NIC */ txgbe_pf_reset_hw(hw); hw->adapter_stopped = 0; -@@ -1894,14 +1938,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1894,14 +1940,6 @@ txgbe_dev_stop(struct rte_eth_dev *dev) for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; @@ -74898,7 +80254,7 @@ index 86ef979b29..2ed5ee683f 100644 txgbe_dev_clear_queues(dev); /* Clear stored conf */ -@@ -1928,10 +1964,22 @@ txgbe_dev_stop(struct rte_eth_dev *dev) +@@ -1928,10 +1966,22 @@ txgbe_dev_stop(struct rte_eth_dev *dev) adapter->rss_reta_updated = 0; wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK); @@ -74921,7 +80277,7 @@ index 86ef979b29..2ed5ee683f 100644 return 0; } -@@ -1991,12 +2039,17 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -1991,12 +2041,20 @@ txgbe_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); @@ -74932,6 +80288,9 @@ index 86ef979b29..2ed5ee683f 100644 ret = txgbe_dev_stop(dev); ++ /* Let firmware take over control of hardware */ ++ wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_DRVLOAD, 0); ++ txgbe_dev_free_queues(dev); + txgbe_set_pcie_master(hw, false); @@ -74939,7 +80298,7 @@ index 86ef979b29..2ed5ee683f 100644 /* reprogram the RAR[0] in case user changed it. 
*/ txgbe_set_rar(hw, 0, hw->mac.addr, 0, true); -@@ -2019,8 +2072,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) +@@ -2019,8 +2077,9 @@ txgbe_dev_close(struct rte_eth_dev *dev) rte_delay_ms(100); } while (retries++ < (10 + TXGBE_LINK_UP_TIME)); @@ -74950,7 +80309,7 @@ index 86ef979b29..2ed5ee683f 100644 /* uninitialize PF if max_vfs not zero */ txgbe_pf_host_uninit(dev); -@@ -2605,7 +2659,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +@@ -2605,7 +2664,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; @@ -74961,7 +80320,7 @@ index 86ef979b29..2ed5ee683f 100644 dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC; dev_info->max_vfs = pci_dev->max_vfs; -@@ -2690,11 +2746,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) +@@ -2690,11 +2751,52 @@ txgbe_dev_setup_link_alarm_handler(void *param) intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG; } @@ -75014,7 +80373,7 @@ index 86ef979b29..2ed5ee683f 100644 struct txgbe_hw *hw = TXGBE_DEV_HW(dev); struct rte_eth_link link; u32 link_speed = TXGBE_LINK_SPEED_UNKNOWN; -@@ -2702,6 +2799,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2702,6 +2804,7 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, bool link_up; int err; int wait = 1; @@ -75022,7 +80381,7 @@ index 86ef979b29..2ed5ee683f 100644 memset(&link, 0, sizeof(link)); link.link_status = RTE_ETH_LINK_DOWN; -@@ -2731,10 +2829,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2731,10 +2834,24 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, if ((hw->subsystem_device_id & 0xFF) == TXGBE_DEV_ID_KR_KX_KX4) { hw->mac.bp_down_event(hw); @@ -75051,7 +80410,7 @@ index 86ef979b29..2ed5ee683f 100644 } return rte_eth_linkstatus_set(dev, &link); } else if (!hw->dev_start) { -@@ -2773,6 +2885,16 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, +@@ -2773,6 +2890,16 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev, break; } @@ -75068,7 +80427,7 @@ index 86ef979b29..2ed5ee683f 100644 return rte_eth_linkstatus_set(dev, &link); } -@@ -2949,9 +3071,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2949,9 +3076,6 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX) wr32(hw, TXGBE_PX_INTA, 1); @@ -75078,7 +80437,7 @@ index 86ef979b29..2ed5ee683f 100644 /* read-on-clear nic registers here */ eicr = ((u32 *)hw->isb_mem)[TXGBE_ISB_MISC]; PMD_DRV_LOG(DEBUG, "eicr %x", eicr); -@@ -2974,6 +3093,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, +@@ -2974,6 +3098,8 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev, if (eicr & TXGBE_ICRMISC_GPIO) intr->flags |= TXGBE_FLAG_PHY_INTERRUPT; @@ -75087,7 +80446,7 @@ index 86ef979b29..2ed5ee683f 100644 return 0; } -@@ -3143,7 +3264,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) +@@ -3143,7 +3269,8 @@ txgbe_dev_interrupt_delayed_handler(void *param) } /* restore original mask */ @@ -75097,7 +80456,7 @@ index 86ef979b29..2ed5ee683f 100644 intr->mask = intr->mask_orig; intr->mask_orig = 0; -@@ -3481,12 +3603,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +@@ -3481,12 +3608,8 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; } @@ -75112,7 +80471,7 @@ index 86ef979b29..2ed5ee683f 100644 return 0; } -@@ -3637,13 +3755,13 
@@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -3637,13 +3760,13 @@ txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct txgbe_hw *hw = TXGBE_DEV_HW(dev); if (queue_id < 32) { @@ -75132,7 +80491,7 @@ index 86ef979b29..2ed5ee683f 100644 } rte_intr_enable(intr_handle); -@@ -3658,11 +3776,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +@@ -3658,11 +3781,11 @@ txgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) if (queue_id < 32) { mask = rd32(hw, TXGBE_IMS(0)); @@ -75146,7 +80505,7 @@ index 86ef979b29..2ed5ee683f 100644 wr32(hw, TXGBE_IMS(1), mask); } -@@ -3696,7 +3814,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, +@@ -3696,7 +3819,7 @@ txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction, wr32(hw, TXGBE_IVARMISC, tmp); } else { /* rx or tx causes */ @@ -75155,7 +80514,7 @@ index 86ef979b29..2ed5ee683f 100644 idx = ((16 * (queue & 1)) + (8 * direction)); tmp = rd32(hw, TXGBE_IVAR(queue >> 1)); tmp &= ~(0xFF << idx); -@@ -3802,6 +3920,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, +@@ -3802,6 +3925,7 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev); uint32_t syn_info; uint32_t synqf; @@ -75163,7 +80522,7 @@ index 86ef979b29..2ed5ee683f 100644 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; -@@ -3811,7 +3930,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, +@@ -3811,7 +3935,11 @@ txgbe_syn_filter_set(struct rte_eth_dev *dev, if (add) { if (syn_info & TXGBE_SYNCLS_ENA) return -EINVAL; @@ -75176,7 +80535,7 @@ index 86ef979b29..2ed5ee683f 100644 synqf |= TXGBE_SYNCLS_ENA; if (filter->hig_pri) -@@ -3880,7 +4003,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, +@@ -3880,7 +4008,10 @@ txgbe_inject_5tuple_filter(struct rte_eth_dev *dev, wr32(hw, TXGBE_5TFPORT(i), sdpqf); wr32(hw, TXGBE_5TFCTL0(i), ftqf); @@ -75188,7 +80547,7 @@ index 86ef979b29..2ed5ee683f 100644 wr32(hw, TXGBE_5TFCTL1(i), l34timir); } -@@ -4164,7 +4290,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, +@@ -4164,7 +4295,17 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, if (add) { etqf = TXGBE_ETFLT_ENA; etqf |= TXGBE_ETFLT_ETID(filter->ether_type); @@ -75422,7 +80781,7 @@ index fa6c347d53..6fa8147f05 100644 + #endif /* _TXGBE_PTYPE_H_ */ diff --git a/dpdk/drivers/net/txgbe/txgbe_rxtx.c b/dpdk/drivers/net/txgbe/txgbe_rxtx.c -index ac1bba08a3..b0ec1c96d7 100644 +index ac1bba08a3..9a075cf54b 100644 --- a/dpdk/drivers/net/txgbe/txgbe_rxtx.c +++ b/dpdk/drivers/net/txgbe/txgbe_rxtx.c @@ -516,20 +516,21 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) @@ -75680,7 +81039,17 @@ index ac1bba08a3..b0ec1c96d7 100644 rte_free(txq); } } -@@ -2335,6 +2347,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, +@@ -2242,8 +2254,7 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev) + + tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT; + +- tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | +- RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; ++ tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + #ifdef RTE_LIB_SECURITY + if (dev->security_ctx) +@@ -2335,6 +2346,7 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } @@ -75688,7 +81057,7 @@ index ac1bba08a3..b0ec1c96d7 100644 txq->nb_tx_desc = nb_desc; txq->tx_free_thresh = tx_free_thresh; txq->pthresh = tx_conf->tx_thresh.pthresh; -@@ -2452,6 +2465,7 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) +@@ -2452,6 +2464,7 
@@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq) txgbe_rx_queue_release_mbufs(rxq); rte_free(rxq->sw_ring); rte_free(rxq->sw_sc_ring); @@ -75696,7 +81065,7 @@ index ac1bba08a3..b0ec1c96d7 100644 rte_free(rxq); } } -@@ -2545,6 +2559,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) +@@ -2545,6 +2558,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq) rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); rxq->rx_tail = 0; rxq->nb_rx_hold = 0; @@ -75704,7 +81073,7 @@ index ac1bba08a3..b0ec1c96d7 100644 rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; } -@@ -2625,6 +2640,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, +@@ -2625,6 +2639,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } @@ -75712,7 +81081,7 @@ index ac1bba08a3..b0ec1c96d7 100644 /* * Zero init all the descriptors in the ring. */ -@@ -2795,6 +2811,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2795,6 +2810,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txq->ops->release_mbufs(txq); txq->ops->reset(txq); } @@ -75721,7 +81090,7 @@ index ac1bba08a3..b0ec1c96d7 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { -@@ -2804,6 +2822,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) +@@ -2804,6 +2821,8 @@ txgbe_dev_clear_queues(struct rte_eth_dev *dev) txgbe_rx_queue_release_mbufs(rxq); txgbe_reset_rx_queue(adapter, rxq); } @@ -75730,7 +81099,7 @@ index ac1bba08a3..b0ec1c96d7 100644 } } -@@ -4382,7 +4402,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) +@@ -4382,7 +4401,7 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev) */ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM); @@ -75739,7 +81108,7 @@ index ac1bba08a3..b0ec1c96d7 100644 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size); wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl); -@@ -4994,6 +5014,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -4994,6 +5013,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i); @@ -75748,7 +81117,7 @@ index ac1bba08a3..b0ec1c96d7 100644 } for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; -@@ -5008,6 +5030,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +@@ -5008,6 +5029,8 @@ txgbevf_dev_rxtx_start(struct rte_eth_dev *dev) } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i); @@ -75757,7 +81126,7 @@ index ac1bba08a3..b0ec1c96d7 100644 rte_wmb(); wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1); } -@@ -5055,6 +5079,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, +@@ -5055,6 +5078,7 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, uint32_t reta; uint16_t i; uint16_t j; @@ -75765,7 +81134,7 @@ index ac1bba08a3..b0ec1c96d7 100644 struct rte_eth_rss_conf rss_conf = { .rss_key = conf->conf.key_len ? 
(void *)(uintptr_t)conf->conf.key : NULL, -@@ -5087,7 +5112,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, +@@ -5087,7 +5111,12 @@ txgbe_config_rss_filter(struct rte_eth_dev *dev, for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) { if (j == conf->conf.queue_num) j = 0; @@ -76696,7 +82065,7 @@ index 19599aa3f6..697a8dcd6b 100644 } diff --git a/dpdk/drivers/net/virtio/virtio_user_ethdev.c b/dpdk/drivers/net/virtio/virtio_user_ethdev.c -index d32abec327..78b1ed9ace 100644 +index d32abec327..ae087b0c92 100644 --- a/dpdk/drivers/net/virtio/virtio_user_ethdev.c +++ b/dpdk/drivers/net/virtio/virtio_user_ethdev.c @@ -90,10 +90,15 @@ virtio_user_set_status(struct virtio_hw *hw, uint8_t status) @@ -76718,6 +82087,14 @@ index d32abec327..78b1ed9ace 100644 virtio_user_dev_set_status(dev, status); } +@@ -194,6 +199,7 @@ virtio_user_setup_queue_packed(struct virtqueue *vq, + vring->device = (void *)(uintptr_t)used_addr; + dev->packed_queues[queue_idx].avail_wrap_counter = true; + dev->packed_queues[queue_idx].used_wrap_counter = true; ++ dev->packed_queues[queue_idx].used_idx = 0; + + for (i = 0; i < vring->num; i++) + vring->desc[i].flags = 0; diff --git a/dpdk/drivers/net/virtio/virtqueue.h b/dpdk/drivers/net/virtio/virtqueue.h index f5d8b40cad..5c9230cfe1 100644 --- a/dpdk/drivers/net/virtio/virtqueue.h @@ -76731,7 +82108,7 @@ index f5d8b40cad..5c9230cfe1 100644 idx++; if (idx >= vq->vq_nentries) { diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c -index fd946dec5c..c1c7539fff 100644 +index fd946dec5c..f4cdb1bb31 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -257,6 +257,7 @@ vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw) @@ -76750,7 +82127,15 @@ index fd946dec5c..c1c7539fff 100644 /* * Gets tx data ring descriptor size. 
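The virtio_user hunk above adds used_idx = 0 next to the two wrap-counter resets, so a reused packed queue starts from a clean state. A small sketch over an assumed per-queue state struct (the field names mirror the patch; the struct itself is illustrative):

#include <stdbool.h>
#include <stdint.h>

struct packed_q_state {	/* illustrative shape, not the driver's struct */
	bool avail_wrap_counter;
	bool used_wrap_counter;
	uint16_t used_idx;
};

static void
packed_q_reset(struct packed_q_state *q)
{
	/* All three fields must reset together: a stale used_idx paired with
	 * fresh wrap counters would desynchronize used-ring consumption. */
	q->avail_wrap_counter = true;
	q->used_wrap_counter = true;
	q->used_idx = 0;
}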
-@@ -957,6 +959,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -345,6 +347,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; ++ hw->adapter_stopped = TRUE; + hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; + hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; + +@@ -957,6 +960,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) { int ret; struct vmxnet3_hw *hw = dev->data->dev_private; @@ -76758,7 +82143,7 @@ index fd946dec5c..c1c7539fff 100644 PMD_INIT_FUNC_TRACE(); -@@ -1035,6 +1038,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -1035,6 +1039,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) /* Setting proper Rx Mode and issue Rx Mode Update command */ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); @@ -76766,7 +82151,7 @@ index fd946dec5c..c1c7539fff 100644 /* Setup interrupt callback */ rte_intr_callback_register(dev->intr_handle, vmxnet3_interrupt_handler, dev); -@@ -1046,6 +1050,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -1046,6 +1051,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) /* enable all intrs */ vmxnet3_enable_all_intrs(hw); @@ -76774,7 +82159,7 @@ index fd946dec5c..c1c7539fff 100644 vmxnet3_process_events(dev); -@@ -1058,6 +1063,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) +@@ -1058,6 +1064,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev) */ __vmxnet3_dev_link_update(dev, 0); @@ -76786,7 +82171,7 @@ index fd946dec5c..c1c7539fff 100644 return VMXNET3_SUCCESS; } -@@ -1070,6 +1080,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -1070,6 +1081,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) struct rte_eth_link link; struct vmxnet3_hw *hw = dev->data->dev_private; struct rte_intr_handle *intr_handle = dev->intr_handle; @@ -76794,7 +82179,7 @@ index fd946dec5c..c1c7539fff 100644 int ret; PMD_INIT_FUNC_TRACE(); -@@ -1125,6 +1136,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) +@@ -1125,6 +1137,11 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev) hw->adapter_stopped = 1; dev->data->dev_started = 0; @@ -76806,7 +82191,88 @@ index fd946dec5c..c1c7539fff 100644 return 0; } -@@ -1810,11 +1826,13 @@ done: +@@ -1360,42 +1377,52 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) + struct vmxnet3_hw *hw = dev->data->dev_private; + struct UPT1_TxStats txStats; + struct UPT1_RxStats rxStats; ++ uint64_t packets, bytes; + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + for (i = 0; i < hw->num_tx_queues; i++) { + vmxnet3_tx_stats_get(hw, i, &txStats); + +- stats->q_opackets[i] = txStats.ucastPktsTxOK + ++ packets = txStats.ucastPktsTxOK + + txStats.mcastPktsTxOK + + txStats.bcastPktsTxOK; + +- stats->q_obytes[i] = txStats.ucastBytesTxOK + ++ bytes = txStats.ucastBytesTxOK + + txStats.mcastBytesTxOK + + txStats.bcastBytesTxOK; + +- stats->opackets += stats->q_opackets[i]; +- stats->obytes += stats->q_obytes[i]; ++ stats->opackets += packets; ++ stats->obytes += bytes; + stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard; ++ ++ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { ++ stats->q_opackets[i] = packets; ++ stats->q_obytes[i] = bytes; ++ } + } + + for (i = 0; i < hw->num_rx_queues; i++) { + vmxnet3_rx_stats_get(hw, i, &rxStats); + +- stats->q_ipackets[i] = rxStats.ucastPktsRxOK + ++ packets = rxStats.ucastPktsRxOK + + rxStats.mcastPktsRxOK + + rxStats.bcastPktsRxOK; + +- stats->q_ibytes[i] = rxStats.ucastBytesRxOK + ++ bytes = 
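The reworked vmxnet3_dev_stats_get() above accumulates into local packets/bytes counters and only mirrors them into the fixed-size per-queue arrays when the index fits. A sketch of that guard; QUEUE_STAT_CNTRS and the struct are simplified stand-ins for RTE_ETHDEV_QUEUE_STAT_CNTRS and rte_eth_stats:

#include <stdint.h>

#define QUEUE_STAT_CNTRS 16	/* stand-in for RTE_ETHDEV_QUEUE_STAT_CNTRS */

struct dev_stats {
	uint64_t opackets;
	uint64_t q_opackets[QUEUE_STAT_CNTRS];
};

static void
account_txq(struct dev_stats *s, unsigned int q, uint64_t packets)
{
	s->opackets += packets;		/* totals cover every queue */
	if (q < QUEUE_STAT_CNTRS)	/* per-queue slots only while in range */
		s->q_opackets[q] = packets;
}

This is also why the RTE_BUILD_BUG_ON() further down can be dropped: queues beyond the array are now skipped at run time rather than excluded at build time.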
rxStats.ucastBytesRxOK + + rxStats.mcastBytesRxOK + + rxStats.bcastBytesRxOK; + +- stats->ipackets += stats->q_ipackets[i]; +- stats->ibytes += stats->q_ibytes[i]; +- +- stats->q_errors[i] = rxStats.pktsRxError; ++ stats->ipackets += packets; ++ stats->ibytes += bytes; + stats->ierrors += rxStats.pktsRxError; + stats->imissed += rxStats.pktsRxOutOfBuf; ++ ++ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { ++ stats->q_ipackets[i] = packets; ++ stats->q_ibytes[i] = bytes; ++ stats->q_errors[i] = rxStats.pktsRxError; ++ } + } + + return 0; +@@ -1411,8 +1438,6 @@ vmxnet3_dev_stats_reset(struct rte_eth_dev *dev) + + VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + +- RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES); +- + for (i = 0; i < hw->num_tx_queues; i++) { + vmxnet3_hw_tx_stats_get(hw, i, &txStats); + memcpy(&hw->snapshot_tx_stats[i], &txStats, +@@ -1456,7 +1481,7 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev, + dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; + dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */ + dev_info->min_mtu = VMXNET3_MIN_MTU; +- dev_info->max_mtu = VMXNET3_MAX_MTU; ++ dev_info->max_mtu = VMXNET3_VERSION_GE_6(hw) ? VMXNET3_V6_MAX_MTU : VMXNET3_MAX_MTU; + dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G; + dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS; + +@@ -1810,11 +1835,13 @@ done: static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { @@ -76820,6 +82286,21 @@ index fd946dec5c..c1c7539fff 100644 return 0; } +diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h +index 5a303717b1..ffd21ece79 100644 +--- a/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h ++++ b/dpdk/drivers/net/vmxnet3/vmxnet3_ethdev.h +@@ -120,8 +120,8 @@ struct vmxnet3_hw { + #define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t)) + UPT1_TxStats saved_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; + UPT1_RxStats saved_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; +- UPT1_TxStats snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES]; +- UPT1_RxStats snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES]; ++ UPT1_TxStats snapshot_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES]; ++ UPT1_RxStats snapshot_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES]; + }; + + #define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 
6 */ diff --git a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h b/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h index 74154e3a1a..ae8542811a 100644 --- a/dpdk/drivers/net/vmxnet3/vmxnet3_logs.h @@ -76954,6 +82435,74 @@ index 1117c3e160..6d48d227d6 100644 dev_info(NULL, "shared memory %s address is %p\n", shm_name, adapter->shm.ptr); +diff --git a/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c b/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c +index dd97a5f9fd..d5a9ceb6e3 100644 +--- a/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c ++++ b/dpdk/drivers/raw/ifpga/base/opae_intel_max10.c +@@ -6,6 +6,13 @@ + #include + #include "opae_osdep.h" + ++#ifndef TAILQ_FOREACH_SAFE ++#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ ++ for ((var) = TAILQ_FIRST((head)); \ ++ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ ++ (var) = (tvar)) ++#endif ++ + int max10_sys_read(struct intel_max10_device *dev, + unsigned int offset, unsigned int *val) + { +@@ -746,9 +753,9 @@ static int fdt_get_named_reg(const void *fdt, int node, const char *name, + + static void max10_sensor_uinit(struct intel_max10_device *dev) + { +- struct opae_sensor_info *info; ++ struct opae_sensor_info *info, *next; + +- TAILQ_FOREACH(info, &dev->opae_sensor_list, node) { ++ TAILQ_FOREACH_SAFE(info, &dev->opae_sensor_list, node, next) { + TAILQ_REMOVE(&dev->opae_sensor_list, info, node); + opae_free(info); + } +diff --git a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +index 1020adcf6e..aa5d8b1a43 100644 +--- a/dpdk/drivers/raw/ifpga/ifpga_rawdev.c ++++ b/dpdk/drivers/raw/ifpga/ifpga_rawdev.c +@@ -1498,7 +1498,7 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + + nb_intr = rte_intr_nb_intr_get(*intr_handle); + +- intr_efds = calloc(nb_intr, sizeof(int)); ++ intr_efds = rte_calloc("ifpga_efds", nb_intr, sizeof(int), 0); + if (!intr_efds) + return -ENOMEM; + +@@ -1507,7 +1507,7 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + + ret = opae_acc_set_irq(acc, vec_start, count, intr_efds); + if (ret) { +- free(intr_efds); ++ rte_free(intr_efds); + return -EINVAL; + } + } +@@ -1516,13 +1516,13 @@ ifpga_register_msix_irq(struct ifpga_rawdev *dev, int port_id, + ret = rte_intr_callback_register(*intr_handle, + handler, (void *)arg); + if (ret) { +- free(intr_efds); ++ rte_free(intr_efds); + return -EINVAL; + } + + IFPGA_RAWDEV_PMD_INFO("success register %s interrupt\n", name); + +- free(intr_efds); ++ rte_free(intr_efds); + return 0; + } + diff --git a/dpdk/drivers/raw/ntb/ntb.c b/dpdk/drivers/raw/ntb/ntb.c index 76e98fe515..0ed4c14592 100644 --- a/dpdk/drivers/raw/ntb/ntb.c @@ -77421,6 +82970,40 @@ index 4ea504ed6a..489cd4f515 100644 NULL }, }; +diff --git a/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c b/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c +index 783f68c91e..831d7fd53d 100644 +--- a/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c ++++ b/dpdk/examples/eventdev_pipeline/pipeline_worker_generic.c +@@ -38,10 +38,12 @@ worker_generic(void *arg) + } + received++; + +- /* The first worker stage does classification */ +- if (ev.queue_id == cdata.qid[0]) ++ /* The first worker stage does classification and sets txq. 
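The opae hunk above supplies TAILQ_FOREACH_SAFE for platforms whose sys/queue.h lacks it, precisely so max10_sensor_uinit() can free nodes mid-walk. A standalone sketch of that safe-removal idiom:

#include <stdlib.h>
#include <sys/queue.h>

#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
	for ((var) = TAILQ_FIRST((head)); \
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
	    (var) = (tvar))
#endif

struct node {
	TAILQ_ENTRY(node) link;
};
TAILQ_HEAD(node_list, node);

static void
drain(struct node_list *head)
{
	struct node *n, *next;

	/* "next" is latched before "n" is unlinked and freed, so the
	 * iteration never dereferences freed memory. */
	TAILQ_FOREACH_SAFE(n, head, link, next) {
		TAILQ_REMOVE(head, n, link);
		free(n);
	}
}

Plain TAILQ_FOREACH would read n's link field after free(n), which is exactly the use-after-free the patch closes.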
*/ ++ if (ev.queue_id == cdata.qid[0]) { + ev.flow_id = ev.mbuf->hash.rss + % cdata.num_fids; ++ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0); ++ } + + ev.queue_id = cdata.next_qid[ev.queue_id]; + ev.op = RTE_EVENT_OP_FORWARD; +@@ -96,10 +98,12 @@ worker_generic_burst(void *arg) + + for (i = 0; i < nb_rx; i++) { + +- /* The first worker stage does classification */ +- if (events[i].queue_id == cdata.qid[0]) ++ /* The first worker stage does classification and sets txq. */ ++ if (events[i].queue_id == cdata.qid[0]) { + events[i].flow_id = events[i].mbuf->hash.rss + % cdata.num_fids; ++ rte_event_eth_tx_adapter_txq_set(events[i].mbuf, 0); ++ } + + events[i].queue_id = cdata.next_qid[events[i].queue_id]; + events[i].op = RTE_EVENT_OP_FORWARD; diff --git a/dpdk/examples/fips_validation/Makefile b/dpdk/examples/fips_validation/Makefile index bca6647f55..fbb778d57a 100644 --- a/dpdk/examples/fips_validation/Makefile @@ -77737,7 +83320,7 @@ index af5cfcf794..5c4e260e2c 100644 }; diff --git a/dpdk/examples/ipsec-secgw/ipsec-secgw.c b/dpdk/examples/ipsec-secgw/ipsec-secgw.c -index a64a26c992..9620d73fc8 100644 +index a64a26c992..849efbe755 100644 --- a/dpdk/examples/ipsec-secgw/ipsec-secgw.c +++ b/dpdk/examples/ipsec-secgw/ipsec-secgw.c @@ -99,10 +99,10 @@ uint32_t qp_desc_nb = 2048; @@ -77775,7 +83358,37 @@ index a64a26c992..9620d73fc8 100644 { struct ipsec_traffic traffic; -@@ -695,9 +695,7 @@ ipsec_poll_mode_worker(void) +@@ -626,12 +626,13 @@ drain_inbound_crypto_queues(const struct lcore_conf *qconf, + uint32_t n; + struct ipsec_traffic trf; + unsigned int lcoreid = rte_lcore_id(); ++ const int nb_pkts = RTE_DIM(trf.ipsec.pkts); + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts, +- RTE_DIM(trf.ipsec.pkts)); ++ RTE_MIN(MAX_PKT_BURST, nb_pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; +@@ -663,12 +664,13 @@ drain_outbound_crypto_queues(const struct lcore_conf *qconf, + { + uint32_t n; + struct ipsec_traffic trf; ++ const int nb_pkts = RTE_DIM(trf.ipsec.pkts); + + if (app_sa_prm.enable == 0) { + + /* dequeue packets from crypto-queue */ + n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts, +- RTE_DIM(trf.ipsec.pkts)); ++ RTE_MIN(MAX_PKT_BURST, nb_pkts)); + + trf.ip4.num = 0; + trf.ip6.num = 0; +@@ -695,9 +697,7 @@ ipsec_poll_mode_worker(void) struct rte_mbuf *pkts[MAX_PKT_BURST]; uint32_t lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; @@ -77786,7 +83399,7 @@ index a64a26c992..9620d73fc8 100644 struct lcore_conf *qconf; int32_t rc, socket_id; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) -@@ -744,7 +742,7 @@ ipsec_poll_mode_worker(void) +@@ -744,7 +744,7 @@ ipsec_poll_mode_worker(void) portid = rxql[i].port_id; queueid = rxql[i].queue_id; RTE_LOG(INFO, IPSEC, @@ -77795,7 +83408,7 @@ index a64a26c992..9620d73fc8 100644 lcore_id, portid, queueid); } -@@ -789,8 +787,7 @@ int +@@ -789,8 +789,7 @@ int check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) { uint16_t i; @@ -77805,7 +83418,7 @@ index a64a26c992..9620d73fc8 100644 for (i = 0; i < nb_lcore_params; ++i) { portid = lcore_params_array[i].port_id; -@@ -810,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) +@@ -810,7 +809,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid) static int32_t check_poll_mode_params(struct eh_conf *eh_conf) { @@ -77814,7 +83427,7 @@ index a64a26c992..9620d73fc8 100644 uint16_t portid; uint16_t i; int32_t socket_id; -@@ -829,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf) +@@ 
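Both drain_*_crypto_queues() fixes above cap the dequeue request at the size of the destination array, since MAX_PKT_BURST and RTE_DIM(trf.ipsec.pkts) are independent constants. A sketch of the pattern; dequeue_burst() is a hypothetical stand-in for the cqp dequeue helpers:

#include <stdint.h>
#include <rte_common.h>	/* RTE_MIN */

#define MAX_PKT_BURST 32

struct pkt;	/* opaque for the sketch */
extern uint32_t dequeue_burst(struct pkt **out, uint32_t n);

static uint32_t
drain(struct pkt **out, uint32_t out_capacity)
{
	/* Never request more entries than the output array can hold. */
	return dequeue_burst(out, RTE_MIN((uint32_t)MAX_PKT_BURST, out_capacity));
}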
-829,13 +828,13 @@ check_poll_mode_params(struct eh_conf *eh_conf) for (i = 0; i < nb_lcore_params; ++i) { lcore = lcore_params[i].lcore_id; if (!rte_lcore_is_enabled(lcore)) { @@ -77830,7 +83443,7 @@ index a64a26c992..9620d73fc8 100644 "with numa off\n", lcore, socket_id); } -@@ -852,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf) +@@ -852,7 +851,7 @@ check_poll_mode_params(struct eh_conf *eh_conf) return 0; } @@ -77839,7 +83452,7 @@ index a64a26c992..9620d73fc8 100644 get_port_nb_rx_queues(const uint16_t port) { int32_t queue = -1; -@@ -863,14 +860,14 @@ get_port_nb_rx_queues(const uint16_t port) +@@ -863,14 +862,14 @@ get_port_nb_rx_queues(const uint16_t port) lcore_params[i].queue_id > queue) queue = lcore_params[i].queue_id; } @@ -77856,7 +83469,7 @@ index a64a26c992..9620d73fc8 100644 for (i = 0; i < nb_lcore_params; ++i) { lcore = lcore_params[i].lcore_id; -@@ -1051,6 +1048,11 @@ parse_config(const char *q_arg) +@@ -1051,6 +1050,11 @@ parse_config(const char *q_arg) char *str_fld[_NUM_FLD]; int32_t i; uint32_t size; @@ -77868,7 +83481,7 @@ index a64a26c992..9620d73fc8 100644 nb_lcore_params = 0; -@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg) +@@ -1071,7 +1075,7 @@ parse_config(const char *q_arg) for (i = 0; i < _NUM_FLD; i++) { errno = 0; int_fld[i] = strtoul(str_fld[i], &end, 0); @@ -77877,7 +83490,7 @@ index a64a26c992..9620d73fc8 100644 return -1; } if (nb_lcore_params >= MAX_LCORE_PARAMS) { -@@ -1080,11 +1082,11 @@ parse_config(const char *q_arg) +@@ -1080,11 +1084,11 @@ parse_config(const char *q_arg) return -1; } lcore_params_array[nb_lcore_params].port_id = @@ -77892,7 +83505,7 @@ index a64a26c992..9620d73fc8 100644 ++nb_lcore_params; } lcore_params = lcore_params_array; -@@ -1427,9 +1429,8 @@ add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) +@@ -1427,9 +1431,8 @@ add_dst_ethaddr(uint16_t port, const struct rte_ether_addr *addr) if (port >= RTE_DIM(ethaddr_tbl)) return -EINVAL; @@ -77904,7 +83517,7 @@ index a64a26c992..9620d73fc8 100644 return 0; } -@@ -1700,6 +1701,9 @@ cryptodevs_init(enum eh_pkt_transfer_mode mode) +@@ -1700,6 +1703,9 @@ cryptodevs_init(enum eh_pkt_transfer_mode mode) total_nb_qps += qp; dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id); @@ -77914,7 +83527,7 @@ index a64a26c992..9620d73fc8 100644 dev_conf.nb_queue_pairs = qp; dev_conf.ff_disable = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; -@@ -1881,7 +1885,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -1881,7 +1887,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, struct rte_eth_dev_info dev_info; struct rte_eth_txconf *txconf; uint16_t nb_tx_queue, nb_rx_queue; @@ -77924,7 +83537,7 @@ index a64a26c992..9620d73fc8 100644 int32_t ret, socket_id; struct lcore_conf *qconf; struct rte_ether_addr ethaddr; -@@ -1907,11 +1912,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -1907,11 +1914,12 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, "Error getting MAC address (port %u): %s\n", portid, rte_strerror(-ret)); @@ -77940,7 +83553,7 @@ index a64a26c992..9620d73fc8 100644 (struct rte_ether_addr *)(val_eth + portid) + 1); print_ethaddr("Address: ", ðaddr); -@@ -2054,10 +2060,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, +@@ -2054,10 +2062,10 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads, /* Register Rx callback if ptypes are not supported */ if (!ptype_supported 
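The parse_config() changes above make each numeric field reject trailing characters, overflow (via errno) and out-of-range values before it is narrowed to its destination type. A generic sketch of that validation:

#include <errno.h>
#include <stdlib.h>

/* Parse one unsigned field; returns 0 on success, -1 on malformed input. */
static int
parse_uint_field(const char *s, unsigned long max, unsigned long *out)
{
	char *end;

	errno = 0;
	*out = strtoul(s, &end, 0);
	if (errno != 0 ||	/* ERANGE on overflow */
	    end == s ||		/* no digits at all */
	    *end != '\0' ||	/* trailing junk */
	    *out > max)		/* would not fit the destination field */
		return -1;
	return 0;
}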
&& @@ -78055,6 +83668,27 @@ index 6bef2a7285..13694ee1e0 100644 struct rte_security_ctx *sec_ctx; } __rte_cache_aligned; +diff --git a/dpdk/examples/ipsec-secgw/ipsec_process.c b/dpdk/examples/ipsec-secgw/ipsec_process.c +index b0cece3ad1..1a64a4b49f 100644 +--- a/dpdk/examples/ipsec-secgw/ipsec_process.c ++++ b/dpdk/examples/ipsec-secgw/ipsec_process.c +@@ -336,6 +336,7 @@ ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) + struct rte_ipsec_session *ss; + struct traffic_type *out; + struct rte_ipsec_group *pg; ++ const int nb_cops = RTE_DIM(trf->ipsec.pkts); + struct rte_crypto_op *cop[RTE_DIM(trf->ipsec.pkts)]; + struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)]; + +@@ -345,7 +346,7 @@ ipsec_cqp_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf) + out = &trf->ipsec; + + /* dequeue completed crypto-ops */ +- n = ctx_dequeue(ctx, cop, RTE_DIM(cop)); ++ n = ctx_dequeue(ctx, cop, RTE_MIN(MAX_PKT_BURST, nb_cops)); + if (n == 0) + return; + diff --git a/dpdk/examples/ipsec-secgw/ipsec_worker.c b/dpdk/examples/ipsec-secgw/ipsec_worker.c index 2f02946f86..7e4db87caf 100644 --- a/dpdk/examples/ipsec-secgw/ipsec_worker.c @@ -78205,10 +83839,18 @@ index 23a09550a4..d06053451a 100644 LDFLAGS += -lpqos diff --git a/dpdk/examples/l2fwd-event/l2fwd_event.c b/dpdk/examples/l2fwd-event/l2fwd_event.c -index 63450537fe..4b5a032e35 100644 +index 63450537fe..78f10f31ad 100644 --- a/dpdk/examples/l2fwd-event/l2fwd_event.c +++ b/dpdk/examples/l2fwd-event/l2fwd_event.c -@@ -284,7 +284,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc, +@@ -141,6 +141,7 @@ l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc) + rte_spinlock_lock(&evt_rsrc->evp.lock); + if (index >= evt_rsrc->evp.nb_ports) { + printf("No free event port is available\n"); ++ rte_spinlock_unlock(&evt_rsrc->evp.lock); + return -1; + } + +@@ -284,7 +285,7 @@ l2fwd_event_loop_burst(struct l2fwd_resources *rsrc, } } @@ -78217,7 +83859,7 @@ index 63450537fe..4b5a032e35 100644 } static __rte_always_inline void -@@ -468,7 +468,7 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags) +@@ -468,7 +469,7 @@ l2fwd_event_loop_vector(struct l2fwd_resources *rsrc, const uint32_t flags) } } @@ -78731,6 +84373,48 @@ index 401692bcec..31798ccb10 100644 } } } +diff --git a/dpdk/examples/l3fwd/l3fwd_altivec.h b/dpdk/examples/l3fwd/l3fwd_altivec.h +index e45e138e59..b91a6b5587 100644 +--- a/dpdk/examples/l3fwd/l3fwd_altivec.h ++++ b/dpdk/examples/l3fwd/l3fwd_altivec.h +@@ -11,6 +11,9 @@ + #include "altivec/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. +@@ -117,7 +120,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; +diff --git a/dpdk/examples/l3fwd/l3fwd_common.h b/dpdk/examples/l3fwd/l3fwd_common.h +index 224b1c08e8..d94e5f1357 100644 +--- a/dpdk/examples/l3fwd/l3fwd_common.h ++++ b/dpdk/examples/l3fwd/l3fwd_common.h +@@ -18,6 +18,13 @@ + /* Minimum value of IPV4 total length (20B) in network byte order. 
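The l2fwd-event fix above is a classic lock leak: the early return inside l2fwd_get_free_event_port() left evp.lock held forever. The shape of the bug and the fix, sketched with pthreads standing in for rte_spinlock:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int next_port, nb_ports;

static int
get_free_port(void)
{
	int p;

	pthread_mutex_lock(&lock);
	if (next_port >= nb_ports) {
		pthread_mutex_unlock(&lock);	/* the fix: release on the error path too */
		return -1;
	}
	p = next_port++;
	pthread_mutex_unlock(&lock);
	return p;
}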
*/ + #define IPV4_MIN_LEN_BE (sizeof(struct rte_ipv4_hdr) << 8) + ++/* ++ * send_packet_multi() specific number of dest ports ++ * due to implementation we need to allocate array bigger then ++ * actual max number of elements in the array. ++ */ ++#define SENDM_PORT_OVERHEAD(x) (x) ++ + /* + * From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2: + * - The IP version number must be 4. diff --git a/dpdk/examples/l3fwd/l3fwd_em.c b/dpdk/examples/l3fwd/l3fwd_em.c index 35de31157e..e298fef523 100644 --- a/dpdk/examples/l3fwd/l3fwd_em.c @@ -78753,6 +84437,32 @@ index 35de31157e..e298fef523 100644 lcore_id, portid, queueid); } +diff --git a/dpdk/examples/l3fwd/l3fwd_em_hlm.h b/dpdk/examples/l3fwd/l3fwd_em_hlm.h +index 2e11eefad7..db9a212e05 100644 +--- a/dpdk/examples/l3fwd/l3fwd_em_hlm.h ++++ b/dpdk/examples/l3fwd/l3fwd_em_hlm.h +@@ -249,7 +249,7 @@ static inline void + l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_em_process_packets(nb_rx, pkts_burst, dst_port, portid, qconf, 0); + send_packets_multi(qconf, pkts_burst, dst_port, nb_rx); +diff --git a/dpdk/examples/l3fwd/l3fwd_em_sequential.h b/dpdk/examples/l3fwd/l3fwd_em_sequential.h +index 067f23889a..3a40b2e434 100644 +--- a/dpdk/examples/l3fwd/l3fwd_em_sequential.h ++++ b/dpdk/examples/l3fwd/l3fwd_em_sequential.h +@@ -79,7 +79,7 @@ l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, + uint16_t portid, struct lcore_conf *qconf) + { + int32_t i, j; +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + if (nb_rx > 0) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[0], diff --git a/dpdk/examples/l3fwd/l3fwd_event.h b/dpdk/examples/l3fwd/l3fwd_event.h index e21817c36b..a7af23b8a0 100644 --- a/dpdk/examples/l3fwd/l3fwd_event.h @@ -78768,9 +84478,18 @@ index e21817c36b..a7af23b8a0 100644 uint64_t vector_tmo_ns; }; diff --git a/dpdk/examples/l3fwd/l3fwd_fib.c b/dpdk/examples/l3fwd/l3fwd_fib.c -index edc0dd69b9..10fa121942 100644 +index edc0dd69b9..5e398f8ce2 100644 --- a/dpdk/examples/l3fwd/l3fwd_fib.c +++ b/dpdk/examples/l3fwd/l3fwd_fib.c +@@ -121,7 +121,7 @@ fib_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, + { + uint32_t ipv4_arr[nb_rx]; + uint8_t ipv6_arr[nb_rx][RTE_FIB6_IPV6_ADDR_SIZE]; +- uint16_t hops[nb_rx]; ++ uint16_t hops[SENDM_PORT_OVERHEAD(nb_rx)]; + uint64_t hopsv4[nb_rx], hopsv6[nb_rx]; + uint8_t type_arr[nb_rx]; + uint32_t ipv4_cnt = 0, ipv6_cnt = 0; @@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy) uint64_t prev_tsc, diff_tsc, cur_tsc; int i, nb_rx; @@ -78827,6 +84546,93 @@ index 5172979c72..54b059fe2a 100644 lcore_id, portid, queueid); } +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h +index adb82f1478..91aad5c313 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h ++++ b/dpdk/examples/l3fwd/l3fwd_lpm_altivec.h +@@ -145,7 +145,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint8_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_neon.h b/dpdk/examples/l3fwd/l3fwd_lpm_neon.h +index 2a68c4c15e..3c1f827424 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_neon.h ++++ 
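The l3fwd hunks above all resize dst_port[] through SENDM_PORT_OVERHEAD(). The reason: the vector send path writes whole FWDSTEP-wide groups, so its final store may land past entry nb_rx - 1, and padding the array by 2 * FWDSTEP keeps those stores in bounds. A scalar sketch of the access pattern (the group write is shown with memset; the real code uses SIMD stores):

#include <stdint.h>
#include <string.h>

#define FWDSTEP 4
#define MAX_PKT_BURST 32
#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP)	/* slack for group stores */

static void
resolve_ports(uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], int nb_rx)
{
	int i;

	/* Writes advance in FWDSTEP-sized groups, so the last one may
	 * extend past nb_rx; the overhead absorbs that spill. */
	for (i = 0; i < nb_rx; i += FWDSTEP)
		memset(&dst_port[i], 0, FWDSTEP * sizeof(dst_port[0]));
}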
b/dpdk/examples/l3fwd/l3fwd_lpm_neon.h +@@ -171,7 +171,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_lpm_sse.h b/dpdk/examples/l3fwd/l3fwd_lpm_sse.h +index db15030320..50f1abbd8a 100644 +--- a/dpdk/examples/l3fwd/l3fwd_lpm_sse.h ++++ b/dpdk/examples/l3fwd/l3fwd_lpm_sse.h +@@ -129,7 +129,7 @@ static inline void + l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst, uint16_t portid, + struct lcore_conf *qconf) + { +- uint16_t dst_port[MAX_PKT_BURST]; ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)]; + + l3fwd_lpm_process_packets(nb_rx, pkts_burst, portid, dst_port, qconf, + 0); +diff --git a/dpdk/examples/l3fwd/l3fwd_neon.h b/dpdk/examples/l3fwd/l3fwd_neon.h +index 40807d5965..bc2bab8265 100644 +--- a/dpdk/examples/l3fwd/l3fwd_neon.h ++++ b/dpdk/examples/l3fwd/l3fwd_neon.h +@@ -10,6 +10,9 @@ + #include "neon/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. +@@ -92,7 +95,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; +diff --git a/dpdk/examples/l3fwd/l3fwd_sse.h b/dpdk/examples/l3fwd/l3fwd_sse.h +index 083729cdef..6236b7873c 100644 +--- a/dpdk/examples/l3fwd/l3fwd_sse.h ++++ b/dpdk/examples/l3fwd/l3fwd_sse.h +@@ -10,6 +10,9 @@ + #include "sse/port_group.h" + #include "l3fwd_common.h" + ++#undef SENDM_PORT_OVERHEAD ++#define SENDM_PORT_OVERHEAD(x) ((x) + 2 * FWDSTEP) ++ + /* + * Update source and destination MAC addresses in the ethernet header. + * Perform RFC1812 checks and updates for IPV4 packets. 
+@@ -91,7 +94,8 @@ process_packet(struct rte_mbuf *pkt, uint16_t *dst_port) + */ + static __rte_always_inline void + send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst, +- uint16_t dst_port[MAX_PKT_BURST], int nb_rx) ++ uint16_t dst_port[SENDM_PORT_OVERHEAD(MAX_PKT_BURST)], ++ int nb_rx) + { + int32_t k; + int j = 0; diff --git a/dpdk/examples/l3fwd/main.c b/dpdk/examples/l3fwd/main.c index 5198ff30dd..9201019711 100644 --- a/dpdk/examples/l3fwd/main.c @@ -79071,7 +84877,7 @@ index 5198ff30dd..9201019711 100644 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { diff --git a/dpdk/examples/ntb/ntb_fwd.c b/dpdk/examples/ntb/ntb_fwd.c -index f9abed28e4..585aad9d70 100644 +index f9abed28e4..5082e66c7b 100644 --- a/dpdk/examples/ntb/ntb_fwd.c +++ b/dpdk/examples/ntb/ntb_fwd.c @@ -865,7 +865,7 @@ ntb_stats_clear(void) @@ -79092,6 +84898,18 @@ index f9abed28e4..585aad9d70 100644 printf("Error: Cannot get count of xstats\n"); return; } +@@ -1443,7 +1443,10 @@ main(int argc, char **argv) + eth_port_id = rte_eth_find_next(0); + + if (eth_port_id < RTE_MAX_ETHPORTS) { +- rte_eth_dev_info_get(eth_port_id, ðdev_info); ++ ret = rte_eth_dev_info_get(eth_port_id, ðdev_info); ++ if (ret) ++ rte_exit(EXIT_FAILURE, "Can't get info for port %u\n", eth_port_id); ++ + eth_pconf.rx_adv_conf.rss_conf.rss_hf &= + ethdev_info.flow_type_rss_offloads; + ret = rte_eth_dev_configure(eth_port_id, num_queues, diff --git a/dpdk/examples/packet_ordering/main.c b/dpdk/examples/packet_ordering/main.c index d2fd6f77e4..f839db9102 100644 --- a/dpdk/examples/packet_ordering/main.c @@ -79367,6 +85185,19 @@ index 42e53a0f9a..31c7471236 100644 while (isblank(*addrs)) addrs++; if (*addrs == '\0') { +diff --git a/dpdk/examples/vhost_blk/vhost_blk.c b/dpdk/examples/vhost_blk/vhost_blk.c +index 3709d7ed06..197ad6250b 100644 +--- a/dpdk/examples/vhost_blk/vhost_blk.c ++++ b/dpdk/examples/vhost_blk/vhost_blk.c +@@ -785,7 +785,7 @@ vhost_blk_bdev_construct(const char *bdev_name, + bdev->data = rte_zmalloc(NULL, blk_cnt * blk_size, 0); + if (!bdev->data) { + fprintf(stderr, "No enough reserved huge memory for disk\n"); +- free(bdev); ++ rte_free(bdev); + return NULL; + } + diff --git a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c index 94bfbbaf78..5eddb47847 100644 --- a/dpdk/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c @@ -79528,6 +85359,19 @@ index 1521cdbc53..71f1957bf7 100644 dev_id, queue_id, op, epfd, vec); return ret; } +diff --git a/dpdk/lib/bpf/bpf_convert.c b/dpdk/lib/bpf/bpf_convert.c +index 9563274c9c..024c9edc9e 100644 +--- a/dpdk/lib/bpf/bpf_convert.c ++++ b/dpdk/lib/bpf/bpf_convert.c +@@ -559,7 +559,7 @@ rte_bpf_convert(const struct bpf_program *prog) + ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, ebpf, &ebpf_len); + if (ret < 0) { + RTE_BPF_LOG(ERR, "%s: cannot convert cBPF to eBPF\n", __func__); +- free(prm); ++ rte_free(prm); + rte_errno = -ret; + return NULL; + } diff --git a/dpdk/lib/bpf/bpf_validate.c b/dpdk/lib/bpf/bpf_validate.c index 61cbb42216..ae2dad46bb 100644 --- a/dpdk/lib/bpf/bpf_validate.c @@ -80596,7 +86440,7 @@ index 86d792e2e7..cef9f2b3cb 100644 struct rte_cryptodev_cb *cb; diff --git a/dpdk/lib/dmadev/rte_dmadev.c b/dpdk/lib/dmadev/rte_dmadev.c -index 4da653eec7..d94f85ea9a 100644 +index 4da653eec7..2091db98bd 100644 --- a/dpdk/lib/dmadev/rte_dmadev.c +++ b/dpdk/lib/dmadev/rte_dmadev.c @@ -157,15 +157,24 @@ static int @@ -80628,7 +86472,12 @@ index 4da653eec7..d94f85ea9a 
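Several fixes above (vhost_blk, bpf_convert, ifpga) repair mismatched allocator pairs: memory obtained from rte_zmalloc()/rte_calloc() lives on DPDK's heap and must be released with rte_free(), never libc free(). Minimal sketch:

#include <stddef.h>
#include <rte_malloc.h>

static int
with_buffer(size_t len)
{
	char *buf = rte_zmalloc(NULL, len, 0);	/* DPDK heap, zeroed */

	if (buf == NULL)
		return -1;
	/* ... use buf ... */
	rte_free(buf);	/* must match the rte_* allocator; libc free()
			 * would hand a foreign pointer to the wrong heap */
	return 0;
}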
100644 return 0; } -@@ -710,7 +719,7 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status * +@@ -706,11 +715,11 @@ rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status * + { + struct rte_dma_dev *dev = &rte_dma_devices[dev_id]; + +- if (!rte_dma_is_valid(dev_id)) ++ if (!rte_dma_is_valid(dev_id) || status == NULL) return -EINVAL; if (vchan >= dev->data->dev_conf.nb_vchans) { @@ -80672,6 +86521,34 @@ index dcb554af1e..9cac9c6390 100644 RTE_LOG(CRIT, EAL, "EAL could not release all resources\n"); exit(exit_code); +diff --git a/dpdk/lib/eal/common/eal_common_dev.c b/dpdk/lib/eal/common/eal_common_dev.c +index 614ef6c9fc..bc53b2e28d 100644 +--- a/dpdk/lib/eal/common/eal_common_dev.c ++++ b/dpdk/lib/eal/common/eal_common_dev.c +@@ -550,16 +550,17 @@ rte_dev_event_callback_unregister(const char *device_name, + next = TAILQ_NEXT(event_cb, next); + + if (device_name != NULL && event_cb->dev_name != NULL) { +- if (!strcmp(event_cb->dev_name, device_name)) { +- if (event_cb->cb_fn != cb_fn || +- (cb_arg != (void *)-1 && +- event_cb->cb_arg != cb_arg)) +- continue; +- } ++ if (strcmp(event_cb->dev_name, device_name)) ++ continue; + } else if (device_name != NULL) { + continue; + } + ++ /* Remove only matching callback with arg */ ++ if (event_cb->cb_fn != cb_fn || ++ (cb_arg != (void *)-1 && event_cb->cb_arg != cb_arg)) ++ continue; ++ + /* + * if this callback is not executing right now, + * then remove it. diff --git a/dpdk/lib/eal/common/eal_common_dynmem.c b/dpdk/lib/eal/common/eal_common_dynmem.c index 52e52e5986..95da55d9b0 100644 --- a/dpdk/lib/eal/common/eal_common_dynmem.c @@ -81531,8 +87408,21 @@ index d1616b0bd9..1a7cf8e7b7 100644 } static ssize_t +diff --git a/dpdk/lib/eal/unix/meson.build b/dpdk/lib/eal/unix/meson.build +index cc7d67dd32..f1eb82e16a 100644 +--- a/dpdk/lib/eal/unix/meson.build ++++ b/dpdk/lib/eal/unix/meson.build +@@ -11,3 +11,8 @@ sources += files( + 'eal_unix_timer.c', + 'rte_thread.c', + ) ++ ++if is_freebsd or cc.has_function('pthread_attr_setaffinity_np', args: '-D_GNU_SOURCE', ++ prefix : '#include ') ++ cflags += '-DRTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP' ++endif diff --git a/dpdk/lib/eal/unix/rte_thread.c b/dpdk/lib/eal/unix/rte_thread.c -index 37ebfcfca1..f4076122a4 100644 +index 37ebfcfca1..f15e641f91 100644 --- a/dpdk/lib/eal/unix/rte_thread.c +++ b/dpdk/lib/eal/unix/rte_thread.c @@ -5,6 +5,7 @@ @@ -81543,11 +87433,12 @@ index 37ebfcfca1..f4076122a4 100644 #include #include -@@ -16,9 +17,14 @@ struct eal_tls_key { +@@ -16,10 +17,17 @@ struct eal_tls_key { pthread_key_t thread_index; }; -struct thread_routine_ctx { ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP +struct thread_start_context { rte_thread_func thread_func; - void *routine_args; @@ -81558,11 +87449,15 @@ index 37ebfcfca1..f4076122a4 100644 + int wrapper_ret; + bool wrapper_done; }; ++#endif static int -@@ -81,13 +87,29 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri, + thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri, +@@ -80,15 +88,33 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri, + return 0; } ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP static void * -thread_func_wrapper(void *arg) +thread_start_wrapper(void *arg) @@ -81572,29 +87467,31 @@ index 37ebfcfca1..f4076122a4 100644 + rte_thread_func thread_func = ctx->thread_func; + void *thread_args = ctx->thread_args; + int ret = 0; -+ + +- free(arg); + if (ctx->thread_attr != NULL && CPU_COUNT(&ctx->thread_attr->cpuset) > 0) { + ret = 
rte_thread_set_affinity_by_id(rte_thread_self(), &ctx->thread_attr->cpuset); + if (ret != 0) + RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n"); + } -- free(arg); +- return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args); + pthread_mutex_lock(&ctx->wrapper_mutex); + ctx->wrapper_ret = ret; + ctx->wrapper_done = true; + pthread_cond_signal(&ctx->wrapper_cond); + pthread_mutex_unlock(&ctx->wrapper_mutex); - -- return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args); ++ + if (ret != 0) + return NULL; + + return (void *)(uintptr_t)thread_func(thread_args); } ++#endif int -@@ -98,20 +120,18 @@ rte_thread_create(rte_thread_t *thread_id, + rte_thread_create(rte_thread_t *thread_id, +@@ -98,20 +124,20 @@ rte_thread_create(rte_thread_t *thread_id, int ret = 0; pthread_attr_t attr; pthread_attr_t *attrp = NULL; @@ -81612,6 +87509,7 @@ index 37ebfcfca1..f4076122a4 100644 - } - ctx->routine_args = args; - ctx->thread_func = thread_func; ++#ifndef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP + struct thread_start_context ctx = { + .thread_func = thread_func, + .thread_args = args, @@ -81620,10 +87518,28 @@ index 37ebfcfca1..f4076122a4 100644 + .wrapper_mutex = PTHREAD_MUTEX_INITIALIZER, + .wrapper_cond = PTHREAD_COND_INITIALIZER, + }; ++#endif if (thread_attr != NULL) { ret = pthread_attr_init(&attr); -@@ -133,7 +153,6 @@ rte_thread_create(rte_thread_t *thread_id, +@@ -122,6 +148,16 @@ rte_thread_create(rte_thread_t *thread_id, + + attrp = &attr; + ++#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP ++ if (CPU_COUNT(&thread_attr->cpuset) > 0) { ++ ret = pthread_attr_setaffinity_np(attrp, sizeof(thread_attr->cpuset), ++ &thread_attr->cpuset); ++ if (ret != 0) { ++ RTE_LOG(DEBUG, EAL, "pthread_attr_setaffinity_np failed\n"); ++ goto cleanup; ++ } ++ } ++#endif + /* + * Set the inherit scheduler parameter to explicit, + * otherwise the priority attribute is ignored. 
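The rte_thread.c rework above has the new thread apply its own affinity and report the result back through a mutex/condvar handshake, so rte_thread_create() can fail synchronously when affinity cannot be set. The handshake in isolation:

#include <pthread.h>
#include <stdbool.h>

struct start_ctx {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	int ret;
};

static void *
start_wrapper(void *arg)	/* runs in the new thread */
{
	struct start_ctx *ctx = arg;
	int ret = 0;		/* e.g. result of applying the affinity */

	pthread_mutex_lock(&ctx->lock);
	ctx->ret = ret;
	ctx->done = true;
	pthread_cond_signal(&ctx->cond);
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

static int
wait_started(struct start_ctx *ctx)	/* runs in the creating thread */
{
	pthread_mutex_lock(&ctx->lock);
	while (!ctx->done)		/* loop guards against spurious wakeups */
		pthread_cond_wait(&ctx->cond, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
	return ctx->ret;
}

The context can therefore live on the creator's stack: the creator does not return until the wrapper has published its result.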
+@@ -133,7 +169,6 @@ rte_thread_create(rte_thread_t *thread_id, goto cleanup; } @@ -81631,17 +87547,19 @@ index 37ebfcfca1..f4076122a4 100644 if (thread_attr->priority == RTE_THREAD_PRIORITY_REALTIME_CRITICAL) { ret = ENOTSUP; -@@ -158,24 +177,22 @@ rte_thread_create(rte_thread_t *thread_id, +@@ -157,25 +192,32 @@ rte_thread_create(rte_thread_t *thread_id, + } } ++#ifdef RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, - thread_func_wrapper, ctx); -+ thread_start_wrapper, &ctx); ++ (void *)(void *)thread_func, args); if (ret != 0) { RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); goto cleanup; } - +- - if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) { - ret = rte_thread_set_affinity_by_id(*thread_id, - &thread_attr->cpuset); @@ -81649,7 +87567,15 @@ index 37ebfcfca1..f4076122a4 100644 - RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n"); - goto cleanup; - } -- } ++#else /* !RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ ++ ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp, ++ thread_start_wrapper, &ctx); ++ if (ret != 0) { ++ RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); ++ goto cleanup; + } + +- ctx = NULL; + pthread_mutex_lock(&ctx.wrapper_mutex); + while (!ctx.wrapper_done) + pthread_cond_wait(&ctx.wrapper_cond, &ctx.wrapper_mutex); @@ -81658,8 +87584,8 @@ index 37ebfcfca1..f4076122a4 100644 + + if (ret != 0) + pthread_join((pthread_t)thread_id->opaque_id, NULL); - -- ctx = NULL; ++#endif /* RTE_EAL_PTHREAD_ATTR_SETAFFINITY_NP */ ++ cleanup: - free(ctx); if (attrp != NULL) @@ -81810,6 +87736,19 @@ index 1c1e9d01e3..3538633816 100644 if (ResumeThread(thread_handle) == (DWORD)-1) { ret = thread_log_last_error("ResumeThread()"); goto cleanup; +diff --git a/dpdk/lib/eal/x86/include/rte_io.h b/dpdk/lib/eal/x86/include/rte_io.h +index 0e1fefdee1..5366e09c47 100644 +--- a/dpdk/lib/eal/x86/include/rte_io.h ++++ b/dpdk/lib/eal/x86/include/rte_io.h +@@ -24,7 +24,7 @@ __rte_x86_movdiri(uint32_t value, volatile void *addr) + { + asm volatile( + /* MOVDIRI */ +- ".byte 0x40, 0x0f, 0x38, 0xf9, 0x02" ++ ".byte 0x0f, 0x38, 0xf9, 0x02" + : + : "a" (value), "d" (addr)); + } diff --git a/dpdk/lib/eal/x86/include/rte_memcpy.h b/dpdk/lib/eal/x86/include/rte_memcpy.h index d4d7a5cfc8..fd151be708 100644 --- a/dpdk/lib/eal/x86/include/rte_memcpy.h @@ -82022,7 +87961,7 @@ index 838b3a8f9f..311beb17cb 100644 return NULL; } diff --git a/dpdk/lib/ethdev/rte_ethdev.c b/dpdk/lib/ethdev/rte_ethdev.c -index 5d5e18db1e..e1f18fd8a4 100644 +index 5d5e18db1e..99b16e4dcc 100644 --- a/dpdk/lib/ethdev/rte_ethdev.c +++ b/dpdk/lib/ethdev/rte_ethdev.c @@ -631,7 +631,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) @@ -82161,7 +88100,20 @@ index 5d5e18db1e..e1f18fd8a4 100644 cap.max_nb_queues); return -EINVAL; } -@@ -4362,6 +4364,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) +@@ -2489,6 +2491,12 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + ++#ifdef RTE_ETHDEV_DEBUG_TX ++ ret = eth_dev_validate_tx_queue(dev, queue_id); ++ if (ret != 0) ++ return ret; ++#endif ++ + if (*dev->dev_ops->tx_done_cleanup == NULL) + return -ENOTSUP; + +@@ -4362,6 +4370,11 @@ rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -82173,7 +88125,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 if (*dev->dev_ops->fec_set == NULL) return 
-ENOTSUP; return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa)); -@@ -4499,6 +4506,7 @@ int +@@ -4499,6 +4512,7 @@ int rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) { struct rte_eth_dev *dev; @@ -82181,7 +88133,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); -@@ -4517,6 +4525,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) +@@ -4517,6 +4531,15 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) if (*dev->dev_ops->mac_addr_set == NULL) return -ENOTSUP; @@ -82197,7 +88149,30 @@ index 5d5e18db1e..e1f18fd8a4 100644 ret = (*dev->dev_ops->mac_addr_set)(dev, addr); if (ret < 0) return ret; -@@ -5935,7 +5952,7 @@ eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, +@@ -5722,13 +5745,19 @@ static void + eth_dev_adjust_nb_desc(uint16_t *nb_desc, + const struct rte_eth_desc_lim *desc_lim) + { ++ /* Upcast to uint32 to avoid potential overflow with RTE_ALIGN_CEIL(). */ ++ uint32_t nb_desc_32 = (uint32_t)*nb_desc; ++ + if (desc_lim->nb_align != 0) +- *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align); ++ nb_desc_32 = RTE_ALIGN_CEIL(nb_desc_32, desc_lim->nb_align); + + if (desc_lim->nb_max != 0) +- *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max); ++ nb_desc_32 = RTE_MIN(nb_desc_32, desc_lim->nb_max); ++ ++ nb_desc_32 = RTE_MAX(nb_desc_32, desc_lim->nb_min); + +- *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min); ++ /* Assign clipped u32 back to u16. */ ++ *nb_desc = (uint16_t)nb_desc_32; + } + + int +@@ -5935,7 +5964,7 @@ eth_dev_handle_port_dump_priv(const char *cmd __rte_unused, if (!rte_eth_dev_is_valid_port(port_id)) return -EINVAL; @@ -82206,7 +88181,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 if (buf == NULL) return -ENOMEM; -@@ -6037,10 +6054,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6037,10 +6066,8 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, eth_dev->data->nb_tx_queues); rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id); rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu); @@ -82218,7 +88193,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 rte_ether_format_addr(mac_addr, sizeof(mac_addr), eth_dev->data->mac_addrs); rte_tel_data_add_dict_string(d, "mac_addr", mac_addr); -@@ -6068,12 +6083,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, +@@ -6068,12 +6095,12 @@ eth_dev_handle_port_info(const char *cmd __rte_unused, rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0); rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0); rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node); @@ -82235,7 +88210,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); return 0; -@@ -6137,7 +6152,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id, +@@ -6137,7 +6164,7 @@ rte_eth_ip_reassembly_capability_get(uint16_t port_id, } if (reassembly_capa == NULL) { @@ -82244,7 +88219,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 return -EINVAL; } -@@ -6167,7 +6182,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id, +@@ -6167,7 +6194,7 @@ rte_eth_ip_reassembly_conf_get(uint16_t port_id, } if (conf == NULL) { @@ -82253,7 +88228,7 @@ index 5d5e18db1e..e1f18fd8a4 100644 return -EINVAL; } -@@ -6190,7 +6205,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, +@@ -6190,7 +6217,7 @@ rte_eth_ip_reassembly_conf_set(uint16_t port_id, if (dev->data->dev_configured == 0) { RTE_ETHDEV_LOG(ERR, "Device with port_id=%u is not 
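eth_dev_adjust_nb_desc() above upcasts before aligning because RTE_ALIGN_CEIL() on a uint16_t can wrap: aligning 65535 up to a multiple of 8 yields 65536, which truncates to 0 in 16 bits. The widened clamp in isolation:

#include <stdint.h>
#include <rte_common.h>	/* RTE_ALIGN_CEIL, RTE_MIN, RTE_MAX */

static void
adjust_nb_desc(uint16_t *nb_desc, uint16_t nb_align,
		uint16_t nb_min, uint16_t nb_max)
{
	uint32_t v = *nb_desc;	/* widen first so the ceil cannot wrap */

	if (nb_align != 0)
		v = RTE_ALIGN_CEIL(v, nb_align);
	if (nb_max != 0)
		v = RTE_MIN(v, (uint32_t)nb_max);
	v = RTE_MAX(v, (uint32_t)nb_min);
	*nb_desc = (uint16_t)v;	/* safe: v has been clipped into 16-bit range */
}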
configured.\n" @@ -82759,7 +88734,7 @@ index 83d154a6ce..2a69290097 100644 uint8_t dev_id, uint16_t cdev_id, struct rte_event_crypto_adapter_vector_limits *limits); diff --git a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c -index cf7bbd4d69..b4f05f250c 100644 +index cf7bbd4d69..bd971fb63a 100644 --- a/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/dpdk/lib/eventdev/rte_event_eth_rx_adapter.c @@ -290,14 +290,14 @@ rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, @@ -82854,6 +88829,15 @@ index cf7bbd4d69..b4f05f250c 100644 memset(&service, 0, sizeof(service)); snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, "rte_event_eth_rx_adapter_%d", id); +@@ -2286,7 +2293,7 @@ rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, + for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) + dev_info->intr_queue[i] = i; + } else { +- if (!rxa_intr_queue(dev_info, rx_queue_id)) ++ if (!rxa_intr_queue(dev_info, rx_queue_id) && nb_rx_intr > 0) + dev_info->intr_queue[nb_rx_intr - 1] = + rx_queue_id; + } @@ -2432,7 +2439,7 @@ rxa_create(uint8_t id, uint8_t dev_id, RTE_DIM(default_rss_key)); @@ -83361,7 +89345,7 @@ index dd63ec6f68..56000271a4 100644 rte_event_queue_attr_set; diff --git a/dpdk/lib/fib/dir24_8.c b/dpdk/lib/fib/dir24_8.c -index a8ba4f64ca..5f73b8a7f0 100644 +index a8ba4f64ca..ec7b9c4b5c 100644 --- a/dpdk/lib/fib/dir24_8.c +++ b/dpdk/lib/fib/dir24_8.c @@ -388,9 +388,15 @@ modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip, @@ -83381,11 +89365,35 @@ index a8ba4f64ca..5f73b8a7f0 100644 break; ret = install_to_fib(dp, ledge, redge, next_hop); +@@ -520,8 +526,8 @@ dir24_8_create(const char *name, int socket_id, struct rte_fib_conf *fib_conf) + + snprintf(mem_name, sizeof(mem_name), "DP_%s", name); + dp = rte_zmalloc_socket(name, sizeof(struct dir24_8_tbl) + +- DIR24_8_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE, +- socket_id); ++ DIR24_8_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t), ++ RTE_CACHE_LINE_SIZE, socket_id); + if (dp == NULL) { + rte_errno = ENOMEM; + return NULL; diff --git a/dpdk/lib/fib/trie.c b/dpdk/lib/fib/trie.c -index 3e780afdaf..09470e7287 100644 +index 3e780afdaf..ca1c2fe3bc 100644 --- a/dpdk/lib/fib/trie.c +++ b/dpdk/lib/fib/trie.c -@@ -451,6 +451,14 @@ get_nxt_net(uint8_t *ip, uint8_t depth) +@@ -46,8 +46,10 @@ static inline rte_fib6_lookup_fn_t + get_vector_fn(enum rte_fib_trie_nh_sz nh_sz) + { + #ifdef CC_TRIE_AVX512_SUPPORT +- if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) || +- (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) ++ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0 || ++ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) <= 0 || ++ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) <= 0 || ++ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512) + return NULL; + switch (nh_sz) { + case RTE_FIB6_TRIE_2B: +@@ -451,6 +453,14 @@ get_nxt_net(uint8_t *ip, uint8_t depth) } } @@ -83400,7 +89408,7 @@ index 3e780afdaf..09470e7287 100644 static int modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE], -@@ -484,11 +492,19 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, +@@ -484,11 +494,19 @@ modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib, return ret; get_nxt_net(redge, tmp_depth); rte_rib6_copy_addr(ledge, redge); @@ -83421,6 +89429,17 @@ index 3e780afdaf..09470e7287 100644 ret = install_to_dp(dp, ledge, redge, next_hop); if (ret != 0) +@@ -629,8 +647,8 @@ 
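The trie get_vector_fn() fix above checks every ISA extension the AVX512 lookup actually uses, not just AVX512F, before handing out a vector function pointer. The guard on its own:

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

static bool
avx512_lookup_usable(void)
{
	/* Any missing extension falls back to the scalar path. */
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) > 0 &&
	       rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) > 0 &&
	       rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) > 0 &&
	       rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512;
}

Probing only AVX512F would let the byte/word and doubleword instructions in the lookup fault on parts that implement F without BW/DQ.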
trie_create(const char *name, int socket_id, + + snprintf(mem_name, sizeof(mem_name), "DP_%s", name); + dp = rte_zmalloc_socket(name, sizeof(struct rte_trie_tbl) + +- TRIE_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE, +- socket_id); ++ TRIE_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t), ++ RTE_CACHE_LINE_SIZE, socket_id); + if (dp == NULL) { + rte_errno = ENOMEM; + return dp; diff --git a/dpdk/lib/gpudev/gpudev.c b/dpdk/lib/gpudev/gpudev.c index 805719d00c..8f12abef23 100644 --- a/dpdk/lib/gpudev/gpudev.c @@ -83777,10 +89796,50 @@ index a399346d02..51611e392b 100644 * value that was returned when the key was added. */ diff --git a/dpdk/lib/hash/rte_thash.c b/dpdk/lib/hash/rte_thash.c -index 0249883b8d..2228af576b 100644 +index 0249883b8d..363603c102 100644 --- a/dpdk/lib/hash/rte_thash.c +++ b/dpdk/lib/hash/rte_thash.c -@@ -670,7 +670,7 @@ rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx) +@@ -160,6 +160,30 @@ thash_get_rand_poly(uint32_t poly_degree) + RTE_DIM(irreducible_poly_table[poly_degree])]; + } + ++static inline uint32_t ++get_rev_poly(uint32_t poly, int degree) ++{ ++ int i; ++ /* ++ * The implicit highest coefficient of the polynomial ++ * becomes the lowest after reversal. ++ */ ++ uint32_t rev_poly = 1; ++ uint32_t mask = (1 << degree) - 1; ++ ++ /* ++ * Here we assume "poly" argument is an irreducible polynomial, ++ * thus the lowest coefficient of the "poly" must always be equal to "1". ++ * After the reversal, this the lowest coefficient becomes the highest and ++ * it is omitted since the highest coefficient is implicitly determined by ++ * degree of the polynomial. ++ */ ++ for (i = 1; i < degree; i++) ++ rev_poly |= ((poly >> i) & 0x1) << (degree - i); ++ ++ return rev_poly & mask; ++} ++ + static struct thash_lfsr * + alloc_lfsr(struct rte_thash_ctx *ctx) + { +@@ -179,7 +203,7 @@ alloc_lfsr(struct rte_thash_ctx *ctx) + lfsr->state = rte_rand() & ((1 << lfsr->deg) - 1); + } while (lfsr->state == 0); + /* init reverse order polynomial */ +- lfsr->rev_poly = (lfsr->poly >> 1) | (1 << (lfsr->deg - 1)); ++ lfsr->rev_poly = get_rev_poly(lfsr->poly, lfsr->deg); + /* init proper rev_state*/ + lfsr->rev_state = lfsr->state; + for (i = 0; i <= lfsr->deg; i++) +@@ -670,7 +694,7 @@ rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx) } static inline uint8_t @@ -83789,7 +89848,7 @@ index 0249883b8d..2228af576b 100644 { uint8_t ret = 0; -@@ -681,13 +681,14 @@ read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) +@@ -681,13 +705,14 @@ read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset) (CHAR_BIT - (offset % CHAR_BIT)); } @@ -83805,7 +89864,7 @@ index 0249883b8d..2228af576b 100644 len = RTE_MAX(len, 0); len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT)); -@@ -695,13 +696,14 @@ read_unaligned_bits(uint8_t *ptr, int len, int offset) +@@ -695,13 +720,14 @@ read_unaligned_bits(uint8_t *ptr, int len, int offset) while (len > 0) { ret <<= CHAR_BIT; @@ -84007,6 +90066,19 @@ index 072a253c89..d08b143e51 100644 */ uint32_t prim_hash_seed; +diff --git a/dpdk/lib/member/rte_member_ht.c b/dpdk/lib/member/rte_member_ht.c +index a85561b472..0d0376b264 100644 +--- a/dpdk/lib/member/rte_member_ht.c ++++ b/dpdk/lib/member/rte_member_ht.c +@@ -493,7 +493,7 @@ rte_member_add_ht(const struct rte_member_setsum *ss, + return ret; + + /* Random pick prim or sec for recursive displacement */ +- uint32_t select_bucket = (tmp_sig && 1U) ? prim_bucket : sec_bucket; ++ uint32_t select_bucket = (tmp_sig & 1U) ? 
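get_rev_poly() above replaces the old one-line reversal, which only moved the implicit top coefficient, with a full coefficient reversal. The patch's loop, reproduced standalone:

#include <stdint.h>

/* Reverse a degree-"degree" polynomial stored without its implicit x^degree
 * term. The constant term of an irreducible polynomial is always 1, so after
 * reversal it becomes the implicit top term and is dropped; the old top term
 * becomes the new constant 1. */
static uint32_t
rev_poly(uint32_t poly, int degree)
{
	uint32_t rev = 1;
	uint32_t mask = (UINT32_C(1) << degree) - 1;
	int i;

	for (i = 1; i < degree; i++)
		rev |= ((poly >> i) & 0x1) << (degree - i);
	return rev & mask;
}

For example, with degree 4, poly 0x3 (x^4 + x + 1, the x^4 implicit) reverses to 0x9 (x^4 + x^3 + 1, the x^4 again implicit).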
prim_bucket : sec_bucket; + if (ss->cache) { + ret = evict_from_bucket(); + buckets[select_bucket].sigs[ret] = tmp_sig; diff --git a/dpdk/lib/member/rte_member_sketch.c b/dpdk/lib/member/rte_member_sketch.c index 524ba77620..d5f35aabe9 100644 --- a/dpdk/lib/member/rte_member_sketch.c @@ -84436,7 +90508,7 @@ index 8bce03d7db..75253ed837 100644 return ret; } diff --git a/dpdk/lib/pcapng/rte_pcapng.c b/dpdk/lib/pcapng/rte_pcapng.c -index 80d08e1a3b..d8fd36799b 100644 +index 80d08e1a3b..3f5e08379a 100644 --- a/dpdk/lib/pcapng/rte_pcapng.c +++ b/dpdk/lib/pcapng/rte_pcapng.c @@ -110,7 +110,8 @@ pcapng_add_option(struct pcapng_option *popt, uint16_t code, @@ -84449,6 +90521,52 @@ index 80d08e1a3b..d8fd36799b 100644 return (struct pcapng_option *)((uint8_t *)popt + pcapng_optlen(len)); } +@@ -453,7 +454,7 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + enum rte_pcapng_direction direction) + { + struct pcapng_enhance_packet_block *epb; +- uint32_t orig_len, data_len, padding, flags; ++ uint32_t orig_len, pkt_len, padding, flags; + struct pcapng_option *opt; + uint16_t optlen; + struct rte_mbuf *mc; +@@ -496,8 +497,8 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + (md->ol_flags & RTE_MBUF_F_RX_RSS_HASH)); + + /* pad the packet to 32 bit boundary */ +- data_len = rte_pktmbuf_data_len(mc); +- padding = RTE_ALIGN(data_len, sizeof(uint32_t)) - data_len; ++ pkt_len = rte_pktmbuf_pkt_len(mc); ++ padding = RTE_ALIGN(pkt_len, sizeof(uint32_t)) - pkt_len; + if (padding > 0) { + void *tail = rte_pktmbuf_append(mc, padding); + +@@ -557,14 +558,14 @@ rte_pcapng_copy(uint16_t port_id, uint32_t queue, + goto fail; + + epb->block_type = PCAPNG_ENHANCED_PACKET_BLOCK; +- epb->block_length = rte_pktmbuf_data_len(mc); ++ epb->block_length = rte_pktmbuf_pkt_len(mc); + + /* Interface index is filled in later during write */ + mc->port = port_id; + + epb->timestamp_hi = ns >> 32; + epb->timestamp_lo = (uint32_t)ns; +- epb->capture_length = data_len; ++ epb->capture_length = pkt_len; + epb->original_length = orig_len; + + /* set trailer of block length */ +@@ -593,7 +594,7 @@ rte_pcapng_write_packets(rte_pcapng_t *self, + /* sanity check that is really a pcapng mbuf */ + epb = rte_pktmbuf_mtod(m, struct pcapng_enhance_packet_block *); + if (unlikely(epb->block_type != PCAPNG_ENHANCED_PACKET_BLOCK || +- epb->block_length != rte_pktmbuf_data_len(m))) { ++ epb->block_length != rte_pktmbuf_pkt_len(m))) { + rte_errno = EINVAL; + return -1; + } diff --git a/dpdk/lib/pci/rte_pci.h b/dpdk/lib/pci/rte_pci.h index 5088157e74..9876c3fb9d 100644 --- a/dpdk/lib/pci/rte_pci.h @@ -84562,6 +90680,115 @@ index 969a9e5aaa..0117b856bb 100644 goto error; } /* QEMU needs a delay after connection */ +diff --git a/dpdk/lib/power/power_acpi_cpufreq.c b/dpdk/lib/power/power_acpi_cpufreq.c +index 6e57aca535..0937cddba2 100644 +--- a/dpdk/lib/power/power_acpi_cpufreq.c ++++ b/dpdk/lib/power/power_acpi_cpufreq.c +@@ -257,7 +257,11 @@ power_acpi_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " +diff --git a/dpdk/lib/power/power_common.c b/dpdk/lib/power/power_common.c +index 1e09facb86..6e68a8f6d5 100644 +--- a/dpdk/lib/power/power_common.c ++++ b/dpdk/lib/power/power_common.c +@@ -9,6 +9,7 @@ + + 
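The pcapng fix above swaps rte_pktmbuf_data_len() for rte_pktmbuf_pkt_len() in the block-length fields: data_len covers only the first segment, so a chained mbuf would have produced a truncated capture block. The distinction:

#include <stdint.h>
#include <rte_mbuf.h>

/* Length to record in a capture block header. */
static uint32_t
capture_len(const struct rte_mbuf *m)
{
	/* rte_pktmbuf_data_len(m) == m->data_len: first segment only.
	 * rte_pktmbuf_pkt_len(m)  == m->pkt_len:  sum over the whole chain.
	 * They agree only for single-segment packets. */
	return rte_pktmbuf_pkt_len(m);
}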
#include + #include ++#include + + #include "power_common.h" + +@@ -202,3 +203,25 @@ out: + + return ret; + } ++ ++int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id) ++{ ++ rte_cpuset_t lcore_cpus; ++ uint32_t cpu; ++ ++ lcore_cpus = rte_lcore_cpuset(lcore_id); ++ if (CPU_COUNT(&lcore_cpus) != 1) { ++ RTE_LOG(ERR, POWER, ++ "Power library does not support lcore %u mapping to %u CPUs\n", ++ lcore_id, CPU_COUNT(&lcore_cpus)); ++ return -1; ++ } ++ ++ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) { ++ if (CPU_ISSET(cpu, &lcore_cpus)) ++ break; ++ } ++ *cpu_id = cpu; ++ ++ return 0; ++} +diff --git a/dpdk/lib/power/power_common.h b/dpdk/lib/power/power_common.h +index c1c7139276..b928df941f 100644 +--- a/dpdk/lib/power/power_common.h ++++ b/dpdk/lib/power/power_common.h +@@ -27,5 +27,6 @@ int open_core_sysfs_file(FILE **f, const char *mode, const char *format, ...) + int read_core_sysfs_u32(FILE *f, uint32_t *val); + int read_core_sysfs_s(FILE *f, char *buf, unsigned int len); + int write_core_sysfs_s(FILE *f, const char *str); ++int power_get_lcore_mapped_cpu_id(uint32_t lcore_id, uint32_t *cpu_id); + + #endif /* _POWER_COMMON_H_ */ +diff --git a/dpdk/lib/power/power_cppc_cpufreq.c b/dpdk/lib/power/power_cppc_cpufreq.c +index fc9cffef91..83b30d366e 100644 +--- a/dpdk/lib/power/power_cppc_cpufreq.c ++++ b/dpdk/lib/power/power_cppc_cpufreq.c +@@ -35,7 +35,7 @@ + #define POWER_SYSFILE_SYS_MAX \ + "/sys/devices/system/cpu/cpu%u/cpufreq/cpuinfo_max_freq" + +-#define POWER_CPPC_DRIVER "cppc-cpufreq" ++#define POWER_CPPC_DRIVER "cppc_cpufreq" + #define BUS_FREQ 100000 + + enum power_state { +@@ -361,7 +361,11 @@ power_cppc_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_userspace(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " +diff --git a/dpdk/lib/power/power_pstate_cpufreq.c b/dpdk/lib/power/power_pstate_cpufreq.c +index 52aa64510e..5799102c2b 100644 +--- a/dpdk/lib/power/power_pstate_cpufreq.c ++++ b/dpdk/lib/power/power_pstate_cpufreq.c +@@ -563,7 +563,11 @@ power_pstate_cpufreq_init(unsigned int lcore_id) + return -1; + } + +- pi->lcore_id = lcore_id; ++ if (power_get_lcore_mapped_cpu_id(lcore_id, &pi->lcore_id) < 0) { ++ RTE_LOG(ERR, POWER, "Cannot get CPU ID mapped for lcore %u\n", lcore_id); ++ return -1; ++ } ++ + /* Check and set the governor */ + if (power_set_governor_performance(pi) < 0) { + RTE_LOG(ERR, POWER, "Cannot set governor of lcore %u to " diff --git a/dpdk/lib/power/rte_power_intel_uncore.c b/dpdk/lib/power/rte_power_intel_uncore.c index 3b8724385f..7193b86516 100644 --- a/dpdk/lib/power/rte_power_intel_uncore.c @@ -84576,10 +90803,37 @@ index 3b8724385f..7193b86516 100644 #define BUS_FREQ 100000 #define FILTER_LENGTH 18 diff --git a/dpdk/lib/power/rte_power_pmd_mgmt.c b/dpdk/lib/power/rte_power_pmd_mgmt.c -index ca1840387c..f9a2606e6c 100644 +index ca1840387c..18a9819289 100644 --- a/dpdk/lib/power/rte_power_pmd_mgmt.c +++ b/dpdk/lib/power/rte_power_pmd_mgmt.c -@@ -684,7 +684,7 @@ int +@@ -419,10 +419,11 @@ check_scale(unsigned int lcore) + { + enum power_management_env env; + +- /* only PSTATE and ACPI modes are supported */ ++ /* only PSTATE, ACPI and CPPC modes are supported */ + if (!rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ) && +- 
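power_get_lcore_mapped_cpu_id() above resolves the one physical CPU behind an lcore and rejects 1:N mappings, because the cpufreq sysfs paths the ACPI/CPPC/PSTATE drivers open are per-CPU, not per-lcore. The core of the lookup over a plain cpu_set_t:

#define _GNU_SOURCE
#include <sched.h>

/* Return the single CPU in "set", or -1 if the mapping is not 1:1. */
static int
single_cpu_of(const cpu_set_t *set)
{
	int cpu;

	if (CPU_COUNT(set) != 1)
		return -1;	/* frequency scaling needs exactly one CPU */
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, set))
			return cpu;
	return -1;	/* not reached when CPU_COUNT(set) == 1 */
}

Previously pi->lcore_id was used directly as the CPU number, which only holds for the default 1:1 lcore layout.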
!rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ)) { +- RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes are supported\n"); ++ !rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ) && ++ !rte_power_check_env_supported(PM_ENV_CPPC_CPUFREQ)) { ++ RTE_LOG(DEBUG, POWER, "Only ACPI, PSTATE, or CPPC modes are supported\n"); + return -ENOTSUP; + } + /* ensure we could initialize the power library */ +@@ -431,8 +432,9 @@ check_scale(unsigned int lcore) + + /* ensure we initialized the correct env */ + env = rte_power_get_env(); +- if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ) { +- RTE_LOG(DEBUG, POWER, "Neither ACPI nor PSTATE modes were initialized\n"); ++ if (env != PM_ENV_ACPI_CPUFREQ && env != PM_ENV_PSTATE_CPUFREQ && ++ env != PM_ENV_CPPC_CPUFREQ) { ++ RTE_LOG(DEBUG, POWER, "Unable to initialize ACPI, PSTATE, or CPPC modes\n"); + return -ENOTSUP; + } + +@@ -684,7 +686,7 @@ int rte_power_pmd_mgmt_set_pause_duration(unsigned int duration) { if (duration == 0) { @@ -84588,7 +90842,7 @@ index ca1840387c..f9a2606e6c 100644 return -EINVAL; } pause_duration = duration; -@@ -707,7 +707,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min) +@@ -707,7 +709,7 @@ rte_power_pmd_mgmt_set_scaling_freq_min(unsigned int lcore, unsigned int min) } if (min > scale_freq_max[lcore]) { @@ -84597,7 +90851,7 @@ index ca1840387c..f9a2606e6c 100644 return -EINVAL; } scale_freq_min[lcore] = min; -@@ -727,7 +727,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max) +@@ -727,7 +729,7 @@ rte_power_pmd_mgmt_set_scaling_freq_max(unsigned int lcore, unsigned int max) if (max == 0) max = UINT32_MAX; if (max < scale_freq_min[lcore]) { @@ -85241,7 +91495,7 @@ index 863a6f6d52..669c322e12 100644 for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL; cmsg = CMSG_NXTHDR(&msgh, cmsg)) { diff --git a/dpdk/lib/vhost/vdpa.c b/dpdk/lib/vhost/vdpa.c -index 577cb00a43..cf51ca957a 100644 +index 577cb00a43..89d88b6876 100644 --- a/dpdk/lib/vhost/vdpa.c +++ b/dpdk/lib/vhost/vdpa.c @@ -19,6 +19,7 @@ @@ -85252,7 +91506,15 @@ index 577cb00a43..cf51ca957a 100644 /** Double linked list of vDPA devices. 
*/ TAILQ_HEAD(vdpa_device_list, rte_vdpa_device); -@@ -191,17 +192,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) +@@ -172,6 +173,7 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) + idx = vq->used->idx; + idx_m = s_vring->used->idx; + ret = (uint16_t)(idx_m - idx); ++ vq->used->flags = s_vring->used->flags; + + while (idx != idx_m) { + /* copy used entry, used ring logging is not covered here */ +@@ -191,17 +193,21 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) if (unlikely(nr_descs > vq->size)) return -1; @@ -85274,7 +91536,7 @@ index 577cb00a43..cf51ca957a 100644 if (unlikely(!idesc)) return -1; -@@ -218,9 +223,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) +@@ -218,9 +224,12 @@ rte_vdpa_relay_vring_used(int vid, uint16_t qid, void *vring_m) if (unlikely(nr_descs-- == 0)) goto fail; desc = desc_ring[desc_id]; @@ -85609,7 +91871,7 @@ index b448b6685d..bd69d3b46e 100644 } diff --git a/dpdk/lib/vhost/vhost_user.c b/dpdk/lib/vhost/vhost_user.c -index 9902ae9944..8d7d04059c 100644 +index 9902ae9944..d665d6c7d9 100644 --- a/dpdk/lib/vhost/vhost_user.c +++ b/dpdk/lib/vhost/vhost_user.c @@ -1745,6 +1745,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, @@ -85639,6 +91901,15 @@ index 9902ae9944..8d7d04059c 100644 return RTE_VHOST_MSG_RESULT_REPLY; } +@@ -2266,7 +2269,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, + * mmap from 0 to workaround a hugepage mmap bug: mmap will + * fail when offset is not page size aligned. + */ +- addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); ++ addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, off); + close(fd); + if (addr == MAP_FAILED) { + VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap log base failed!\n"); @@ -2326,7 +2329,7 @@ static int vhost_user_set_log_fd(struct virtio_net **pdev, return RTE_VHOST_MSG_RESULT_ERR; @@ -85726,7 +91997,7 @@ index 9902ae9944..8d7d04059c 100644 if (request != VHOST_USER_IOTLB_MSG) VHOST_LOG_CONFIG(dev->ifname, INFO, diff --git a/dpdk/lib/vhost/virtio_net.c b/dpdk/lib/vhost/virtio_net.c -index 9abf752f30..9f314f83c7 100644 +index 9abf752f30..9c9d05d4d9 100644 --- a/dpdk/lib/vhost/virtio_net.c +++ b/dpdk/lib/vhost/virtio_net.c @@ -1453,6 +1453,12 @@ virtio_dev_rx_batch_packed_copy(struct virtio_net *dev, @@ -85760,7 +92031,17 @@ index 9abf752f30..9f314f83c7 100644 if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0)) return -1; -@@ -2856,7 +2862,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2587,6 +2593,9 @@ vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr, + */ + uint16_t csum = 0, off; + ++ if (hdr->csum_start >= rte_pktmbuf_pkt_len(m)) ++ return; ++ + if (rte_raw_cksum_mbuf(m, hdr->csum_start, + rte_pktmbuf_pkt_len(m) - hdr->csum_start, &csum) < 0) + return; +@@ -2856,7 +2865,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, { uint16_t i; uint16_t avail_entries; @@ -85768,7 +92049,7 @@ index 9abf752f30..9f314f83c7 100644 static bool allocerr_warned; /* -@@ -2895,11 +2900,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2895,11 +2903,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, update_shadow_used_ring_split(vq, head_idx, 0); @@ -85781,7 +92062,7 @@ index 9abf752f30..9f314f83c7 100644 buf_len -= dev->vhost_hlen; -@@ -2916,8 +2918,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2916,8 +2921,6 
@@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, buf_len, mbuf_pool->name); allocerr_warned = true; } @@ -85790,7 +92071,7 @@ index 9abf752f30..9f314f83c7 100644 break; } -@@ -2928,27 +2928,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -2928,27 +2931,21 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n"); allocerr_warned = true; } @@ -85823,7 +92104,7 @@ index 9abf752f30..9f314f83c7 100644 } __rte_noinline -@@ -3470,6 +3464,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, +@@ -3470,6 +3467,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq, allocerr_warned = true; } dropped = true; diff --git a/SPECS/openvswitch3.1.spec b/SPECS/openvswitch3.1.spec index cf9a32e..fa7250a 100644 --- a/SPECS/openvswitch3.1.spec +++ b/SPECS/openvswitch3.1.spec @@ -57,7 +57,7 @@ Summary: Open vSwitch Group: System Environment/Daemons daemon/database/utilities URL: http://www.openvswitch.org/ Version: 3.1.0 -Release: 147%{?dist} +Release: 148%{?dist} # Nearly all of openvswitch is ASL 2.0. The bugtool is LGPLv2+, and the # lib/sflow*.[ch] files are SISSL @@ -756,6 +756,181 @@ exit 0 %endif %changelog +* Tue Feb 18 2025 Open vSwitch CI - 3.1.0-148 +- Merging dpdk subtree [RH git: 9f82c46219] + Commit list: + 29498facc5 Merge tag 'v22.11.7' into 22.11 + 7ce7d3341d version: 22.11.7 + 1570aef08b net/virtio: fix Rx checksum calculation + 92bac8b798 Revert "test/bonding: fix loop on members" + d752210315 power: fix log message when checking lcore ID + 077a7044cc net/hns3: fix crash for NEON and SVE + 09fb920298 version: 22.11.7-rc1 + 07d2440237 devtools: fix check of multiple commits fixed at once + fec1fb4800 doc: correct definition of stats per queue feature + 8d8d069395 app/testpmd: remove redundant policy action condition + 0cbed5e68f test/eal: fix lcore check + f7b5b45e72 test/eal: fix loop coverage for alignment macros + c6448283e3 test/event: avoid duplicate initialization + d82f1f8760 test/bonding: fix MAC address comparison + 634cf74378 test/bonding: fix loop on members + 65491527fa net/mlx5: fix miniCQEs number calculation + 2e0f9b4eab app/testpmd: fix aged flow destroy + ad0890d82f member: fix choice of bucket for displacement + 0b010067c3 app/procinfo: fix leak on exit + 50b8a025c8 common/dpaax/caamflib: enable fallthrough warnings + e4545205cc net/i40e: check register read for outer VLAN + cc6ceb096e net/iavf: add segment-length check to Tx prep + cc3d964ab6 bus/dpaa: fix lock condition during error handling + d3789f7b2e net/mlx5: fix Rx queue reference count in flushing flows + 318bb3c11d net/mlx5: fix default RSS flows creation order + 102f2042b2 common/mlx5: fix misalignment + 0e162f1a25 net/mlx5: fix counter query loop getting stuck + 08bdeeee6b net/mlx5/hws: fix allocation of STCs + da5d3d9ce9 net/mlx5: fix shared queue port number in vector Rx + aabf15baa5 common/mlx5: fix error CQE handling for 128 bytes CQE + 591f0841c6 net/dpaa2: remove unnecessary check for null before free + af71652ab7 eventdev: fix possible array underflow/overflow + a137ec4342 examples/l2fwd-event: fix spinlock handling + 0bb02c5a2f net/cnxk: fix build on Ubuntu 24.04 + fcd2d7fa43 common/cnxk: fix build on Ubuntu 24.04 + e768697485 net/bnx2x: fix duplicate branch + 0ff8ac46ba net/bnx2x: fix possible infinite loop at startup + 3b501926d5 net/bnx2x: fix always true expression + 17006eb028 net/bnx2x: remove dead 
conditional
+ a73ed9bb24 net/bnxt: fix bad action offset in Tx BD
+ 3d5d7a20e0 net/bnxt: fix TCP and UDP checksum flags
+ 881d93b709 net/bnxt: fix reading SFF-8436 SFP EEPROMs
+ 768828f6a2 net/bnxt/tf_ulp: fix parent child DB counters
+ 0c320c9667 net/bnxt/tf_core: fix Thor TF EM key size check
+ aa75cb3e6e net/mlx5: fix real time counter reading from PCI BAR
+ 802f39367e crypto/qat: fix modexp/inv length
+ 9c9057bd06 test/crypto: fix synchronous API calls
+ 1811b05443 crypto/openssl: fix potential string overflow
+ 6e9955ee17 baseband/acc: fix ring memory allocation
+ 75ff38855b event/octeontx: fix possible integer overflow
+ 78043bb867 net/hns3: fix fully use hardware flow director table
+ e3cf757040 net/hns3: fix error code for repeatedly create counter
+ dd372d9141 net/vmxnet3: support larger MTU with version 6
+ d19359e017 net/vmxnet3: fix potential out of bounds stats access
+ ded9039ea9 net/ngbe: restrict configuration of VLAN strip offload
+ 8d3229e87a net/ngbe: fix interrupt lost in legacy or MSI mode
+ c9d9948b04 net/ngbe: reconfigure more MAC Rx registers
+ fb66eb3c8a net/ngbe: fix driver load bit to inform firmware
+ 205cc63ef8 net/txgbe: fix driver load bit to inform firmware
+ ecf3bccd94 net/txgbe: remove outer UDP checksum capability
+ dfc18e3532 net/txgbe: fix VF-PF mbox interrupt
+ f6b5dc92b0 net/txgbe: fix SWFW mbox
+ 88a272ed2c net/hns3: remove ROH devices
+ 9af4afb790 net/vmxnet3: fix crash after configuration failure
+ b704e087ff net/netvsc: force Tx VLAN offload on 801.2Q packet
+ b4cf08fc16 examples/ntb: check info query return
+ 9ce0bf0e89 test/bonding: remove redundant info query
+ c0989bd756 examples/l3fwd: fix read beyond boundaries
+ 60135d1bde net/dpaa2: fix memory corruption in TM
+ 96ae1b7103 bus/fslmc: fix Coverity warnings in QBMAN
+ 95b15d5ff8 app/dumpcap: remove unused struct array
+ 77ffd1395b net/mlx5: fix reported Rx/Tx descriptor limits
+ 34e44ab00c net/mlx5: fix SWS meter state initialization
+ 12f2bc90eb net/mlx5: fix SQ flow item size
+ 505c7f3f0a net/mlx5: fix GRE flow item translation for root table
+ 614c294ea5 net/mlx5: fix memory leak in metering
+ 298a19d6d8 eal/unix: optimize thread creation
+ 15f924f62f crypto/openssl: fix 3DES-CTR with big endian CPUs
+ 7c2c50cb67 config/arm: fix warning for native build with meson >= 0.55
+ b6454df58c net/mvneta: fix possible out-of-bounds write
+ a4ee1170ef net/ixgbe: fix link status delay on FreeBSD
+ 3dac25dc58 net/ice: detect stopping a flow director queue twice
+ b207e53f27 net/hns3: restrict tunnel flow rule to one header
+ 44c959d8c3 net/netvsc: fix using Tx queue higher than Rx queues
+ 8a4a31ed6e buildtools/chkincs: check driver specific headers
+ 3ca6090a4f net/mlx5: fix flex item header length field translation
+ 3db5656f1d net/mlx5: fix non full word sample fields in flex item
+ c837dececb net/mlx5: update flex parser arc types support
+ 0fc5b4da10 pcapng: fix handling of chained mbufs
+ eaf081b5aa common/idpf: fix use after free in mailbox init
+ 04984dc016 net/idpf: fix AVX-512 pointer copy on 32-bit
+ 8c1ee8e80c net/ionic: fix build on Fedora Rawhide
+ a73f3a97ff power: fix mapped lcore ID
+ 1e230f1ca2 net/gve/base: fix build with Fedora Rawhide
+ 45655bbf59 dmadev: fix potential null pointer access
+ fec66c08d4 hash: fix thash LFSR initialization
+ 70c3227ccd build: remove version check on compiler links function
+ 9015591db9 net/mlx5: fix next protocol validation after flex item
+ a56fc269ef app/testpmd: remove flex item init command leftover
+ f12c8f789d net/mlx5: fix number of supported flex parsers
+ 09fbaf6867 net/mlx5: workaround list management of Rx queue control
+ 873da637bb net/iavf: preserve MAC address with i40e PF Linux driver
+ 47a2900e1c net/ice/base: fix VLAN replay after reset
+ 19bb1781fe net/ice/base: add bounds check
+ 2012542358 net/pcap: fix blocking Rx
+ 4e6b6c42ce net/nfp: fix link change return value
+ 1cdad91ab4 net/hns3: verify reset type from firmware
+ 856f4dd1d2 ethdev: verify queue ID in Tx done cleanup
+ 1ef2a5f5f6 net/tap: restrict maximum number of MP FDs
+ 270f15f3fd net/memif: fix buffer overflow in zero copy Rx
+ 30d634d95f net/dpaa: fix reallocate mbuf handling
+ 9bc6a69f0b bus/dpaa: fix the fman details status
+ 6770a35bbf bus/dpaa: fix VSP for 1G fm1-mac9 and 10
+ b4daf0e0fd net/dpaa: fix typecasting channel ID
+ 0c1fd86cee bus/dpaa: fix PFDRs leaks due to FQRNIs
+ a905d9d63e ethdev: fix overflow in descriptor count
+ 9c49c19a4d net/hns3: fix dump counter of registers
+ 849ffb29c6 net/hns3: remove some basic address dump
+ e7c27b6ffe net/ena: revert redefining memcpy
+ ead6f2f7f7 net/mana: support rdma-core via pkg-config
+ d3a78f0f8f net/pcap: set live interface as non-blocking
+ 7b317b7db9 app/testpmd: remove unnecessary cast
+ 8b8b03abd8 net/tap: avoid memcpy with null argument
+ b25fb707fa net/iavf: fix AVX-512 pointer copy on 32-bit
+ 8c93fc7726 net/ice: fix AVX-512 pointer copy on 32-bit
+ 0d727f9dd2 net/i40e: fix AVX-512 pointer copy on 32-bit
+ 6c69f14358 net/i40e/base: fix loop bounds
+ 75db5f0032 net/i40e/base: fix unchecked return value
+ 92dd240303 net/i40e/base: fix repeated register dumps
+ e06d6a124e net/i40e/base: fix DDP loading with reserved track ID
+ a6d819d27a net/i40e/base: fix blinking X722 with X557 PHY
+ aea7ea7acb net/i40e/base: fix misleading debug logs and comments
+ b2571f3563 net/i40e/base: fix setting flags in init function
+ c2cd652d0f net/ixgbe/base: fix unchecked return value
+ 822d0a9391 net/ice/base: fix iteration of TLVs in Preserved Fields Area
+ 6970864b0c net/ice/base: fix link speed for 200G
+ 90adb751e4 net/iavf: fix crash when link is unstable
+ 5780ca90f0 net/e1000: fix link status crash in secondary process
+ c0c610f426 fib: fix AVX512 lookup
+ 768ca32567 net/virtio-user: reset used index counter
+ 8473106d0c vdpa: update used flags in used ring relay
+ 659d8ff0cf vhost: fix offset while mapping log base address
+ e8d41d4db2 baseband/acc: fix access to deallocated mem
+ 5c7ce1ed01 common/cnxk: fix base log level
+ 0b67f7f759 common/cnxk: fix CPT HW word size for outbound SA
+ 082cd77172 event/cnxk: fix Rx timestamp handling
+ 1601beceba net/cnxk: fix Rx offloads to handle timestamp
+ 8d056353d1 net/cnxk: fix Rx timestamp handling for VF
+ 65c229defe app/dumpcap: fix handling of jumbo frames
+ e696cfc2f8 fib6: add runtime checks in AVX512 lookup
+ 2bb52b6763 power: enable CPPC
+ d56b8cfa79 net/nfb: fix use after free
+ afdb0a605d examples/vhost: fix free function mismatch
+ 8a86016238 raw/ifpga: fix free function mismatch in interrupt config
+ f838b3c8b9 raw/ifpga/base: fix use after free
+ d8c04bbc9b net/sfc: fix use after free in debug logs
+ 8bd09aed9c net/nfp: fix double free in flow destroy
+ 7a1ad4c97e net/e1000: fix use after free in filter flush
+ 4cf7c66de7 event/cnxk: fix free function mismatch in port config
+ 495fd2e8ca dma/idxd: fix free function mismatch in device probe
+ 08043b1ad9 crypto/bcmfs: fix free function mismatch
+ 38eba244a3 baseband/la12xx: fix use after free in modem config
+ b6364f5406 bpf: fix free function mismatch if convert fails
+ 4152dbbb8e examples/ipsec-secgw: fix dequeue count from cryptodev
+ 66228c9ae7 dev: fix callback lookup when unregistering device
+ 0900241918 common/dpaax/caamflib: fix PDCP SNOW-ZUC watchdog
+ 3ae58cc9a3 crypto/dpaa2_sec: fix memory leak
+ 44c20cfd7a examples/eventdev: fix queue crash with generic pipeline
+ fdda4a8d41 eal/x86: fix 32-bit write combining store
+ 8a32360685 devtools: fix forbidden token check with multiple files
+
+
* Fri Feb 14 2025 Open vSwitch CI - 3.1.0-147
- Merging upstream branch-3.1 [RH git: 6161735248]
  Commit list: