From 04b49e536cf78ec05203c96e8f5d4c5d9ceb6183 Mon Sep 17 00:00:00 2001
From: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Date: Tue, 30 Jan 2018 16:34:56 +0100
Subject: [PATCH 3/9] net/mlx5: move rdma-core calls to separate file

This lays the groundwork for externalizing rdma-core as an optional
run-time dependency instead of a mandatory one.

No functional change.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
(cherry picked from commit c89f0e24d4f0c775dcbfcaa964e9c8f1de815ce5)
---
drivers/net/mlx5/Makefile | 1 +
drivers/net/mlx5/mlx5.c | 48 +++---
drivers/net/mlx5/mlx5_ethdev.c | 5 +-
drivers/net/mlx5/mlx5_flow.c | 96 ++++++-----
drivers/net/mlx5/mlx5_glue.c | 359 +++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_glue.h | 107 ++++++++++++
drivers/net/mlx5/mlx5_mr.c | 7 +-
drivers/net/mlx5/mlx5_rxq.c | 54 ++++---
drivers/net/mlx5/mlx5_txq.c | 22 +--
drivers/net/mlx5/mlx5_vlan.c | 13 +-
10 files changed, 598 insertions(+), 114 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_glue.c
create mode 100644 drivers/net/mlx5/mlx5_glue.h

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a3984eb..bdec306 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -53,6 +53,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c

# Basic CFLAGS.
CFLAGS += -O3
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0548d17..f77bdda 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -63,6 +63,7 @@
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_glue.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
@@ -225,8 +226,8 @@ struct mlx5_args {
}
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
- claim_zero(ibv_dealloc_pd(priv->pd));
- claim_zero(ibv_close_device(priv->ctx));
+ claim_zero(mlx5_glue->dealloc_pd(priv->pd));
+ claim_zero(mlx5_glue->close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
if (priv->rss_conf.rss_key != NULL)
@@ -565,7 +566,7 @@ struct mlx5_args {

/* Save PCI address. */
mlx5_dev[idx].pci_addr = pci_dev->addr;
- list = ibv_get_device_list(&i);
+ list = mlx5_glue->get_device_list(&i);
if (list == NULL) {
assert(errno);
if (errno == ENOSYS)
@@ -615,12 +616,12 @@ struct mlx5_args {
" (SR-IOV: %s)",
list[i]->name,
sriov ? "true" : "false");
- attr_ctx = ibv_open_device(list[i]);
+ attr_ctx = mlx5_glue->open_device(list[i]);
err = errno;
break;
}
if (attr_ctx == NULL) {
- ibv_free_device_list(list);
+ mlx5_glue->free_device_list(list);
switch (err) {
case 0:
ERROR("cannot access device, is mlx5_ib loaded?");
@@ -639,7 +640,7 @@ struct mlx5_args {
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
- mlx5dv_query_device(attr_ctx, &attrs_out);
+ mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
DEBUG("Enhanced MPW is supported");
@@ -657,7 +658,7 @@ struct mlx5_args {
cqe_comp = 0;
else
cqe_comp = 1;
- if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
+ if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);

@@ -721,15 +722,15 @@ struct mlx5_args {

DEBUG("using port %u (%08" PRIx32 ")", port, test);

- ctx = ibv_open_device(ibv_dev);
+ ctx = mlx5_glue->open_device(ibv_dev);
if (ctx == NULL) {
err = ENODEV;
goto port_error;
}

- ibv_query_device_ex(ctx, NULL, &device_attr);
+ mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
- err = ibv_query_port(ctx, port, &port_attr);
+ err = mlx5_glue->query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
goto port_error;
@@ -744,11 +745,11 @@ struct mlx5_args {

if (port_attr.state != IBV_PORT_ACTIVE)
DEBUG("port %d is not active: \"%s\" (%d)",
- port, ibv_port_state_str(port_attr.state),
+ port, mlx5_glue->port_state_str(port_attr.state),
port_attr.state);

/* Allocate protection domain. */
- pd = ibv_alloc_pd(ctx);
+ pd = mlx5_glue->alloc_pd(ctx);
if (pd == NULL) {
ERROR("PD allocation failure");
err = ENOMEM;
@@ -787,7 +788,7 @@ struct mlx5_args {
goto port_error;
}
mlx5_args_assign(priv, &args);
- if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
+ if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
goto port_error;
}
@@ -807,7 +808,7 @@ struct mlx5_args {

#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
priv->counter_set_supported = !!(device_attr.max_counter_sets);
- ibv_describe_counter_set(ctx, 0, &cs_desc);
+ mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
cs_desc.counter_type, cs_desc.num_of_cs,
cs_desc.attributes);
@@ -933,8 +934,9 @@ struct mlx5_args {
.free = &mlx5_free_verbs_buf,
.data = priv,
};
- mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
- (void *)((uintptr_t)&alctr));
+ mlx5_glue->dv_set_context_attr(ctx,
+ MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));

/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
@@ -946,9 +948,9 @@ struct mlx5_args {
if (priv)
rte_free(priv);
if (pd)
- claim_zero(ibv_dealloc_pd(pd));
+ claim_zero(mlx5_glue->dealloc_pd(pd));
if (ctx)
- claim_zero(ibv_close_device(ctx));
+ claim_zero(mlx5_glue->close_device(ctx));
break;
}

@@ -967,9 +969,9 @@ struct mlx5_args {

error:
if (attr_ctx)
- claim_zero(ibv_close_device(attr_ctx));
+ claim_zero(mlx5_glue->close_device(attr_ctx));
if (list)
- ibv_free_device_list(list);
+ mlx5_glue->free_device_list(list);
assert(err >= 0);
return -err;
}
@@ -1040,7 +1042,7 @@ struct mlx5_args {
/* Match the size of Rx completion entry to the size of a cacheline. */
if (RTE_CACHE_LINE_SIZE == 128)
setenv("MLX5_CQE_SIZE", "128", 0);
- ibv_fork_init();
+ mlx5_glue->fork_init();
rte_pci_register(&mlx5_driver);
}

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index a3cef68..5620cce 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -64,6 +64,7 @@
#include <rte_malloc.h>

#include "mlx5.h"
+#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

@@ -1191,7 +1192,7 @@ struct priv *

/* Read all message and acknowledge them. */
for (;;) {
- if (ibv_get_async_event(priv->ctx, &event))
+ if (mlx5_glue->get_async_event(priv->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
@@ -1203,7 +1204,7 @@ struct priv *
else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
- ibv_ack_async_event(&event);
+ mlx5_glue->ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
if (priv_link_status_update(priv))
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f32dfdd..fb85877 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -51,6 +51,7 @@

#include "mlx5.h"
#include "mlx5_prm.h"
+#include "mlx5_glue.h"

/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4
@@ -60,22 +61,9 @@
#define MLX5_IPV6 6

#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_counter_set_init_attr {
- int dummy;
-};
struct ibv_flow_spec_counter_action {
int dummy;
};
-struct ibv_counter_set {
- int dummy;
-};
-
-static inline int
-ibv_destroy_counter_set(struct ibv_counter_set *cs)
-{
- (void)cs;
- return -ENOTSUP;
-}
#endif

/* Dev ops structure defined in mlx5.c */
@@ -1664,7 +1652,7 @@ struct ibv_spec_header {
};

init_attr.counter_set_id = 0;
- parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
+ parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
if (!parser->cs)
return EINVAL;
counter.counter_set_handle = parser->cs->handle;
@@ -1715,8 +1703,8 @@ struct ibv_spec_header {
if (!priv->dev->data->dev_started)
return 0;
parser->drop_q.ibv_attr = NULL;
- flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
- flow->drxq.ibv_attr);
+ flow->drxq.ibv_flow = mlx5_glue->create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
if (!flow->drxq.ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
@@ -1727,7 +1715,7 @@ struct ibv_spec_header {
error:
assert(flow);
if (flow->drxq.ibv_flow) {
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow(flow->drxq.ibv_flow));
flow->drxq.ibv_flow = NULL;
}
if (flow->drxq.ibv_attr) {
@@ -1735,7 +1723,7 @@ struct ibv_spec_header {
flow->drxq.ibv_attr = NULL;
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
@@ -1839,8 +1827,8 @@ struct ibv_spec_header {
if (!flow->frxq[i].hrxq)
continue;
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1866,7 +1854,7 @@ struct ibv_spec_header {
if (flow->frxq[i].ibv_flow) {
struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;

- claim_zero(ibv_destroy_flow(ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow(ibv_flow));
}
if (flow->frxq[i].hrxq)
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
@@ -1874,7 +1862,7 @@ struct ibv_spec_header {
rte_free(flow->frxq[i].ibv_attr);
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
@@ -2056,14 +2044,16 @@ struct rte_flow *
free:
if (flow->drop) {
if (flow->drxq.ibv_flow)
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->drxq.ibv_flow));
rte_free(flow->drxq.ibv_attr);
} else {
for (i = 0; i != hash_rxq_init_n; ++i) {
struct mlx5_flow *frxq = &flow->frxq[i];

if (frxq->ibv_flow)
- claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (frxq->ibv_flow));
if (frxq->hrxq)
mlx5_priv_hrxq_release(priv, frxq->hrxq);
if (frxq->ibv_attr)
@@ -2071,7 +2061,7 @@ struct rte_flow *
}
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
@@ -2119,35 +2109,38 @@ struct rte_flow *
WARN("cannot allocate memory for drop queue");
goto error;
}
- fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
goto error;
}
- fdq->wq = ibv_create_wq(priv->ctx,
- &(struct ibv_wq_init_attr){
+ fdq->wq = mlx5_glue->create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
.pd = priv->pd,
.cq = fdq->cq,
- });
+ });
if (!fdq->wq) {
WARN("cannot allocate WQ for drop queue");
goto error;
}
- fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
+ fdq->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = &fdq->wq,
.comp_mask = 0,
- });
+ });
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
goto error;
}
- fdq->qp = ibv_create_qp_ex(priv->ctx,
- &(struct ibv_qp_init_attr_ex){
+ fdq->qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
IBV_QP_INIT_ATTR_PD |
@@ -2162,7 +2155,7 @@ struct rte_flow *
},
.rwq_ind_tbl = fdq->ind_table,
.pd = priv->pd
- });
+ });
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
goto error;
@@ -2171,13 +2164,13 @@ struct rte_flow *
return 0;
error:
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
@@ -2198,13 +2191,13 @@ struct rte_flow *
if (!fdq)
return;
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
rte_free(fdq);
priv->flow_drop_queue = NULL;
}
@@ -2228,7 +2221,8 @@ struct rte_flow *
if (flow->drop) {
if (!flow->drxq.ibv_flow)
continue;
- claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->drxq.ibv_flow));
flow->drxq.ibv_flow = NULL;
/* Next flow. */
continue;
@@ -2248,7 +2242,8 @@ struct rte_flow *
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!flow->frxq[i].ibv_flow)
continue;
- claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[i].ibv_flow));
flow->frxq[i].ibv_flow = NULL;
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
@@ -2278,8 +2273,9 @@ struct rte_flow *

if (flow->drop) {
flow->drxq.ibv_flow =
- ibv_create_flow(priv->flow_drop_queue->qp,
- flow->drxq.ibv_attr);
+ mlx5_glue->create_flow
+ (priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
if (!flow->drxq.ibv_flow) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
@@ -2315,8 +2311,8 @@ struct rte_flow *
}
flow_create:
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
@@ -2523,7 +2519,7 @@ struct rte_flow *
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
- int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+ int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);

if (res) {
rte_flow_error_set(error, -res,
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
new file mode 100644
index 0000000..ff48c1e
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
+
+static int
+mlx5_glue_fork_init(void)
+{
+ return ibv_fork_init();
+}
+
+static struct ibv_pd *
+mlx5_glue_alloc_pd(struct ibv_context *context)
+{
+ return ibv_alloc_pd(context);
+}
+
+static int
+mlx5_glue_dealloc_pd(struct ibv_pd *pd)
+{
+ return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx5_glue_get_device_list(int *num_devices)
+{
+ return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx5_glue_free_device_list(struct ibv_device **list)
+{
+ ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx5_glue_open_device(struct ibv_device *device)
+{
+ return ibv_open_device(device);
+}
+
+static int
+mlx5_glue_close_device(struct ibv_context *context)
+{
+ return ibv_close_device(context);
+}
+
+static const char *
+mlx5_glue_get_device_name(struct ibv_device *device)
+{
+ return ibv_get_device_name(device);
+}
+
+static int
+mlx5_glue_query_device(struct ibv_context *context,
+ struct ibv_device_attr *device_attr)
+{
+ return ibv_query_device(context, device_attr);
+}
+
+static int
+mlx5_glue_query_device_ex(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr)
+{
+ return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr)
+{
+ return ibv_query_port(context, port_num, port_attr);
+}
+
+static struct ibv_comp_channel *
+mlx5_glue_create_comp_channel(struct ibv_context *context)
+{
+ return ibv_create_comp_channel(context);
+}
+
+static int
+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+ return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+ struct ibv_comp_channel *channel, int comp_vector)
+{
+ return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx5_glue_destroy_cq(struct ibv_cq *cq)
+{
+ return ibv_destroy_cq(cq);
+}
+
+static int
+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+ void **cq_context)
+{
+ return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+ ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_rwq_ind_table *
+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx5_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx5_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+static int
+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static struct ibv_flow *
+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx5_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx5_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+static struct ibv_counter_set *
+mlx5_glue_create_counter_set(struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counter_set(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)cs;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counter_set(cs);
+#endif
+}
+
+static int
+mlx5_glue_describe_counter_set(struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)context;
+ (void)counter_set_id;
+ (void)cs_desc;
+ return ENOTSUP;
+#else
+ return ibv_describe_counter_set(context, counter_set_id, cs_desc);
+#endif
+}
+
+static int
+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ (void)query_attr;
+ (void)cs_data;
+ return ENOTSUP;
+#else
+ return ibv_query_counter_set(query_attr, cs_data);
+#endif
+}
+
+static void
+mlx5_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static int
+mlx5_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static const char *
+mlx5_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_cq *
+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
+{
+ return ibv_cq_ex_to_cq(cq);
+}
+
+static struct ibv_cq_ex *
+mlx5_glue_dv_create_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+ return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
+}
+
+static int
+mlx5_glue_dv_query_device(struct ibv_context *ctx,
+ struct mlx5dv_context *attrs_out)
+{
+ return mlx5dv_query_device(ctx, attrs_out);
+}
+
+static int
+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type, void *attr)
+{
+ return mlx5dv_set_context_attr(ibv_ctx, type, attr);
+}
+
+static int
+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
+{
+ return mlx5dv_init_obj(obj, obj_type);
+}
+
+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
+ .fork_init = mlx5_glue_fork_init,
+ .alloc_pd = mlx5_glue_alloc_pd,
+ .dealloc_pd = mlx5_glue_dealloc_pd,
+ .get_device_list = mlx5_glue_get_device_list,
+ .free_device_list = mlx5_glue_free_device_list,
+ .open_device = mlx5_glue_open_device,
+ .close_device = mlx5_glue_close_device,
+ .get_device_name = mlx5_glue_get_device_name,
+ .query_device = mlx5_glue_query_device,
+ .query_device_ex = mlx5_glue_query_device_ex,
+ .query_port = mlx5_glue_query_port,
+ .create_comp_channel = mlx5_glue_create_comp_channel,
+ .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
+ .create_cq = mlx5_glue_create_cq,
+ .destroy_cq = mlx5_glue_destroy_cq,
+ .get_cq_event = mlx5_glue_get_cq_event,
+ .ack_cq_events = mlx5_glue_ack_cq_events,
+ .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
+ .create_wq = mlx5_glue_create_wq,
+ .destroy_wq = mlx5_glue_destroy_wq,
+ .modify_wq = mlx5_glue_modify_wq,
+ .create_flow = mlx5_glue_create_flow,
+ .destroy_flow = mlx5_glue_destroy_flow,
+ .create_qp = mlx5_glue_create_qp,
+ .create_qp_ex = mlx5_glue_create_qp_ex,
+ .destroy_qp = mlx5_glue_destroy_qp,
+ .modify_qp = mlx5_glue_modify_qp,
+ .reg_mr = mlx5_glue_reg_mr,
+ .dereg_mr = mlx5_glue_dereg_mr,
+ .create_counter_set = mlx5_glue_create_counter_set,
+ .destroy_counter_set = mlx5_glue_destroy_counter_set,
+ .describe_counter_set = mlx5_glue_describe_counter_set,
+ .query_counter_set = mlx5_glue_query_counter_set,
+ .ack_async_event = mlx5_glue_ack_async_event,
+ .get_async_event = mlx5_glue_get_async_event,
+ .port_state_str = mlx5_glue_port_state_str,
+ .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_query_device = mlx5_glue_dv_query_device,
+ .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
+ .dv_init_obj = mlx5_glue_dv_init_obj,
+};
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
new file mode 100644
index 0000000..67bd8d0
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd.
+ */
+
+#ifndef MLX5_GLUE_H_
+#define MLX5_GLUE_H_
+
+#include <stdint.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+struct ibv_counter_set;
+struct ibv_counter_set_data;
+struct ibv_counter_set_description;
+struct ibv_counter_set_init_attr;
+struct ibv_query_counter_set_attr;
+#endif
+
+struct mlx5_glue {
+ int (*fork_init)(void);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ const char *(*get_device_name)(struct ibv_device *device);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_counter_set *(*create_counter_set)
+ (struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr);
+ int (*destroy_counter_set)(struct ibv_counter_set *cs);
+ int (*describe_counter_set)
+ (struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc);
+ int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ struct ibv_cq_ex *(*dv_create_cq)
+ (struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ int (*dv_query_device)(struct ibv_context *ctx_in,
+ struct mlx5dv_context *attrs_out);
+ int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type,
+ void *attr);
+ int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+};
+
+const struct mlx5_glue *mlx5_glue;
+
+#endif /* MLX5_GLUE_H_ */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6b29eed..dea540a 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -46,6 +46,7 @@

#include "mlx5.h"
#include "mlx5_rxtx.h"
+#include "mlx5_glue.h"

struct mlx5_check_mempool_data {
int ret;
@@ -305,8 +306,8 @@ struct mlx5_mr*
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
(void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
- mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
+ mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
+ IBV_ACCESS_LOCAL_WRITE);
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
mr->start = start;
@@ -364,7 +365,7 @@ struct mlx5_mr*
DEBUG("Memory Region %p refcnt: %d",
(void *)mr, rte_atomic32_read(&mr->refcnt));
if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(ibv_dereg_mr(mr->mr));
+ claim_zero(mlx5_glue->dereg_mr(mr->mr));
LIST_REMOVE(mr, next);
rte_free(mr);
return 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 85399ef..f5778b7 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -63,6 +63,7 @@
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
+#include "mlx5_glue.h"

/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
@@ -526,13 +527,13 @@
ret = EINVAL;
goto exit;
}
- ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
+ ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
ret = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
- ibv_ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
exit:
if (rxq_ibv)
mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
@@ -597,7 +598,7 @@ struct mlx5_rxq_ibv*
}
}
if (rxq_ctrl->irq) {
- tmpl->channel = ibv_create_comp_channel(priv->ctx);
+ tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
if (!tmpl->channel) {
ERROR("%p: Comp Channel creation failure",
(void *)rxq_ctrl);
@@ -625,8 +626,9 @@ struct mlx5_rxq_ibv*
} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
}
- tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
- &attr.cq.mlx5));
+ tmpl->cq = mlx5_glue->cq_ex_to_cq
+ (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
goto error;
@@ -662,7 +664,7 @@ struct mlx5_rxq_ibv*
attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#endif
- tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
+ tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
goto error;
@@ -686,7 +688,7 @@ struct mlx5_rxq_ibv*
.attr_mask = IBV_WQ_ATTR_STATE,
.wq_state = IBV_WQS_RDY,
};
- ret = ibv_modify_wq(tmpl->wq, &mod);
+ ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
if (ret) {
ERROR("%p: WQ state to IBV_WQS_RDY failed",
(void *)rxq_ctrl);
@@ -696,7 +698,7 @@ struct mlx5_rxq_ibv*
obj.cq.out = &cq_info;
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rw;;
- ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
if (ret != 0)
goto error;
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
@@ -745,11 +747,11 @@ struct mlx5_rxq_ibv*
return tmpl;
error:
if (tmpl->wq)
- claim_zero(ibv_destroy_wq(tmpl->wq));
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
if (tmpl->cq)
- claim_zero(ibv_destroy_cq(tmpl->cq));
+ claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
if (tmpl->channel)
- claim_zero(ibv_destroy_comp_channel(tmpl->channel));
+ claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
if (tmpl->mr)
priv_mr_release(priv, tmpl->mr);
return NULL;
@@ -814,10 +816,11 @@ struct mlx5_rxq_ibv*
(void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
- claim_zero(ibv_destroy_wq(rxq_ibv->wq));
- claim_zero(ibv_destroy_cq(rxq_ibv->cq));
+ claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
if (rxq_ibv->channel)
- claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel));
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_ibv->channel));
LIST_REMOVE(rxq_ibv, next);
rte_free(rxq_ibv);
return 0;
@@ -1143,13 +1146,13 @@ struct mlx5_ind_table_ibv*
/* Finalise indirection table. */
for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
wq[i] = wq[j];
- ind_tbl->ind_table = ibv_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = wq_n,
.ind_tbl = wq,
.comp_mask = 0,
- });
+ });
if (!ind_tbl->ind_table)
goto error;
rte_atomic32_inc(&ind_tbl->refcnt);
@@ -1221,7 +1224,8 @@ struct mlx5_ind_table_ibv*
DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table
+ (ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1288,9 +1292,9 @@ struct mlx5_hrxq*
ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
if (!ind_tbl)
return NULL;
- qp = ibv_create_qp_ex(
- priv->ctx,
- &(struct ibv_qp_init_attr_ex){
+ qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
IBV_QP_INIT_ATTR_PD |
@@ -1304,7 +1308,7 @@ struct mlx5_hrxq*
},
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
- });
+ });
if (!qp)
goto error;
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
@@ -1323,7 +1327,7 @@ struct mlx5_hrxq*
error:
mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
if (qp)
- claim_zero(ibv_destroy_qp(qp));
+ claim_zero(mlx5_glue->destroy_qp(qp));
return NULL;
}

@@ -1391,7 +1395,7 @@ struct mlx5_hrxq*
DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- claim_zero(ibv_destroy_qp(hrxq->qp));
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 9c5860f..52cf005 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -59,6 +59,7 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"

/**
* Allocate TX queue elements.
@@ -324,7 +325,7 @@ struct mlx5_txq_ibv*
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
if (priv->mps == MLX5_MPW_ENHANCED)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+ tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
ERROR("%p: CQ creation failure", (void *)txq_ctrl);
goto error;
@@ -365,7 +366,7 @@ struct mlx5_txq_ibv*
attr.init.max_tso_header = txq_ctrl->max_tso_header;
attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ERROR("%p: QP creation failure", (void *)txq_ctrl);
goto error;
@@ -376,7 +377,8 @@ struct mlx5_txq_ibv*
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
goto error;
@@ -384,13 +386,13 @@ struct mlx5_txq_ibv*
attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
goto error;
@@ -405,7 +407,7 @@ struct mlx5_txq_ibv*
obj.cq.out = &cq_info;
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
- ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
if (ret != 0)
goto error;
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
@@ -442,9 +444,9 @@ struct mlx5_txq_ibv*
return txq_ibv;
error:
if (tmpl.cq)
- claim_zero(ibv_destroy_cq(tmpl.cq));
+ claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
if (tmpl.qp)
- claim_zero(ibv_destroy_qp(tmpl.qp));
+ claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
return NULL;
}

@@ -497,8 +499,8 @@ struct mlx5_txq_ibv*
DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
- claim_zero(ibv_destroy_qp(txq_ibv->qp));
- claim_zero(ibv_destroy_cq(txq_ibv->cq));
+ claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
LIST_REMOVE(txq_ibv, next);
rte_free(txq_ibv);
return 0;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 6fc315e..841f238 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -36,12 +36,23 @@
#include <assert.h>
#include <stdint.h>

+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
#include <rte_ethdev.h>
#include <rte_common.h>

#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"

/**
* DPDK callback to configure a VLAN filter.
@@ -133,7 +144,7 @@
.flags = vlan_offloads,
};

- err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
+ err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
if (err) {
ERROR("%p: failed to modified stripping mode: %s",
(void *)priv, strerror(err));
--
1.8.3.1
