From c276398e43bec444eb207c3184f667b3d97361f8 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Wed, 23 Jan 2019 01:01:40 +0800
Subject: [PATCH 16/18] net/virtio: fix control VQ

[ upstream commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5 ]

This patch mainly fixes the following issues in the packed ring based
control vq support in the virtio driver:

1. When parsing the used descriptors, we have to track the
   number of descs that we need to skip (see the sketch below);
2. vq->vq_free_cnt was decreased twice for the same desc;
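
As a minimal sketch of the corrected accounting (illustrative only: the
struct and helper below are hypothetical stand-ins whose field names mirror
DPDK's struct virtqueue), every descriptor the command occupies is counted
in nb_descs while the command is built, and all of them are reclaimed in one
step once the head descriptor is marked used:

/*
 * Sketch only, not the driver code: reclaim all descriptors of a
 * completed control command at once.
 */
#include <stdint.h>

struct cvq_state {
	uint16_t vq_free_cnt;		/* free descriptors in the ring */
	uint16_t vq_used_cons_idx;	/* next used descriptor to consume */
	uint16_t vq_nentries;		/* ring size */
	uint8_t  used_wrap_counter;	/* packed ring used wrap counter */
};

static void
cvq_reclaim_used(struct cvq_state *vq, int nb_descs)
{
	/* issue 2: give vq_free_cnt back exactly once per descriptor */
	vq->vq_free_cnt += nb_descs;

	/* issue 1: skip every descriptor the command occupied */
	vq->vq_used_cons_idx += nb_descs;
	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
		vq->vq_used_cons_idx -= vq->vq_nentries;
		vq->used_wrap_counter ^= 1;
	}
}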

Meanwhile, make the function name consistent with other parts.

Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter")

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
[changed parameters to virtio_rmb/_wmb()]
(cherry picked from commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 62 ++++++++++++++----------------
 drivers/net/virtio/virtqueue.h     | 12 +-----
 2 files changed, 31 insertions(+), 43 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 7bd38a292..c12fb157e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];

 static struct virtio_pmd_ctrl *
-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		int *dlen, int pkt_num)
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+			   struct virtio_pmd_ctrl *ctrl,
+			   int *dlen, int pkt_num)
 {
 	struct virtqueue *vq = cvq->vq;
 	int head;
 	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
 	struct virtio_pmd_ctrl *result;
-	bool avail_wrap_counter, used_wrap_counter;
-	uint16_t flags;
+	bool avail_wrap_counter;
 	int sum = 0;
+	int nb_descs = 0;
 	int k;

 	/*
@@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	 */
 	head = vq->vq_avail_idx;
 	avail_wrap_counter = vq->avail_wrap_counter;
-	used_wrap_counter = vq->used_wrap_counter;
-	desc[head].flags = VRING_DESC_F_NEXT;
 	desc[head].addr = cvq->virtio_net_hdr_mem;
 	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
 	vq->vq_free_cnt--;
+	nb_descs++;
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
 		vq->avail_wrap_counter ^= 1;
@@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 			+ sizeof(struct virtio_net_ctrl_hdr)
 			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
 		desc[vq->vq_avail_idx].len = dlen[k];
-		flags = VRING_DESC_F_NEXT;
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+			VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+			VRING_DESC_F_USED(!vq->avail_wrap_counter);
 		sum += dlen[k];
 		vq->vq_free_cnt--;
-		flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
-			 VRING_DESC_F_USED(!vq->avail_wrap_counter);
-		desc[vq->vq_avail_idx].flags = flags;
-		rte_smp_wmb();
-		vq->vq_free_cnt--;
+		nb_descs++;
 		if (++vq->vq_avail_idx >= vq->vq_nentries) {
 			vq->vq_avail_idx -= vq->vq_nentries;
 			vq->avail_wrap_counter ^= 1;
 		}
 	}

-
 	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
 		+ sizeof(struct virtio_net_ctrl_hdr);
 	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
-	flags = VRING_DESC_F_WRITE;
-	flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
-		 VRING_DESC_F_USED(!vq->avail_wrap_counter);
-	desc[vq->vq_avail_idx].flags = flags;
-	flags = VRING_DESC_F_NEXT;
-	flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
-		 VRING_DESC_F_USED(!avail_wrap_counter);
-	desc[head].flags = flags;
-	rte_smp_wmb();
-
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+		VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+		VRING_DESC_F_USED(!vq->avail_wrap_counter);
 	vq->vq_free_cnt--;
+	nb_descs++;
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
 		vq->vq_avail_idx -= vq->vq_nentries;
 		vq->avail_wrap_counter ^= 1;
 	}

+	virtio_wmb();
+	desc[head].flags = VRING_DESC_F_NEXT |
+		VRING_DESC_F_AVAIL(avail_wrap_counter) |
+		VRING_DESC_F_USED(!avail_wrap_counter);
+
+	virtio_wmb();
 	virtqueue_notify(vq);

 	/* wait for used descriptors in virtqueue */
-	do {
-		rte_rmb();
+	while (!desc_is_used(&desc[head], vq))
 		usleep(100);
-	} while (!__desc_is_used(&desc[head], used_wrap_counter));
+
+	virtio_rmb();

 	/* now get used descriptors */
-	while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
-		vq->vq_free_cnt++;
-		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
-			vq->vq_used_cons_idx -= vq->vq_nentries;
-			vq->used_wrap_counter ^= 1;
-		}
+	vq->vq_free_cnt += nb_descs;
+	vq->vq_used_cons_idx += nb_descs;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->used_wrap_counter ^= 1;
 	}

 	result = cvq->virtio_net_hdr_mz->addr;
@@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	       sizeof(struct virtio_pmd_ctrl));

 	if (vtpci_packed_queue(vq->hw)) {
-		result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
 		goto out_unlock;
 	}

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75f5782bc..9e74b7bd0 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -256,7 +256,7 @@ struct virtio_tx_region {
 };

 static inline int
-__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
 {
 	uint16_t used, avail, flags;

@@ -264,16 +264,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
 	used = !!(flags & VRING_DESC_F_USED(1));
 	avail = !!(flags & VRING_DESC_F_AVAIL(1));

-	return avail == used && used == wrap_counter;
-}
-
-static inline int
-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
-{
-	return __desc_is_used(desc, vq->used_wrap_counter);
+	return avail == used && used == vq->used_wrap_counter;
 }

-
 static inline void
 vring_desc_init_packed(struct virtqueue *vq, int n)
 {
@@ -329,7 +322,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
 {
 	uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;

-
 	if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
 		virtio_wmb();
 		vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
-- 
2.21.0

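The virtqueue.h change folds __desc_is_used() into desc_is_used(), which now
reads the wrap counter from the virtqueue itself. Below is a minimal,
self-contained sketch of that check (illustrative only: the macro, struct and
function names are stand-ins, and the flag bit positions assume the virtio 1.1
packed ring layout with AVAIL in bit 7 and USED in bit 15):

/*
 * Sketch only: a packed ring descriptor is "used" when its AVAIL and
 * USED flag bits are equal and match the ring's used wrap counter.
 */
#include <stdbool.h>
#include <stdint.h>

#define DESC_F_AVAIL(b)	((uint16_t)(b) << 7)
#define DESC_F_USED(b)	((uint16_t)(b) << 15)

struct pkd_desc {
	uint16_t flags;
};

static inline bool
pkd_desc_is_used(const struct pkd_desc *desc, bool used_wrap_counter)
{
	uint16_t flags = desc->flags;	/* read once; pair with a read barrier */
	bool avail = !!(flags & DESC_F_AVAIL(1));
	bool used = !!(flags & DESC_F_USED(1));

	return avail == used && used == used_wrap_counter;
}

In the driver, the control path polls this condition on the head descriptor
(while (!desc_is_used(&desc[head], vq)) usleep(100);) and only then issues
virtio_rmb() before reading back the command status, matching the ordering
installed by the virtio_ethdev.c hunk above.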