Dominique Leuenberger 2021-05-22 22:06:13 +00:00 committed by Git OBS Bridge
commit 8f47e85f8c
12 changed files with 41 additions and 1220 deletions

View File

@@ -3,14 +3,11 @@ From: =?UTF-8?q?Jaime=20Caama=C3=B1o=20Ruiz?= <jcaamano@suse.com>
Date: Mon, 21 Sep 2020 14:50:13 +0200
Subject: [PATCH] SLE15 SP3 compatibility patch for kni
---
kernel/linux/kni/compat.h | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
Updated 2021-05-14
diff --git a/kernel/linux/kni/compat.h b/kernel/linux/kni/compat.h
index 9ee45dbf6..9dee63af2 100644
--- a/kernel/linux/kni/compat.h
+++ b/kernel/linux/kni/compat.h
diff -Nur dpdk-stable-19.11.8/kernel/linux/kni/compat.h new/kernel/linux/kni/compat.h
--- dpdk-stable-19.11.8/kernel/linux/kni/compat.h 2021-04-16 10:13:47.000000000 +0200
+++ new/kernel/linux/kni/compat.h 2021-05-14 14:19:13.576601634 +0200
@@ -14,7 +14,10 @@
#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c)
#endif
@@ -23,15 +20,11 @@ index 9ee45dbf6..9dee63af2 100644
/* SLES12SP3 is at least 4.4.57+ based */
#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 28))
@@ -131,6 +134,7 @@
#define HAVE_IOVA_TO_KVA_MAPPING_SUPPORT
@@ -132,6 +135,7 @@
#endif
-#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE
+#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(15, 3, 0))
#if KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(15, 3, 0)) || \
(defined(RHEL_RELEASE_CODE) && \
RHEL_RELEASE_VERSION(8, 3) <= RHEL_RELEASE_CODE)
#define HAVE_TX_TIMEOUT_TXQUEUE
#endif
--
2.26.2

View File

@@ -1,49 +0,0 @@
From 87efaea6376c8ae1a69e471450744a973995726b Mon Sep 17 00:00:00 2001
From: Ferruh Yigit <ferruh.yigit@intel.com>
Date: Mon, 17 Aug 2020 11:32:47 +0100
Subject: [PATCH] kni: fix build with Linux 5.9
Starting from Linux 5.9 'get_user_pages_remote()' API doesn't get
'struct task_struct' parameter:
commit 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
The change reflected to the KNI with version check.
Cc: stable@dpdk.org
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
kernel/linux/kni/compat.h | 4 ++++
kernel/linux/kni/kni_dev.h | 5 +++++
2 files changed, 9 insertions(+)
Index: dpdk-stable-19.11.4/kernel/linux/kni/compat.h
===================================================================
--- dpdk-stable-19.11.4.orig/kernel/linux/kni/compat.h
+++ dpdk-stable-19.11.4/kernel/linux/kni/compat.h
@@ -138,3 +138,7 @@
(SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(15, 3, 0))
#define HAVE_TX_TIMEOUT_TXQUEUE
#endif
+
+#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+#define HAVE_TSK_IN_GUP
+#endif
Index: dpdk-stable-19.11.4/kernel/linux/kni/kni_dev.h
===================================================================
--- dpdk-stable-19.11.4.orig/kernel/linux/kni/kni_dev.h
+++ dpdk-stable-19.11.4/kernel/linux/kni/kni_dev.h
@@ -101,8 +101,13 @@ static inline phys_addr_t iova_to_phys(s
offset = iova & (PAGE_SIZE - 1);
/* Read one page struct info */
+#ifdef HAVE_TSK_IN_GUP
ret = get_user_pages_remote(tsk, tsk->mm, iova, 1,
FOLL_TOUCH, &page, NULL, NULL);
+#else
+ ret = get_user_pages_remote(tsk->mm, iova, 1,
+ FOLL_TOUCH, &page, NULL, NULL);
+#endif
if (ret < 0)
return 0;

View File

@@ -1,46 +0,0 @@
From b04635713247368935040234d11d33914312096c Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Tue, 14 Apr 2020 16:19:51 +0100
Subject: [PATCH 1/6] vhost/crypto: fix pool allocation
This patch fixes the missing iv space allocation in crypto
operation mempool.
Fixes: 709521f4c2cd ("examples/vhost_crypto: support multi-core")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
examples/vhost_crypto/main.c | 2 +-
lib/librte_vhost/rte_vhost_crypto.h | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/examples/vhost_crypto/main.c b/examples/vhost_crypto/main.c
index 1d7ba9419..11b022e81 100644
--- a/examples/vhost_crypto/main.c
+++ b/examples/vhost_crypto/main.c
@@ -544,7 +544,7 @@ main(int argc, char *argv[])
snprintf(name, 127, "COPPOOL_%u", lo->lcore_id);
info->cop_pool = rte_crypto_op_pool_create(name,
RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS,
- NB_CACHE_OBJS, 0,
+ NB_CACHE_OBJS, VHOST_CRYPTO_MAX_IV_LEN,
rte_lcore_to_socket_id(lo->lcore_id));
if (!info->cop_pool) {
diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
index d29871c7e..866a592a5 100644
--- a/lib/librte_vhost/rte_vhost_crypto.h
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -10,6 +10,7 @@
#define VHOST_CRYPTO_SESSION_MAP_ENTRIES (1024) /**< Max nb sessions */
/** max nb virtual queues in a burst for finalizing*/
#define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS (64)
+#define VHOST_CRYPTO_MAX_IV_LEN (32)
enum rte_vhost_crypto_zero_copy {
RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0,
--
2.26.2

View File

@@ -1,40 +0,0 @@
From b485f950c85374f4969c5fa380b574b34622df91 Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Tue, 14 Apr 2020 16:52:47 +0100
Subject: [PATCH 2/6] vhost/crypto: fix incorrect descriptor deduction
This patch fixes the incorrect descriptor deduction for vhost crypto.
CVE-2020-14378
Fixes: 16d2e718b8ce ("vhost/crypto: fix possible out of bound access")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
lib/librte_vhost/vhost_crypto.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 0f9df4059..86747dd5f 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -530,13 +530,14 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
int left = size - desc->len;
while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
- (*nb_descs)--;
if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
return -1;
desc = &head[desc->next];
rte_prefetch0(&head[desc->next]);
left -= desc->len;
+ if (left > 0)
+ (*nb_descs)--;
}
if (unlikely(left > 0))
--
2.26.2

View File

@@ -1,161 +0,0 @@
From 50d3b2ef804fed4c46515dc67ec51d4b08c4165b Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Tue, 14 Apr 2020 17:26:48 +0100
Subject: [PATCH 3/6] vhost/crypto: fix missed request check for copy mode
This patch fixes the missed request check to vhost crypto
copy mode.
CVE-2020-14376
CVE-2020-14377
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
lib/librte_vhost/vhost_crypto.c | 68 +++++++++++++++++++++++----------
1 file changed, 47 insertions(+), 21 deletions(-)
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 86747dd5f..494f49084 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -756,7 +756,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
}
wb_data->dst = dst;
- wb_data->len = desc->len - offset;
+ wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
write_back_len -= wb_data->len;
src += offset + wb_data->len;
offset = 0;
@@ -840,6 +840,17 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
return NULL;
}
+static __rte_always_inline uint8_t
+vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
+{
+ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
+ (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
+ (req->para.dst_data_len >= req->para.src_data_len) &&
+ (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
+ return VIRTIO_CRYPTO_OK;
+ return VIRTIO_CRYPTO_BADMSG;
+}
+
static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
@@ -851,7 +862,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_writeback_data *ewb = NULL;
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
- uint8_t ret = 0;
+ uint8_t ret = vhost_crypto_check_cipher_request(cipher);
+
+ if (unlikely(ret != VIRTIO_CRYPTO_OK))
+ goto error_exit;
/* prepare */
/* iv */
@@ -861,10 +875,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- m_src->data_len = cipher->para.src_data_len;
-
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_src->data_len = cipher->para.src_data_len;
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
cipher->para.src_data_len);
m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
@@ -886,13 +899,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
vc_req->wb_pool = vcrypto->wb_pool;
-
- if (unlikely(cipher->para.src_data_len >
- RTE_MBUF_DEFAULT_BUF_SIZE)) {
- VC_LOG_ERR("Not enough space to do data copy");
- ret = VIRTIO_CRYPTO_ERR;
- goto error_exit;
- }
+ m_src->data_len = cipher->para.src_data_len;
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
vc_req, &desc, cipher->para.src_data_len,
nb_descs, vq_size) < 0)) {
@@ -975,6 +982,29 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
return ret;
}
+static __rte_always_inline uint8_t
+vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
+{
+ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
+ (req->para.src_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.dst_data_len >= req->para.src_data_len) &&
+ (req->para.dst_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.cipher_start_src_offset <
+ RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.len_to_cipher < RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.hash_start_src_offset <
+ RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.len_to_hash < RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.cipher_start_src_offset + req->para.len_to_cipher <=
+ req->para.src_data_len) &&
+ (req->para.hash_start_src_offset + req->para.len_to_hash <=
+ req->para.src_data_len) &&
+ (req->para.dst_data_len + req->para.hash_result_len <=
+ RTE_MBUF_DEFAULT_DATAROOM)))
+ return VIRTIO_CRYPTO_OK;
+ return VIRTIO_CRYPTO_BADMSG;
+}
+
static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
@@ -988,7 +1018,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
uint32_t digest_offset;
void *digest_addr;
- uint8_t ret = 0;
+ uint8_t ret = vhost_crypto_check_chain_request(chain);
+
+ if (unlikely(ret != VIRTIO_CRYPTO_OK))
+ goto error_exit;
/* prepare */
/* iv */
@@ -998,10 +1031,9 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- m_src->data_len = chain->para.src_data_len;
-
switch (vcrypto->option) {
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_src->data_len = chain->para.src_data_len;
m_dst->data_len = chain->para.dst_data_len;
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
@@ -1023,13 +1055,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
vc_req->wb_pool = vcrypto->wb_pool;
-
- if (unlikely(chain->para.src_data_len >
- RTE_MBUF_DEFAULT_BUF_SIZE)) {
- VC_LOG_ERR("Not enough space to do data copy");
- ret = VIRTIO_CRYPTO_ERR;
- goto error_exit;
- }
+ m_src->data_len = chain->para.src_data_len;
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
vc_req, &desc, chain->para.src_data_len,
nb_descs, vq_size) < 0)) {
--
2.26.2

View File

@@ -1,51 +0,0 @@
From 03aa702205544346d11ed7ca5693f9382ef51922 Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Wed, 15 Apr 2020 11:48:52 +0100
Subject: [PATCH 4/6] vhost/crypto: fix incorrect write back source
This patch fixes vhost crypto library for the incorrect source and
destination buffer calculation in the copy mode.
Fixes: cd1e8f03abf0 ("vhost/crypto: fix packet copy in chaining mode")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
lib/librte_vhost/vhost_crypto.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 494f49084..f1cc32a9b 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -749,14 +749,14 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
wb_data->src = src + offset;
dlen = desc->len;
dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
- &dlen, VHOST_ACCESS_RW) + offset;
+ &dlen, VHOST_ACCESS_RW);
if (unlikely(!dst || dlen != desc->len)) {
VC_LOG_ERR("Failed to map descriptor");
goto error_exit;
}
- wb_data->dst = dst;
- wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
+ wb_data->dst = dst + offset;
+ wb_data->len = RTE_MIN(dlen - offset, write_back_len);
write_back_len -= wb_data->len;
src += offset + wb_data->len;
offset = 0;
@@ -801,7 +801,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
goto error_exit;
}
- wb_data->src = src;
+ wb_data->src = src + offset;
wb_data->dst = dst;
wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
write_back_len -= wb_data->len;
--
2.26.2

View File

@@ -1,38 +0,0 @@
From 2fca489d58acfee297d0b9a7dc14e7fa119e8867 Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Thu, 16 Apr 2020 11:29:06 +0100
Subject: [PATCH 5/6] vhost/crypto: fix data length check
This patch fixes the incorrect data length check to vhost crypto.
Instead of blindly accepting the descriptor length as data length, the
change compare the request provided data length and descriptor length
first. The security issue CVE-2020-14374 is not fixed alone by this
patch, part of the fix is done through:
"vhost/crypto: fix missed request check for copy mode".
CVE-2020-14374
Fixes: 3c79609fda7c ("vhost/crypto: handle virtually non-contiguous buffers")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
lib/librte_vhost/vhost_crypto.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index f1cc32a9b..cf9aa2566 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -624,7 +624,7 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
desc = &vc_req->head[desc->next];
rte_prefetch0(&vc_req->head[desc->next]);
to_copy = RTE_MIN(desc->len, (uint32_t)left);
- dlen = desc->len;
+ dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
VHOST_ACCESS_RO);
if (unlikely(!src || !dlen)) {
--
2.26.2

View File

@@ -1,801 +0,0 @@
From 2cc5ac3a94fef1a1055b2adfa926113e5e06731f Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Wed, 9 Sep 2020 09:35:53 +0100
Subject: [PATCH 6/6] vhost/crypto: fix possible TOCTOU attack
This patch fixes the possible time-of-check to time-of-use (TOCTOU)
attack problem by copying request data and descriptor index to local
variable prior to process.
Also the original sequential read of descriptors may lead to TOCTOU
attack. This patch fixes the problem by loading all descriptors of a
request to local buffer before processing.
CVE-2020-14375
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
Cc: stable@dpdk.org
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
---
lib/librte_vhost/rte_vhost_crypto.h | 2 +
lib/librte_vhost/vhost_crypto.c | 391 ++++++++++++++--------------
2 files changed, 202 insertions(+), 191 deletions(-)
diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
index 866a592a5..b54d61db6 100644
--- a/lib/librte_vhost/rte_vhost_crypto.h
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -7,10 +7,12 @@
#define VHOST_CRYPTO_MBUF_POOL_SIZE (8192)
#define VHOST_CRYPTO_MAX_BURST_SIZE (64)
+#define VHOST_CRYPTO_MAX_DATA_SIZE (4096)
#define VHOST_CRYPTO_SESSION_MAP_ENTRIES (1024) /**< Max nb sessions */
/** max nb virtual queues in a burst for finalizing*/
#define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS (64)
#define VHOST_CRYPTO_MAX_IV_LEN (32)
+#define VHOST_CRYPTO_MAX_N_DESC (32)
enum rte_vhost_crypto_zero_copy {
RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0,
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index cf9aa2566..e08f9c6d7 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -46,6 +46,14 @@
#define IOVA_TO_VVA(t, r, a, l, p) \
((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
+/*
+ * vhost_crypto_desc is used to copy original vring_desc to the local buffer
+ * before processing (except the next index). The copy result will be an
+ * array of vhost_crypto_desc elements that follows the sequence of original
+ * vring_desc.next is arranged.
+ */
+#define vhost_crypto_desc vring_desc
+
static int
cipher_algo_transform(uint32_t virtio_cipher_algo,
enum rte_crypto_cipher_algorithm *algo)
@@ -479,83 +487,71 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
return ret;
}
-static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc,
- uint32_t *nb_descs, uint32_t vq_size)
+static __rte_always_inline struct vhost_crypto_desc *
+find_write_desc(struct vhost_crypto_desc *head, struct vhost_crypto_desc *desc,
+ uint32_t max_n_descs)
{
- if (desc->flags & VRING_DESC_F_WRITE)
- return desc;
-
- while (desc->flags & VRING_DESC_F_NEXT) {
- if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
- return NULL;
- (*nb_descs)--;
+ if (desc < head)
+ return NULL;
- desc = &head[desc->next];
+ while (desc - head < (int)max_n_descs) {
if (desc->flags & VRING_DESC_F_WRITE)
return desc;
+ desc++;
}
return NULL;
}
-static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
- uint32_t *nb_descs, uint32_t vq_size)
+static __rte_always_inline struct virtio_crypto_inhdr *
+reach_inhdr(struct vhost_crypto_data_req *vc_req,
+ struct vhost_crypto_desc *head,
+ uint32_t max_n_descs)
{
- uint64_t dlen;
struct virtio_crypto_inhdr *inhdr;
+ struct vhost_crypto_desc *last = head + (max_n_descs - 1);
+ uint64_t dlen = last->len;
- while (desc->flags & VRING_DESC_F_NEXT) {
- if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
- return NULL;
- (*nb_descs)--;
- desc = &vc_req->head[desc->next];
- }
+ if (unlikely(dlen != sizeof(*inhdr)))
+ return NULL;
- dlen = desc->len;
- inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
+ inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, last->addr,
&dlen, VHOST_ACCESS_WO);
- if (unlikely(!inhdr || dlen != desc->len))
+ if (unlikely(!inhdr || dlen != last->len))
return NULL;
return inhdr;
}
static __rte_always_inline int
-move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
- uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
+move_desc(struct vhost_crypto_desc *head,
+ struct vhost_crypto_desc **cur_desc,
+ uint32_t size, uint32_t max_n_descs)
{
- struct vring_desc *desc = *cur_desc;
+ struct vhost_crypto_desc *desc = *cur_desc;
int left = size - desc->len;
- while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
- if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
- return -1;
-
- desc = &head[desc->next];
- rte_prefetch0(&head[desc->next]);
+ while (desc->flags & VRING_DESC_F_NEXT && left > 0 &&
+ desc >= head &&
+ desc - head < (int)max_n_descs) {
+ desc++;
left -= desc->len;
- if (left > 0)
- (*nb_descs)--;
}
if (unlikely(left > 0))
return -1;
- if (unlikely(*nb_descs == 0))
+ if (unlikely(head - desc == (int)max_n_descs))
*cur_desc = NULL;
- else {
- if (unlikely(desc->next >= vq_size))
- return -1;
- *cur_desc = &head[desc->next];
- }
+ else
+ *cur_desc = desc + 1;
return 0;
}
static __rte_always_inline void *
-get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+get_data_ptr(struct vhost_crypto_data_req *vc_req,
+ struct vhost_crypto_desc *cur_desc,
uint8_t perm)
{
void *data;
@@ -570,12 +566,13 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
return data;
}
-static int
+static __rte_always_inline int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
- struct vring_desc **cur_desc, uint32_t size,
- uint32_t *nb_descs, uint32_t vq_size)
+ struct vhost_crypto_desc *head,
+ struct vhost_crypto_desc **cur_desc,
+ uint32_t size, uint32_t max_n_descs)
{
- struct vring_desc *desc = *cur_desc;
+ struct vhost_crypto_desc *desc = *cur_desc;
uint64_t remain, addr, dlen, len;
uint32_t to_copy;
uint8_t *data = dst_data;
@@ -614,15 +611,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
left -= to_copy;
- while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
- VC_LOG_ERR("Invalid descriptors");
- return -1;
- }
- (*nb_descs)--;
-
- desc = &vc_req->head[desc->next];
- rte_prefetch0(&vc_req->head[desc->next]);
+ while (desc >= head && desc - head < (int)max_n_descs && left) {
+ desc++;
to_copy = RTE_MIN(desc->len, (uint32_t)left);
dlen = to_copy;
src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
@@ -663,13 +653,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
return -1;
}
- if (unlikely(*nb_descs == 0))
+ if (unlikely(desc - head == (int)max_n_descs))
*cur_desc = NULL;
- else {
- if (unlikely(desc->next >= vq_size))
- return -1;
- *cur_desc = &vc_req->head[desc->next];
- }
+ else
+ *cur_desc = desc + 1;
return 0;
}
@@ -681,6 +668,7 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
while (wb_data) {
rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
+ memset(wb_data->src, 0, wb_data->len);
wb_last = wb_data;
wb_data = wb_data->next;
rte_mempool_put(vc_req->wb_pool, wb_last);
@@ -722,17 +710,18 @@ free_wb_data(struct vhost_crypto_writeback_data *wb_data,
* @return
* The pointer to the start of the write back data linked list.
*/
-static struct vhost_crypto_writeback_data *
+static __rte_always_inline struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
- struct vring_desc **cur_desc,
+ struct vhost_crypto_desc *head_desc,
+ struct vhost_crypto_desc **cur_desc,
struct vhost_crypto_writeback_data **end_wb_data,
uint8_t *src,
uint32_t offset,
uint64_t write_back_len,
- uint32_t *nb_descs, uint32_t vq_size)
+ uint32_t max_n_descs)
{
struct vhost_crypto_writeback_data *wb_data, *head;
- struct vring_desc *desc = *cur_desc;
+ struct vhost_crypto_desc *desc = *cur_desc;
uint64_t dlen;
uint8_t *dst;
int ret;
@@ -775,14 +764,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
} else
offset -= desc->len;
- while (write_back_len) {
- if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
- VC_LOG_ERR("Invalid descriptors");
- goto error_exit;
- }
- (*nb_descs)--;
-
- desc = &vc_req->head[desc->next];
+ while (write_back_len &&
+ desc >= head_desc &&
+ desc - head_desc < (int)max_n_descs) {
+ desc++;
if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
VC_LOG_ERR("incorrect descriptor");
goto error_exit;
@@ -821,13 +806,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
wb_data->next = NULL;
}
- if (unlikely(*nb_descs == 0))
+ if (unlikely(desc - head_desc == (int)max_n_descs))
*cur_desc = NULL;
- else {
- if (unlikely(desc->next >= vq_size))
- goto error_exit;
- *cur_desc = &vc_req->head[desc->next];
- }
+ else
+ *cur_desc = desc + 1;
*end_wb_data = wb_data;
@@ -851,14 +833,14 @@ vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
return VIRTIO_CRYPTO_BADMSG;
}
-static uint8_t
+static __rte_always_inline uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_cipher_data_req *cipher,
- struct vring_desc *cur_desc,
- uint32_t *nb_descs, uint32_t vq_size)
+ struct vhost_crypto_desc *head,
+ uint32_t max_n_descs)
{
- struct vring_desc *desc = cur_desc;
+ struct vhost_crypto_desc *desc = head;
struct vhost_crypto_writeback_data *ewb = NULL;
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
@@ -869,8 +851,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* prepare */
/* iv */
- if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
- nb_descs, vq_size) < 0)) {
+ if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+ cipher->para.iv_len, max_n_descs))) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -888,9 +870,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.src_data_len, nb_descs,
- vq_size) < 0)) {
+ if (unlikely(move_desc(head, &desc, cipher->para.src_data_len,
+ max_n_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -901,8 +882,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
vc_req->wb_pool = vcrypto->wb_pool;
m_src->data_len = cipher->para.src_data_len;
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, cipher->para.src_data_len,
- nb_descs, vq_size) < 0)) {
+ vc_req, head, &desc, cipher->para.src_data_len,
+ max_n_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -913,7 +894,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
/* dst */
- desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
+ desc = find_write_desc(head, desc, max_n_descs);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
@@ -931,9 +912,8 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- if (unlikely(move_desc(vc_req->head, &desc,
- cipher->para.dst_data_len,
- nb_descs, vq_size) < 0)) {
+ if (unlikely(move_desc(head, &desc, cipher->para.dst_data_len,
+ max_n_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -942,9 +922,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
m_dst->data_len = cipher->para.dst_data_len;
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
- vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+ vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
rte_pktmbuf_mtod(m_src, uint8_t *), 0,
- cipher->para.dst_data_len, nb_descs, vq_size);
+ cipher->para.dst_data_len, max_n_descs);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -986,33 +966,33 @@ static __rte_always_inline uint8_t
vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
{
if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
- (req->para.src_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.src_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
(req->para.dst_data_len >= req->para.src_data_len) &&
- (req->para.dst_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
+ (req->para.dst_data_len <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
(req->para.cipher_start_src_offset <
- RTE_MBUF_DEFAULT_DATAROOM) &&
- (req->para.len_to_cipher < RTE_MBUF_DEFAULT_DATAROOM) &&
+ VHOST_CRYPTO_MAX_DATA_SIZE) &&
+ (req->para.len_to_cipher <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
(req->para.hash_start_src_offset <
- RTE_MBUF_DEFAULT_DATAROOM) &&
- (req->para.len_to_hash < RTE_MBUF_DEFAULT_DATAROOM) &&
+ VHOST_CRYPTO_MAX_DATA_SIZE) &&
+ (req->para.len_to_hash <= VHOST_CRYPTO_MAX_DATA_SIZE) &&
(req->para.cipher_start_src_offset + req->para.len_to_cipher <=
req->para.src_data_len) &&
(req->para.hash_start_src_offset + req->para.len_to_hash <=
req->para.src_data_len) &&
(req->para.dst_data_len + req->para.hash_result_len <=
- RTE_MBUF_DEFAULT_DATAROOM)))
+ VHOST_CRYPTO_MAX_DATA_SIZE)))
return VIRTIO_CRYPTO_OK;
return VIRTIO_CRYPTO_BADMSG;
}
-static uint8_t
+static __rte_always_inline uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
struct vhost_crypto_data_req *vc_req,
struct virtio_crypto_alg_chain_data_req *chain,
- struct vring_desc *cur_desc,
- uint32_t *nb_descs, uint32_t vq_size)
+ struct vhost_crypto_desc *head,
+ uint32_t max_n_descs)
{
- struct vring_desc *desc = cur_desc, *digest_desc;
+ struct vhost_crypto_desc *desc = head, *digest_desc;
struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
@@ -1025,8 +1005,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
/* prepare */
/* iv */
- if (unlikely(copy_data(iv_data, vc_req, &desc,
- chain->para.iv_len, nb_descs, vq_size) < 0)) {
+ if (unlikely(copy_data(iv_data, vc_req, head, &desc,
+ chain->para.iv_len, max_n_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1045,9 +1025,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.src_data_len,
- nb_descs, vq_size) < 0)) {
+ if (unlikely(move_desc(head, &desc, chain->para.src_data_len,
+ max_n_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1057,8 +1036,8 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
vc_req->wb_pool = vcrypto->wb_pool;
m_src->data_len = chain->para.src_data_len;
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
- vc_req, &desc, chain->para.src_data_len,
- nb_descs, vq_size) < 0)) {
+ vc_req, head, &desc, chain->para.src_data_len,
+ max_n_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1070,7 +1049,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
/* dst */
- desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
+ desc = find_write_desc(head, desc, max_n_descs);
if (unlikely(!desc)) {
VC_LOG_ERR("Cannot find write location");
ret = VIRTIO_CRYPTO_BADMSG;
@@ -1089,8 +1068,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
}
if (unlikely(move_desc(vc_req->head, &desc,
- chain->para.dst_data_len,
- nb_descs, vq_size) < 0)) {
+ chain->para.dst_data_len, max_n_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1106,9 +1084,9 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
goto error_exit;
}
- if (unlikely(move_desc(vc_req->head, &desc,
+ if (unlikely(move_desc(head, &desc,
chain->para.hash_result_len,
- nb_descs, vq_size) < 0)) {
+ max_n_descs) < 0)) {
VC_LOG_ERR("Incorrect descriptor");
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
@@ -1116,34 +1094,34 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
break;
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
- vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+ vc_req->wb = prepare_write_back_data(vc_req, head, &desc, &ewb,
rte_pktmbuf_mtod(m_src, uint8_t *),
chain->para.cipher_start_src_offset,
chain->para.dst_data_len -
- chain->para.cipher_start_src_offset,
- nb_descs, vq_size);
+ chain->para.cipher_start_src_offset,
+ max_n_descs);
if (unlikely(vc_req->wb == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
+ digest_desc = desc;
digest_offset = m_src->data_len;
digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
digest_offset);
- digest_desc = desc;
/** create a wb_data for digest */
- ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
- digest_addr, 0, chain->para.hash_result_len,
- nb_descs, vq_size);
+ ewb->next = prepare_write_back_data(vc_req, head, &desc,
+ &ewb2, digest_addr, 0,
+ chain->para.hash_result_len, max_n_descs);
if (unlikely(ewb->next == NULL)) {
ret = VIRTIO_CRYPTO_ERR;
goto error_exit;
}
- if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
+ if (unlikely(copy_data(digest_addr, vc_req, head, &digest_desc,
chain->para.hash_result_len,
- nb_descs, vq_size) < 0)) {
+ max_n_descs) < 0)) {
ret = VIRTIO_CRYPTO_BADMSG;
goto error_exit;
}
@@ -1193,74 +1171,103 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
struct vhost_virtqueue *vq, struct rte_crypto_op *op,
- struct vring_desc *head, uint16_t desc_idx)
+ struct vring_desc *head, struct vhost_crypto_desc *descs,
+ uint16_t desc_idx)
{
struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
struct rte_cryptodev_sym_session *session;
- struct virtio_crypto_op_data_req *req, tmp_req;
+ struct virtio_crypto_op_data_req req;
struct virtio_crypto_inhdr *inhdr;
- struct vring_desc *desc = NULL;
+ struct vhost_crypto_desc *desc = descs;
+ struct vring_desc *src_desc;
uint64_t session_id;
uint64_t dlen;
- uint32_t nb_descs = vq->size;
- int err = 0;
+ uint32_t nb_descs = 0, max_n_descs, i;
+ int err;
vc_req->desc_idx = desc_idx;
vc_req->dev = vcrypto->dev;
vc_req->vq = vq;
- if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
- dlen = head->len;
- nb_descs = dlen / sizeof(struct vring_desc);
- /* drop invalid descriptors */
- if (unlikely(nb_descs > vq->size))
- return -1;
- desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
- &dlen, VHOST_ACCESS_RO);
- if (unlikely(!desc || dlen != head->len))
- return -1;
- desc_idx = 0;
- head = desc;
- } else {
- desc = head;
+ if (unlikely((head->flags & VRING_DESC_F_INDIRECT) == 0)) {
+ VC_LOG_ERR("Invalid descriptor");
+ return -1;
}
- vc_req->head = head;
- vc_req->zero_copy = vcrypto->option;
+ dlen = head->len;
+ src_desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
+ &dlen, VHOST_ACCESS_RO);
+ if (unlikely(!src_desc || dlen != head->len)) {
+ VC_LOG_ERR("Invalid descriptor");
+ return -1;
+ }
+ head = src_desc;
- req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
- if (unlikely(req == NULL)) {
- switch (vcrypto->option) {
- case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
- err = VIRTIO_CRYPTO_BADMSG;
- VC_LOG_ERR("Invalid descriptor");
- goto error_exit;
- case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
- req = &tmp_req;
- if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
- &nb_descs, vq->size) < 0)) {
- err = VIRTIO_CRYPTO_BADMSG;
- VC_LOG_ERR("Invalid descriptor");
- goto error_exit;
+ nb_descs = max_n_descs = dlen / sizeof(struct vring_desc);
+ if (unlikely(nb_descs > VHOST_CRYPTO_MAX_N_DESC || nb_descs == 0)) {
+ err = VIRTIO_CRYPTO_ERR;
+ VC_LOG_ERR("Cannot process num of descriptors %u", nb_descs);
+ if (nb_descs > 0) {
+ struct vring_desc *inhdr_desc = head;
+ while (inhdr_desc->flags & VRING_DESC_F_NEXT) {
+ if (inhdr_desc->next >= max_n_descs)
+ return -1;
+ inhdr_desc = &head[inhdr_desc->next];
}
- break;
- default:
- err = VIRTIO_CRYPTO_ERR;
- VC_LOG_ERR("Invalid option");
- goto error_exit;
+ if (inhdr_desc->len != sizeof(*inhdr))
+ return -1;
+ inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *,
+ vc_req, inhdr_desc->addr, &dlen,
+ VHOST_ACCESS_WO);
+ if (unlikely(!inhdr || dlen != inhdr_desc->len))
+ return -1;
+ inhdr->status = VIRTIO_CRYPTO_ERR;
+ return -1;
}
- } else {
- if (unlikely(move_desc(vc_req->head, &desc,
- sizeof(*req), &nb_descs, vq->size) < 0)) {
- VC_LOG_ERR("Incorrect descriptor");
+ }
+
+ /* copy descriptors to local variable */
+ for (i = 0; i < max_n_descs; i++) {
+ desc->addr = src_desc->addr;
+ desc->len = src_desc->len;
+ desc->flags = src_desc->flags;
+ desc++;
+ if (unlikely((src_desc->flags & VRING_DESC_F_NEXT) == 0))
+ break;
+ if (unlikely(src_desc->next >= max_n_descs)) {
+ err = VIRTIO_CRYPTO_BADMSG;
+ VC_LOG_ERR("Invalid descriptor");
goto error_exit;
}
+ src_desc = &head[src_desc->next];
+ }
+
+ vc_req->head = head;
+ vc_req->zero_copy = vcrypto->option;
+
+ nb_descs = desc - descs;
+ desc = descs;
+
+ if (unlikely(desc->len < sizeof(req))) {
+ err = VIRTIO_CRYPTO_BADMSG;
+ VC_LOG_ERR("Invalid descriptor");
+ goto error_exit;
}
- switch (req->header.opcode) {
+ if (unlikely(copy_data(&req, vc_req, descs, &desc, sizeof(req),
+ max_n_descs) < 0)) {
+ err = VIRTIO_CRYPTO_BADMSG;
+ VC_LOG_ERR("Invalid descriptor");
+ goto error_exit;
+ }
+
+ /* desc is advanced by 1 now */
+ max_n_descs -= 1;
+
+ switch (req.header.opcode) {
case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
case VIRTIO_CRYPTO_CIPHER_DECRYPT:
- session_id = req->header.session_id;
+ session_id = req.header.session_id;
/* one branch to avoid unnecessary table lookup */
if (vcrypto->cache_session_id != session_id) {
@@ -1286,19 +1293,19 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
goto error_exit;
}
- switch (req->u.sym_req.op_type) {
+ switch (req.u.sym_req.op_type) {
case VIRTIO_CRYPTO_SYM_OP_NONE:
err = VIRTIO_CRYPTO_NOTSUPP;
break;
case VIRTIO_CRYPTO_SYM_OP_CIPHER:
err = prepare_sym_cipher_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.cipher, desc,
- &nb_descs, vq->size);
+ &req.u.sym_req.u.cipher, desc,
+ max_n_descs);
break;
case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
err = prepare_sym_chain_op(vcrypto, op, vc_req,
- &req->u.sym_req.u.chain, desc,
- &nb_descs, vq->size);
+ &req.u.sym_req.u.chain, desc,
+ max_n_descs);
break;
}
if (unlikely(err != 0)) {
@@ -1307,8 +1314,9 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
}
break;
default:
+ err = VIRTIO_CRYPTO_ERR;
VC_LOG_ERR("Unsupported symmetric crypto request type %u",
- req->header.opcode);
+ req.header.opcode);
goto error_exit;
}
@@ -1316,7 +1324,7 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
error_exit:
- inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
+ inhdr = reach_inhdr(vc_req, descs, max_n_descs);
if (likely(inhdr != NULL))
inhdr->status = (uint8_t)err;
@@ -1330,17 +1338,16 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
struct rte_mbuf *m_src = op->sym->m_src;
struct rte_mbuf *m_dst = op->sym->m_dst;
struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
- uint16_t desc_idx;
+ struct vhost_virtqueue *vq = vc_req->vq;
+ uint16_t used_idx = vc_req->desc_idx, desc_idx;
if (unlikely(!vc_req)) {
VC_LOG_ERR("Failed to retrieve vc_req");
return NULL;
}
- if (old_vq && (vc_req->vq != old_vq))
- return vc_req->vq;
-
- desc_idx = vc_req->desc_idx;
+ if (old_vq && (vq != old_vq))
+ return vq;
if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
@@ -1349,8 +1356,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
write_back_data(vc_req);
}
- vc_req->vq->used->ring[desc_idx].id = desc_idx;
- vc_req->vq->used->ring[desc_idx].len = vc_req->len;
+ desc_idx = vq->avail->ring[used_idx];
+ vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
+ vq->used->ring[desc_idx].len = vc_req->len;
rte_mempool_put(m_src->pool, (void *)m_src);
@@ -1448,7 +1456,7 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
sizeof(struct vhost_crypto_data_req),
- RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
+ VHOST_CRYPTO_MAX_DATA_SIZE + RTE_PKTMBUF_HEADROOM,
rte_socket_id());
if (!vcrypto->mbuf_pool) {
VC_LOG_ERR("Failed to creath mbuf pool");
@@ -1574,6 +1582,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
+ struct vhost_crypto_desc descs[VHOST_CRYPTO_MAX_N_DESC];
struct virtio_net *dev = get_device(vid);
struct vhost_crypto *vcrypto;
struct vhost_virtqueue *vq;
@@ -1632,7 +1641,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
op->sym->m_dst->data_off = 0;
if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
- op, head, desc_idx) < 0))
+ op, head, descs, used_idx) < 0))
break;
}
@@ -1661,7 +1670,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
op->sym->m_src->data_off = 0;
if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
- op, head, desc_idx) < 0))
+ op, head, descs, desc_idx) < 0))
break;
}
--
2.26.2

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:657070a669364efef66b8b8bbe862f2611b367b6126969c43acdc06f2a172dbd
size 12433520

3
dpdk-19.11.8.tar.xz Normal file
View File

@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ec0f2fb563a533cabd3e6ec4110a412c63eb7da1aa1ccdb802ab65d166f4efa
size 12449904

View File

@ -1,3 +1,28 @@
-------------------------------------------------------------------
Fri May 14 11:58:06 UTC 2021 - Ferdinand Thiessen <rpm@fthiessen.de>
- Update LTS version to maintenance release 19.11.8
* vhost/crypto: fix data length check (CVE-2020-14374)
* vhost/crypto: fix incorrect descriptor deduction (CVE-2020-14378)
* vhost/crypto: fix incorrect write back source
* vhost/crypto: fix missed request check for copy mode
(CVE-2020-14376 CVE-2020-14377)
* vhost/crypto: fix pool allocation
* vhost/crypto: fix possible TOCTOU attack (CVE-2020-14375)
* Changes from 19.11.6:
https://doc.dpdk.org/guides-19.11/rel_notes/release_19_11.html#id14
* Changes from 19.11.7:
https://doc.dpdk.org/guides-19.11/rel_notes/release_19_11.html#id18
- Drop security patches, fixed with 19.11.5
* 0001-vhost-crypto-fix-pool-allocation.patch
* 0002-vhost-crypto-fix-incorrect-descriptor-deduction.patch
* 0003-vhost-crypto-fix-missed-request-check-for-copy-mode.patch
* 0004-vhost-crypto-fix-incorrect-write-back-source.patch
* 0005-vhost-crypto-fix-data-length-check.patch
* 0006-vhost-crypto-fix-possible-TOCTOU-attack.patch
- Drop 0001-kni-fix-build-with-Linux-5.9.patch, fixed with 19.11.6
- Updated 0001-SLE15-SP3-compatibility-patch-for-kni.patch
-------------------------------------------------------------------
Wed Apr 21 10:36:13 UTC 2021 - Matthias Gerstner <matthias.gerstner@suse.com>

View File

@ -53,7 +53,7 @@
# Add option to build without tools
%bcond_without tools
Name: dpdk%{name_tag}
Version: 19.11.4
Version: 19.11.8
Release: 0
Summary: Set of libraries and drivers for fast packet processing
License: BSD-3-Clause AND GPL-2.0-only AND LGPL-2.1-only
@ -63,13 +63,6 @@ Source: http://fast.dpdk.org/rel/dpdk-%{version}.tar.xz
Source1: preamble
Patch1: 0001-fix-cpu-compatibility.patch
Patch2: 0001-SLE15-SP3-compatibility-patch-for-kni.patch
Patch3: 0001-vhost-crypto-fix-pool-allocation.patch
Patch4: 0002-vhost-crypto-fix-incorrect-descriptor-deduction.patch
Patch5: 0003-vhost-crypto-fix-missed-request-check-for-copy-mode.patch
Patch6: 0004-vhost-crypto-fix-incorrect-write-back-source.patch
Patch7: 0005-vhost-crypto-fix-data-length-check.patch
Patch8: 0006-vhost-crypto-fix-possible-TOCTOU-attack.patch
Patch9: 0001-kni-fix-build-with-Linux-5.9.patch
BuildRequires: doxygen
BuildRequires: fdupes
BuildRequires: libelf-devel
@ -168,13 +161,6 @@ The DPDK Kernel NIC Interface (KNI) allows userspace applications access to the
%setup -q -n dpdk-stable-%{version}
%patch1 -p1 -z .init
%patch2 -p1 -z .init
%patch3 -p1 -z .init
%patch4 -p1 -z .init
%patch5 -p1 -z .init
%patch6 -p1 -z .init
%patch7 -p1 -z .init
%patch8 -p1 -z .init
%patch9 -p1 -z .init
# This fixes CROSS compilation (broken) in the mk file for ThunderX
sed -i '/^CROSS /s/^/#/' mk/machine/thunderx/rte.vars.mk
@ -351,6 +337,9 @@ mv %{buildroot}%{_datadir}/doc/dpdk %{buildroot}%{_docdir}/
ln -s %{_bindir}/dpdk-procinfo %{buildroot}%{_bindir}/dpdk_proc_info
ln -s %{_sbindir}/dpdk-devbind %{buildroot}%{_sbindir}/dpdk_nic_bind
# Fix interpreter
find %{buildroot} -name "*.py" -exec sed -i 's|env python|python|' \{\} +
# Remove duplicates
%fdupes %{buildroot}/%{_prefix}