56a2f731df
- Add patches to fix a vulnerability where a malicious guest can harm the host using vhost crypto; this includes executing code on the host (VM escape), reading host application memory space into the guest, and causing a partial denial of service on the host (bsc#1176590). * 0001-vhost-crypto-fix-pool-allocation.patch * 0002-vhost-crypto-fix-incorrect-descriptor-deduction.patch * 0003-vhost-crypto-fix-missed-request-check-for-copy-mode.patch * 0004-vhost-crypto-fix-incorrect-write-back-source.patch * 0005-vhost-crypto-fix-data-length-check.patch * 0006-vhost-crypto-fix-possible-TOCTOU-attack.patch OBS-URL: https://build.opensuse.org/request/show/838479 OBS-URL: https://build.opensuse.org/package/show/network/dpdk?expand=0&rev=121
162 lines
5.7 KiB
Diff
162 lines
5.7 KiB
Diff
From 50d3b2ef804fed4c46515dc67ec51d4b08c4165b Mon Sep 17 00:00:00 2001
|
|
From: Fan Zhang <roy.fan.zhang@intel.com>
|
|
Date: Tue, 14 Apr 2020 17:26:48 +0100
|
|
Subject: [PATCH 3/6] vhost/crypto: fix missed request check for copy mode
|
|
|
|
This patch fixes the missed request check for vhost crypto
|
|
copy mode.
|
|
|
|
CVE-2020-14376
|
|
CVE-2020-14377
|
|
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
|
|
Cc: stable@dpdk.org
|
|
|
|
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
|
|
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
|
|
---
|
|
lib/librte_vhost/vhost_crypto.c | 68 +++++++++++++++++++++++----------
|
|
1 file changed, 47 insertions(+), 21 deletions(-)
|
|
|
|
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
|
|
index 86747dd5f..494f49084 100644
|
|
--- a/lib/librte_vhost/vhost_crypto.c
|
|
+++ b/lib/librte_vhost/vhost_crypto.c
|
|
@@ -756,7 +756,7 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
|
|
}
|
|
|
|
wb_data->dst = dst;
|
|
- wb_data->len = desc->len - offset;
|
|
+ wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
|
|
write_back_len -= wb_data->len;
|
|
src += offset + wb_data->len;
|
|
offset = 0;
|
|
@@ -840,6 +840,17 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
|
|
return NULL;
|
|
}
|
|
|
|
+static __rte_always_inline uint8_t
|
|
+vhost_crypto_check_cipher_request(struct virtio_crypto_cipher_data_req *req)
|
|
+{
|
|
+ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
|
|
+ (req->para.src_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE) &&
|
|
+ (req->para.dst_data_len >= req->para.src_data_len) &&
|
|
+ (req->para.dst_data_len <= RTE_MBUF_DEFAULT_BUF_SIZE)))
|
|
+ return VIRTIO_CRYPTO_OK;
|
|
+ return VIRTIO_CRYPTO_BADMSG;
|
|
+}
|
|
+
|
|
static uint8_t
|
|
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
struct vhost_crypto_data_req *vc_req,
|
|
@@ -851,7 +862,10 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
struct vhost_crypto_writeback_data *ewb = NULL;
|
|
struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
|
|
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
|
|
- uint8_t ret = 0;
|
|
+ uint8_t ret = vhost_crypto_check_cipher_request(cipher);
|
|
+
|
|
+ if (unlikely(ret != VIRTIO_CRYPTO_OK))
|
|
+ goto error_exit;
|
|
|
|
/* prepare */
|
|
/* iv */
|
|
@@ -861,10 +875,9 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
goto error_exit;
|
|
}
|
|
|
|
- m_src->data_len = cipher->para.src_data_len;
|
|
-
|
|
switch (vcrypto->option) {
|
|
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
|
|
+ m_src->data_len = cipher->para.src_data_len;
|
|
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
|
|
cipher->para.src_data_len);
|
|
m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
|
|
@@ -886,13 +899,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
break;
|
|
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
|
|
vc_req->wb_pool = vcrypto->wb_pool;
|
|
-
|
|
- if (unlikely(cipher->para.src_data_len >
|
|
- RTE_MBUF_DEFAULT_BUF_SIZE)) {
|
|
- VC_LOG_ERR("Not enough space to do data copy");
|
|
- ret = VIRTIO_CRYPTO_ERR;
|
|
- goto error_exit;
|
|
- }
|
|
+ m_src->data_len = cipher->para.src_data_len;
|
|
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
|
|
vc_req, &desc, cipher->para.src_data_len,
|
|
nb_descs, vq_size) < 0)) {
|
|
@@ -975,6 +982,29 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
return ret;
|
|
}
|
|
|
|
+static __rte_always_inline uint8_t
|
|
+vhost_crypto_check_chain_request(struct virtio_crypto_alg_chain_data_req *req)
|
|
+{
|
|
+ if (likely((req->para.iv_len <= VHOST_CRYPTO_MAX_IV_LEN) &&
|
|
+ (req->para.src_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.dst_data_len >= req->para.src_data_len) &&
|
|
+ (req->para.dst_data_len <= RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.cipher_start_src_offset <
|
|
+ RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.len_to_cipher < RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.hash_start_src_offset <
|
|
+ RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.len_to_hash < RTE_MBUF_DEFAULT_DATAROOM) &&
|
|
+ (req->para.cipher_start_src_offset + req->para.len_to_cipher <=
|
|
+ req->para.src_data_len) &&
|
|
+ (req->para.hash_start_src_offset + req->para.len_to_hash <=
|
|
+ req->para.src_data_len) &&
|
|
+ (req->para.dst_data_len + req->para.hash_result_len <=
|
|
+ RTE_MBUF_DEFAULT_DATAROOM)))
|
|
+ return VIRTIO_CRYPTO_OK;
|
|
+ return VIRTIO_CRYPTO_BADMSG;
|
|
+}
|
|
+
|
|
static uint8_t
|
|
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
struct vhost_crypto_data_req *vc_req,
|
|
@@ -988,7 +1018,10 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
|
|
uint32_t digest_offset;
|
|
void *digest_addr;
|
|
- uint8_t ret = 0;
|
|
+ uint8_t ret = vhost_crypto_check_chain_request(chain);
|
|
+
|
|
+ if (unlikely(ret != VIRTIO_CRYPTO_OK))
|
|
+ goto error_exit;
|
|
|
|
/* prepare */
|
|
/* iv */
|
|
@@ -998,10 +1031,9 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
goto error_exit;
|
|
}
|
|
|
|
- m_src->data_len = chain->para.src_data_len;
|
|
-
|
|
switch (vcrypto->option) {
|
|
case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
|
|
+ m_src->data_len = chain->para.src_data_len;
|
|
m_dst->data_len = chain->para.dst_data_len;
|
|
|
|
m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
|
|
@@ -1023,13 +1055,7 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
|
|
break;
|
|
case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
|
|
vc_req->wb_pool = vcrypto->wb_pool;
|
|
-
|
|
- if (unlikely(chain->para.src_data_len >
|
|
- RTE_MBUF_DEFAULT_BUF_SIZE)) {
|
|
- VC_LOG_ERR("Not enough space to do data copy");
|
|
- ret = VIRTIO_CRYPTO_ERR;
|
|
- goto error_exit;
|
|
- }
|
|
+ m_src->data_len = chain->para.src_data_len;
|
|
if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
|
|
vc_req, &desc, chain->para.src_data_len,
|
|
nb_descs, vq_size) < 0)) {
|
|
--
|
|
2.26.2
|
|
|