xen/21151-trace-bounds-check.patch


# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1271091288 -3600
# Node ID 94cae4dfa25bcf9aaeb93fb374926cb40411ebdf
# Parent 78488a63bbc200095413824cc146134b54635da9
xentrace: Bounds checking and error handling

Check tbuf_size to make sure that it will fit in the t_info struct
allocated at boot. Also deal with allocation failures more
gracefully.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
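
As an illustration (not part of the changeset): a minimal standalone sketch of the kind of bounds check described above. Each online CPU needs one uint32_t MFN entry per trace page in the t_info area, which is fixed at two pages, so oversized requests have to be refused up front. PAGE_SIZE, NR_CPUS and FIRST_OFFSET below are assumed example values, not the Xen definitions, and tbuf_too_large() is a stand-in for the real check_tbuf_size().

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096u                      /* assumed 4 KiB pages           */
#define T_INFO_PAGES  2u                         /* t_info fixed at two pages     */
#define T_INFO_SIZE   (T_INFO_PAGES * PAGE_SIZE)
#define NR_CPUS       8u                         /* assumed CPU count             */
#define FIRST_OFFSET  5u                         /* assumed u32 slots of header   */

/* Non-zero if `pages` trace pages per CPU cannot be described in t_info. */
static int tbuf_too_large(uint32_t pages)
{
    return NR_CPUS * pages + FIRST_OFFSET > T_INFO_SIZE / sizeof(uint32_t);
}

int main(void)
{
    printf("32 pages/cpu:  %s\n", tbuf_too_large(32)  ? "too large" : "ok");
    printf("512 pages/cpu: %s\n", tbuf_too_large(512) ? "too large" : "ok");
    return 0;
}
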
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1278093165 -3600
# Node ID 2f3a68a0b55b1b7df4d6632dfc151040ba08e9ea
# Parent 2846fd19945cb2ab32d1513531c3500278133484
trace: Fix T_INFO_FIRST_OFFSET calculation

This wasn't defined correctly, which in the num_online_cpus() == NR_CPUS
case allowed a corrupted MFN to be passed to Dom0.

Reported-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
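
As an illustration (not part of the changeset): the fix amounts to rounding the byte offset of the per-cpu MFN table inside the t_info header up to whole uint32_t slots instead of hard-coding the arithmetic. The sketch below mirrors that calculation; struct t_info_hdr and the NR_CPUS value are simplified stand-ins for the example, not the actual Xen definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8                         /* assumed CPU count for the example */

struct t_info_hdr {
    uint16_t tbuf_size;                   /* per-cpu buffer size, in pages     */
    uint16_t mfn_offset[NR_CPUS];         /* one entry per possible CPU        */
};

/* Number of _type elements needed to hold at least _x bytes. */
#define fit_to_type(_type, _x) (((_x) + sizeof(_type) - 1) / sizeof(_type))

int main(void)
{
    size_t bytes = offsetof(struct t_info_hdr, mfn_offset[NR_CPUS]);
    uint32_t first_offset = fit_to_type(uint32_t, bytes);

    /* The per-cpu MFN lists start at this uint32_t index; an off-by-one here
     * is what let a bogus MFN reach Dom0 once every possible CPU was online. */
    printf("header bytes = %zu, first u32 offset = %u\n", bytes, first_offset);
    return 0;
}
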
# HG changeset patch
# User Keir Fraser <keir.fraser@citrix.com>
# Date 1278093190 -3600
# Node ID 1390e2ab45c7b63d79ba9496d609cf59af4b44ee
# Parent 2f3a68a0b55b1b7df4d6632dfc151040ba08e9ea
trace: improve check_tbuf_size()

It didn't consider the case of the incoming size not allowing for the
2*data_size range needed by t_buf->{prod,cons}.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
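
As an illustration (not part of the changeset): per the message above, the 32-bit prod and cons counters must be able to range over 2*data_size, so 2*data_size itself has to stay representable. The sketch below mirrors that extra condition with an assumed 4 KiB PAGE_SIZE; tbuf_bytes_unsafe() is a stand-in for the corresponding part of check_tbuf_size().

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                   /* assumed 4 KiB pages */

static int tbuf_bytes_unsafe(uint32_t pages)
{
    uint32_t size = pages * PAGE_SIZE;    /* bytes of trace data per cpu       */

    return (size / PAGE_SIZE != pages)    /* pages * PAGE_SIZE wrapped         */
        || (size + size < size);          /* 2*size wraps: prod/cons break     */
}

int main(void)
{
    /* 0x80000 pages = 2 GiB: size fits in 32 bits, but 2*size does not. */
    printf("0x80000 pages: %s\n", tbuf_bytes_unsafe(0x80000) ? "rejected" : "ok");
    printf("16 pages:      %s\n", tbuf_bytes_unsafe(16)      ? "rejected" : "ok");
    return 0;
}
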
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -48,10 +48,12 @@ integer_param("tbuf_size", opt_tbuf_size
/* Pointers to the meta-data objects for all system trace buffers */
static struct t_info *t_info;
#define T_INFO_PAGES 2 /* Size fixed at 2 pages for now. */
+#define T_INFO_SIZE ((T_INFO_PAGES)*(PAGE_SIZE))
static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
static int data_size;
+static u32 t_info_first_offset __read_mostly;

/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
@@ -71,6 +73,39 @@ static cpumask_t tb_cpu_mask = CPU_MASK_
/* which tracing events are enabled */
 static u32 tb_event_mask = TRC_ALL;

+/* Return the number of elements _type necessary to store at least _x bytes of data
+ * i.e., sizeof(_type) * ans >= _x. */
+#define fit_to_type(_type, _x) (((_x)+sizeof(_type)-1) / sizeof(_type))
+
+static void calc_tinfo_first_offset(void)
+{
+    int offset_in_bytes;
+
+    offset_in_bytes = offsetof(struct t_info, mfn_offset[NR_CPUS]);
+
+    t_info_first_offset = fit_to_type(uint32_t, offset_in_bytes);
+
+    gdprintk(XENLOG_INFO, "%s: NR_CPUs %d, offset_in_bytes %d, t_info_first_offset %u\n",
+             __func__, NR_CPUS, offset_in_bytes, (unsigned)t_info_first_offset);
+}
+
+/**
+ * check_tbuf_size - check to make sure that the proposed size will fit
+ * in the currently sized struct t_info and allows prod and cons to
+ * reach double the value without overflow.
+ */
+static int check_tbuf_size(u32 pages)
+{
+    struct t_buf dummy;
+    typeof(dummy.prod) size;
+
+    size = ((typeof(dummy.prod))pages) * PAGE_SIZE;
+
+    return (size / PAGE_SIZE != pages)
+           || (size + size < size)
+           || (num_online_cpus() * pages + t_info_first_offset > T_INFO_SIZE / sizeof(uint32_t));
+}
+
/**
* alloc_trace_bufs - performs initialization of the per-cpu trace buffers.
*
@@ -87,7 +122,9 @@ static int alloc_trace_bufs(void)
unsigned long nr_pages;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list = (uint32_t *)t_info;
-    int offset = (NR_CPUS * 2 + 1 + 1) / 4;
+    int offset = t_info_first_offset;
+
+    BUG_ON(check_tbuf_size(opt_tbuf_size));

if ( opt_tbuf_size == 0 )
return -EINVAL;
@@ -180,7 +217,8 @@ out_dealloc:
}
spin_unlock_irqrestore(&per_cpu(t_lock, cpu), flags);
}
-    return -EINVAL;
+
+    return -ENOMEM;
 }


@@ -197,19 +235,35 @@ static int tb_set_size(int size)
* boot time or via control tools, but not by both. Once buffers
* are created they cannot be destroyed.
*/
-    if ( (opt_tbuf_size != 0) || (size <= 0) )
+    int ret = 0;
+
+
+
+    if ( (opt_tbuf_size != 0) )
{
-        gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
-                 opt_tbuf_size, size);
+        if ( size != opt_tbuf_size )
+            gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
+                     opt_tbuf_size, size);
return -EINVAL;
     }

-    opt_tbuf_size = size;
-    if ( alloc_trace_bufs() != 0 )
+    if ( size <= 0 )
         return -EINVAL;

-    printk("Xen trace buffers: initialized\n");
-    return 0;
+    if ( check_tbuf_size(size) )
+    {
+        gdprintk(XENLOG_INFO, "tb size %d too large\n", size);
+        return -EINVAL;
+    }
+
+    opt_tbuf_size = size;
+
+    if ( (ret = alloc_trace_bufs()) == 0 )
+        printk("Xen trace buffers: initialized\n");
+    else
+        opt_tbuf_size = 0;
+
+    return ret;
 }

int trace_will_trace_event(u32 event)
@@ -248,6 +302,10 @@ int trace_will_trace_event(u32 event)
void __init init_trace_bufs(void)
{
int i;
+
+    /* Calculate offset in u32 of first mfn */
+    calc_tinfo_first_offset();
+
/* t_info size fixed at 2 pages for now. That should be big enough / small enough
* until it's worth making it dynamic. */
t_info = alloc_xenheap_pages(1, 0);
@@ -265,13 +323,18 @@ void __init init_trace_bufs(void)
share_xen_page_with_privileged_guests(
             virt_to_page(t_info) + i, XENSHARE_writable);

-
-
if ( opt_tbuf_size == 0 )
{
printk("Xen trace buffers: disabled\n");
return;
}
+    else if ( check_tbuf_size(opt_tbuf_size) )
+    {
+        gdprintk(XENLOG_INFO, "Xen trace buffers: "
+                 "tb size %d too large, disabling\n",
+                 opt_tbuf_size);
+        opt_tbuf_size = 0;
+    }

if ( alloc_trace_bufs() == 0 )
{
@@ -279,6 +342,13 @@ void __init init_trace_bufs(void)
wmb(); /* above must be visible before tb_init_done flag set */
tb_init_done = 1;
}
+    else
+    {
+        gdprintk(XENLOG_INFO, "Xen trace buffers: "
+                 "allocation size %d failed, disabling\n",
+                 opt_tbuf_size);
+        opt_tbuf_size = 0;
+    }
 }

/**