Compare commits: v0.11.0-rc...v0.11.0
10 Commits

SHA1:
1bec86a0bc
621cc70bfb
ebeab049b0
bbaadd3ef1
6fd6e9f0bd
868136d4e3
67867f3837
6fe270e6e4
bdebf0094c
5309423828
Changelog (11 changed lines)

@@ -1,3 +1,14 @@
version 0.11.0
- fix rtc polling mode (Bernhard Kauer)
- qcow2: order concurrent aio requests (Kevin Wolf)
- qemu-io: port to win32 (Stefan Weil)
- alpha: fix extlh instruction (Vince Weaver)
- tcg: fix size of local variables in tcg_gen_bswap64_i64 (Stefan Weil)
- net: fix send ordering (Jan Kiszka)
- escc: fix IRQ routing (Aurelien Jarno)
- versatile: fix Linux task preemption (Aurelien Jarno)
- curses: reduce memory usage by 250MB (Aurelien Jarno)

version 0.11.0-rc2
- mips: fix conditional move off fp conditions codes (Nath Froyd)
- fix migration to obey -S (Paolo Bonzini)
@@ -684,6 +684,7 @@ uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_clusters, i = 0;
    QCowL2Meta *old_alloc;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)

@@ -732,6 +733,44 @@ uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
    }
    nb_clusters = i;

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    LIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t end_offset = offset + nb_clusters * s->cluster_size;
        uint64_t old_offset = old_alloc->offset;
        uint64_t old_end_offset = old_alloc->offset +
            old_alloc->nb_clusters * s->cluster_size;

        if (end_offset < old_offset || offset > old_end_offset) {
            /* No intersection */
        } else {
            if (offset < old_offset) {
                /* Stop at the start of a running allocation */
                nb_clusters = (old_offset - offset) >> s->cluster_bits;
            } else {
                nb_clusters = 0;
            }

            if (nb_clusters == 0) {
                /* Set dependency and wait for a callback */
                m->depends_on = old_alloc;
                m->nb_clusters = 0;
                *num = 0;
                return 0;
            }
        }
    }

    if (!nb_clusters) {
        abort();
    }

    LIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);

    /* allocate a new cluster */

    cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
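The hunk above is the heart of the "qcow2: order concurrent aio requests" fix: before allocating clusters, qcow2_alloc_cluster_offset() walks s->cluster_allocs and, if the requested range intersects an allocation that is already in flight, it either truncates the request so it stops at the running allocation or records a dependency and waits. A minimal standalone sketch of that overlap decision follows; check_overlap, alloc_decision and the sample ranges are illustrative names, not QEMU API.

/* Sketch only: how an allocation request decides whether it can proceed,
 * must be truncated, or has to wait for an in-flight allocation. */
#include <stdint.h>
#include <stdio.h>

enum alloc_decision { PROCEED, TRUNCATE, WAIT };

static enum alloc_decision check_overlap(uint64_t req_start, uint64_t req_end,
                                         uint64_t old_start, uint64_t old_end)
{
    if (req_end < old_start || req_start > old_end) {
        return PROCEED;   /* no intersection: allocate normally */
    }
    if (req_start < old_start) {
        return TRUNCATE;  /* stop at the start of the running allocation */
    }
    return WAIT;          /* fully behind it: register a dependency */
}

int main(void)
{
    /* request for bytes [64K,128K) vs. an in-flight allocation at [96K,160K) */
    printf("%d\n", check_overlap(65536, 131072, 98304, 163840)); /* 1 = TRUNCATE */
    return 0;
}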
@@ -219,6 +219,8 @@ static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
    if (qcow2_refcount_init(bs) < 0)
        goto fail;

    LIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (header.backing_file_offset)
        ext_end = header.backing_file_offset;

@@ -338,6 +340,7 @@ typedef struct QCowAIOCB {
    QEMUIOVector hd_qiov;
    QEMUBH *bh;
    QCowL2Meta l2meta;
    LIST_ENTRY(QCowAIOCB) next_depend;
} QCowAIOCB;

static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)

@@ -500,6 +503,7 @@ static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
    acb->n = 0;
    acb->cluster_offset = 0;
    acb->l2meta.nb_clusters = 0;
    LIST_INIT(&acb->l2meta.dependent_requests);
    return acb;
}

@@ -517,6 +521,33 @@ static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs,
    return &acb->common;
}

static void qcow_aio_write_cb(void *opaque, int ret);

static void run_dependent_requests(QCowL2Meta *m)
{
    QCowAIOCB *req;
    QCowAIOCB *next;

    /* Take the request off the list of running requests */
    if (m->nb_clusters != 0) {
        LIST_REMOVE(m, next_in_flight);
    }

    /*
     * Restart all dependent requests.
     * Can't use LIST_FOREACH here - the next link might not be the same
     * any more after the callback (request could depend on a different
     * request now)
     */
    for (req = m->dependent_requests.lh_first; req != NULL; req = next) {
        next = req->next_depend.le_next;
        qcow_aio_write_cb(req, 0);
    }

    /* Empty the list for the next part of the request */
    LIST_INIT(&m->dependent_requests);
}

static void qcow_aio_write_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;

@@ -528,14 +559,15 @@ static void qcow_aio_write_cb(void *opaque, int ret)

    acb->hd_aiocb = NULL;

    if (ret >= 0) {
        ret = qcow2_alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta);
    }

    run_dependent_requests(&acb->l2meta);

    if (ret < 0)
        goto done;

    if (qcow2_alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta) < 0) {
        qcow2_free_any_clusters(bs, acb->cluster_offset, acb->l2meta.nb_clusters);
        goto done;
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

@@ -555,6 +587,14 @@ static void qcow_aio_write_cb(void *opaque, int ret)
    acb->cluster_offset = qcow2_alloc_cluster_offset(bs, acb->sector_num << 9,
                                                     index_in_cluster,
                                                     n_end, &acb->n, &acb->l2meta);

    /* Need to wait for another request? If so, we are done for now. */
    if (!acb->cluster_offset && acb->l2meta.depends_on != NULL) {
        LIST_INSERT_HEAD(&acb->l2meta.depends_on->dependent_requests,
                         acb, next_depend);
        return;
    }

    if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
        ret = -EIO;
        goto done;
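run_dependent_requests() above cannot use LIST_FOREACH because qcow_aio_write_cb() may re-queue the request onto a different list while the loop is still walking it, so the next pointer is cached before each callback. A self-contained sketch of that pattern, assuming the BSD <sys/queue.h> macros that QEMU's sys-queue.h mirrors; restart(), struct req and req_list are made-up names for illustration.

/* Sketch only: iterate a list safely when the callback may unlink the
 * current element, by caching the next pointer first. */
#include <stdio.h>
#include <sys/queue.h>

struct req {
    int id;
    LIST_ENTRY(req) link;
};

LIST_HEAD(req_list, req);

static void restart(struct req *r)
{
    LIST_REMOVE(r, link);      /* the callback may relink the request elsewhere */
    printf("restarted request %d\n", r->id);
}

static void run_dependents(struct req_list *waiting)
{
    struct req *r = LIST_FIRST(waiting);
    while (r != NULL) {
        struct req *next = LIST_NEXT(r, link);   /* cache before the callback */
        restart(r);
        r = next;
    }
    LIST_INIT(waiting);        /* empty the list for the next round */
}

int main(void)
{
    struct req_list waiting = LIST_HEAD_INITIALIZER(waiting);
    struct req a = { .id = 1 }, b = { .id = 2 };
    LIST_INSERT_HEAD(&waiting, &a, link);
    LIST_INSERT_HEAD(&waiting, &b, link);
    run_dependents(&waiting);
    return 0;
}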
@@ -98,6 +98,7 @@ typedef struct BDRVQcowState {
    uint8_t *cluster_cache;
    uint8_t *cluster_data;
    uint64_t cluster_cache_offset;
    LIST_HEAD(QCowClusterAlloc, QCowL2Meta) cluster_allocs;

    uint64_t *refcount_table;
    uint64_t refcount_table_offset;

@@ -128,6 +129,8 @@ typedef struct QCowCreateState {
    int64_t refcount_block_offset;
} QCowCreateState;

struct QCowAIOCB;

/* XXX This could be private for qcow2-cluster.c */
typedef struct QCowL2Meta
{

@@ -135,6 +138,10 @@ typedef struct QCowL2Meta
    int n_start;
    int nb_available;
    int nb_clusters;
    struct QCowL2Meta *depends_on;
    LIST_HEAD(QCowAioDependencies, QCowAIOCB) dependent_requests;

    LIST_ENTRY(QCowL2Meta) next_in_flight;
} QCowL2Meta;

static inline int size_to_clusters(BDRVQcowState *s, int64_t size)
cmd.c (23 changed lines)

@@ -20,6 +20,7 @@
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <sys/time.h>

#include "cmd.h"

@@ -283,6 +284,26 @@ fetchline(void)
}
#endif

static char *qemu_strsep(char **input, const char *delim)
{
    char *result = *input;
    if (result != NULL) {
        char *p = result;
        for (p = result; *p != '\0'; p++) {
            if (strchr(delim, *p)) {
                break;
            }
        }
        if (*p == '\0') {
            *input = NULL;
        } else {
            *p = '\0';
            *input = p + 1;
        }
    }
    return result;
}

char **
breakline(
    char *input,

@@ -292,7 +313,7 @@ breakline(
    char *p;
    char **rval = calloc(sizeof(char *), 1);

    while (rval && (p = strsep(&input, " ")) != NULL) {
    while (rval && (p = qemu_strsep(&input, " ")) != NULL) {
        if (!*p)
            continue;
        c++;
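qemu_strsep() exists because mingw32 lacks strsep(); breakline() then tokenizes the command line with it, skipping the empty tokens produced by consecutive spaces. Below is a hedged, self-contained sketch of the same splitting contract; split() is a local stand-in written with strcspn(), not the qemu-io function.

/* Sketch only: strsep-style splitter plus the token loop breakline() uses. */
#include <stdio.h>
#include <string.h>

static char *split(char **input, const char *delim)
{
    char *result = *input, *p;
    if (result == NULL) {
        return NULL;
    }
    p = result + strcspn(result, delim);
    if (*p == '\0') {
        *input = NULL;            /* last token */
    } else {
        *p = '\0';                /* terminate token, advance past delimiter */
        *input = p + 1;
    }
    return result;
}

int main(void)
{
    char line[] = "read  -p 0 512";
    char *input = line, *tok;

    while ((tok = split(&input, " ")) != NULL) {
        if (!*tok) {
            continue;             /* consecutive spaces yield empty tokens */
        }
        printf("token: %s\n", tok);
    }
    return 0;
}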
configure (2 changed lines)

@@ -1802,6 +1802,8 @@ if test `expr "$target_list" : ".*softmmu.*"` != 0 ; then
  tools="qemu-img\$(EXESUF) $tools"
  if [ "$linux" = "yes" ] ; then
      tools="qemu-nbd\$(EXESUF) qemu-io\$(EXESUF) $tools"
  elif test "$mingw32" = "yes" ; then
      tools="qemu-io\$(EXESUF) $tools"
  fi
fi
echo "TOOLS=$tools" >> $config_host_mak
curses.c (3 changed lines)

@@ -368,7 +368,4 @@ void curses_display_init(DisplayState *ds, int full_screen)
    ds->surface = qemu_create_displaysurface_from(640, 400, 0, 0, (uint8_t*) screen);

    invalidate = 1;

    /* Standard VGA initial text mode dimensions */
    curses_resize(ds);
}

@@ -7,6 +7,8 @@
 * This code is licenced under the GPL.
 */

#include "hw.h"
#include "qemu-timer.h"
#include "sysbus.h"
#include "primecell.h"
#include "sysemu.h"

@@ -71,8 +73,7 @@ static uint32_t arm_sysctl_read(void *opaque, target_phys_addr_t offset)
    case 0x58: /* BOOTCS */
        return 0;
    case 0x5c: /* 24MHz */
        /* ??? not implemented. */
        return 0;
        return muldiv64(qemu_get_clock(vm_clock), 24000000, ticks_per_sec);
    case 0x60: /* MISC */
        return 0;
    case 0x84: /* PROCID0 */

@@ -743,8 +743,8 @@ int escc_init(target_phys_addr_t base, qemu_irq irqA, qemu_irq irqB,
    qdev_prop_set_uint32(dev, "chnAtype", ser);
    qdev_init(dev);
    s = sysbus_from_qdev(dev);
    sysbus_connect_irq(s, 0, irqA);
    sysbus_connect_irq(s, 1, irqB);
    sysbus_connect_irq(s, 0, irqB);
    sysbus_connect_irq(s, 1, irqA);
    if (base) {
        sysbus_mmio_map(s, 0, base);
    }

@@ -421,9 +421,10 @@ static void rtc_update_second2(void *opaque)
    }

    /* update ended interrupt */
    s->cmos_data[RTC_REG_C] |= REG_C_UF;
    if (s->cmos_data[RTC_REG_B] & REG_B_UIE) {
        s->cmos_data[RTC_REG_C] |= 0x90;
        rtc_irq_raise(s->irq);
        s->cmos_data[RTC_REG_C] |= REG_C_IRQF;
        rtc_irq_raise(s->irq);
    }

    /* clear update in progress bit */
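The RTC hunk makes rtc_update_second2() set the update-ended flag in register C unconditionally and raise the IRQ (with the interrupt-request flag) only when UIE is enabled, so a guest that polls register C with the interrupt disabled still observes the update. For reference, a sketch of the register C bits involved; the names mirror QEMU's REG_C_* macros, the values are assumed from the MC146818 datasheet rather than quoted from this diff, and 0x90 in the old code is IRQF | UF.

/* Illustration only: MC146818 register C bits used above. */
#include <stdio.h>

#define REG_C_IRQF 0x80   /* interrupt request flag */
#define REG_C_UF   0x10   /* update-ended flag */

int main(void)
{
    printf("0x%02x\n", REG_C_IRQF | REG_C_UF);   /* prints 0x90 */
    return 0;
}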
net.c (29 changed lines)

@@ -465,33 +465,28 @@ qemu_deliver_packet(VLANClientState *sender, const uint8_t *buf, int size)

void qemu_purge_queued_packets(VLANClientState *vc)
{
    VLANPacket **pp = &vc->vlan->send_queue;

    while (*pp != NULL) {
        VLANPacket *packet = *pp;
    VLANPacket *packet, *next;

    TAILQ_FOREACH_SAFE(packet, &vc->vlan->send_queue, entry, next) {
        if (packet->sender == vc) {
            *pp = packet->next;
            TAILQ_REMOVE(&vc->vlan->send_queue, packet, entry);
            qemu_free(packet);
        } else {
            pp = &packet->next;
        }
    }
}

void qemu_flush_queued_packets(VLANClientState *vc)
{
    VLANPacket *packet;

    while ((packet = vc->vlan->send_queue) != NULL) {
    while (!TAILQ_EMPTY(&vc->vlan->send_queue)) {
        VLANPacket *packet;
        int ret;

        vc->vlan->send_queue = packet->next;
        packet = TAILQ_FIRST(&vc->vlan->send_queue);
        TAILQ_REMOVE(&vc->vlan->send_queue, packet, entry);

        ret = qemu_deliver_packet(packet->sender, packet->data, packet->size);
        if (ret == 0 && packet->sent_cb != NULL) {
            packet->next = vc->vlan->send_queue;
            vc->vlan->send_queue = packet;
            TAILQ_INSERT_HEAD(&vc->vlan->send_queue, packet, entry);
            break;
        }

@@ -509,12 +504,12 @@ static void qemu_enqueue_packet(VLANClientState *sender,
    VLANPacket *packet;

    packet = qemu_malloc(sizeof(VLANPacket) + size);
    packet->next = sender->vlan->send_queue;
    packet->sender = sender;
    packet->size = size;
    packet->sent_cb = sent_cb;
    memcpy(packet->data, buf, size);
    sender->vlan->send_queue = packet;

    TAILQ_INSERT_TAIL(&sender->vlan->send_queue, packet, entry);
}

ssize_t qemu_send_packet_async(VLANClientState *sender,

@@ -626,7 +621,6 @@ static ssize_t qemu_enqueue_packet_iov(VLANClientState *sender,
    max_len = calc_iov_length(iov, iovcnt);

    packet = qemu_malloc(sizeof(VLANPacket) + max_len);
    packet->next = sender->vlan->send_queue;
    packet->sender = sender;
    packet->sent_cb = sent_cb;
    packet->size = 0;

@@ -638,7 +632,7 @@ static ssize_t qemu_enqueue_packet_iov(VLANClientState *sender,
        packet->size += len;
    }

    sender->vlan->send_queue = packet;
    TAILQ_INSERT_TAIL(&sender->vlan->send_queue, packet, entry);

    return packet->size;
}

@@ -2359,6 +2353,7 @@ VLANState *qemu_find_vlan(int id, int allocate)
    }
    vlan = qemu_mallocz(sizeof(VLANState));
    vlan->id = id;
    TAILQ_INIT(&vlan->send_queue);
    vlan->next = NULL;
    pvlan = &first_vlan;
    while (*pvlan != NULL)
net.h (5 changed lines)

@@ -1,6 +1,7 @@
#ifndef QEMU_NET_H
#define QEMU_NET_H

#include "sys-queue.h"
#include "qemu-common.h"

/* VLANs support */

@@ -35,7 +36,7 @@ typedef struct VLANPacket VLANPacket;
typedef void (NetPacketSent) (VLANClientState *, ssize_t);

struct VLANPacket {
    struct VLANPacket *next;
    TAILQ_ENTRY(VLANPacket) entry;
    VLANClientState *sender;
    int size;
    NetPacketSent *sent_cb;

@@ -47,7 +48,7 @@ struct VLANState {
    VLANClientState *first_client;
    struct VLANState *next;
    unsigned int nb_guest_devs, nb_host_devs;
    VLANPacket *send_queue;
    TAILQ_HEAD(send_queue, VLANPacket) send_queue;
    int delivering;
};
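The net.c/net.h changes replace the hand-rolled singly linked send_queue, where new packets were pushed onto the head and could therefore be delivered out of order, with a TAILQ that is appended at the tail and flushed from the head. A minimal FIFO sketch of that pattern using the plain <sys/queue.h> TAILQ macros; struct packet and packet_queue are illustrative names, not QEMU types.

/* Sketch only: enqueue at the tail, flush from the head, so packets go out
 * in the order they were queued. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct packet {
    int seq;
    TAILQ_ENTRY(packet) entry;
};

TAILQ_HEAD(packet_queue, packet);

int main(void)
{
    struct packet_queue q = TAILQ_HEAD_INITIALIZER(q);

    for (int i = 0; i < 3; i++) {
        struct packet *p = malloc(sizeof(*p));
        p->seq = i;
        TAILQ_INSERT_TAIL(&q, p, entry);   /* enqueue in arrival order */
    }
    while (!TAILQ_EMPTY(&q)) {
        struct packet *p = TAILQ_FIRST(&q);
        TAILQ_REMOVE(&q, p, entry);        /* flush from the head: FIFO */
        printf("deliver packet %d\n", p->seq);
        free(p);
    }
    return 0;
}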
@@ -7,10 +7,12 @@
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/time.h>
#include <sys/types.h>
#include <stdarg.h>
#include <stdio.h>
#include <getopt.h>
#include <libgen.h>

#include "qemu-common.h"
#include "block_int.h"
@@ -526,14 +526,15 @@ static always_inline void gen_ext_h(void (*tcg_gen_ext_i64)(TCGv t0, TCGv t1),
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[ra]);
    } else {
        TCGv tmp1, tmp2;
        TCGv tmp1;
        tmp1 = tcg_temp_new();

        tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
        tcg_gen_shli_i64(tmp1, tmp1, 3);
        tmp2 = tcg_const_i64(64);
        tcg_gen_sub_i64(tmp1, tmp2, tmp1);
        tcg_temp_free(tmp2);
        tcg_gen_neg_i64(tmp1, tmp1);
        tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);

        tcg_temp_free(tmp1);
    }
    if (tcg_gen_ext_i64)

@@ -1320,7 +1321,7 @@ static always_inline int translate_one (DisasContext *ctx, uint32_t insn)
        break;
    case 0x6A:
        /* EXTLH */
        gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit);
        gen_ext_h(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit);
        break;
    case 0x72:
        /* MSKQH */
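The EXTLH fix swaps tcg_gen_ext16u_i64 for tcg_gen_ext32u_i64: on Alpha a "longword" is 32 bits, so the extracted value must be zero-extended from 32 bits, not 16. A tiny illustration of what that final masking step changes; the input value is arbitrary.

/* Illustration only: the last step of EXTLH keeps a 32-bit longword. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    uint64_t shifted = 0x1122334455667788ULL;    /* value after the shift step */
    uint64_t wrong   = shifted & 0xffffULL;       /* ext16u: keeps only 16 bits */
    uint64_t fixed   = shifted & 0xffffffffULL;   /* ext32u: keeps the longword */
    printf("%016" PRIx64 " vs %016" PRIx64 "\n", wrong, fixed);
    return 0;
}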
@@ -1441,9 +1441,8 @@ static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
#ifdef TCG_TARGET_HAS_bswap64_i64
    tcg_gen_op2_i64(INDEX_op_bswap64_i64, ret, arg);
#else
    TCGv_i32 t0, t1;
    t0 = tcg_temp_new_i32();
    t1 = tcg_temp_new_i32();
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_shli_i64(t0, arg, 56);

@@ -1473,8 +1472,8 @@ static inline void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)

    tcg_gen_shri_i64(t1, arg, 56);
    tcg_gen_or_i64(ret, t0, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
#endif
}
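The tcg-op.h fix declares the temporaries in the fallback path as TCGv_i64 rather than TCGv_i32, since the shift/mask/or sequence operates on 64-bit values. As a plain-C model (illustration only, not TCG code), the sequence computes a full 64-bit byte swap:

/* Plain-C model of the value the shift/mask/or fallback computes. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t bswap64(uint64_t x)
{
    return ((x & 0x00000000000000ffULL) << 56) |
           ((x & 0x000000000000ff00ULL) << 40) |
           ((x & 0x0000000000ff0000ULL) << 24) |
           ((x & 0x00000000ff000000ULL) <<  8) |
           ((x & 0x000000ff00000000ULL) >>  8) |
           ((x & 0x0000ff0000000000ULL) >> 24) |
           ((x & 0x00ff000000000000ULL) >> 40) |
           ((x & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
    /* prints 0807060504030201 */
    printf("%016" PRIx64 "\n", bswap64(0x0102030405060708ULL));
    return 0;
}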