Compare commits
43 Commits
v10.0.0
...
v0.13.0-rc
Author | SHA1 | Date | |
---|---|---|---|
|
5c0961618d | ||
|
472de0c851 | ||
|
0131c8c2dd | ||
|
d3c5b2e670 | ||
|
a385adb70e | ||
|
41de2f0c86 | ||
|
ec810f662a | ||
|
9367dbbe6d | ||
|
089c672520 | ||
|
6f8d14beb2 | ||
|
5b500f974b | ||
|
a3c4a01fb2 | ||
|
345a6d2b54 | ||
|
1b191088ae | ||
|
2c25b81316 | ||
|
5a0d460c35 | ||
|
78b6890828 | ||
|
375d40709e | ||
|
e632519ab8 | ||
|
d65741acf4 | ||
|
5aa0e6cb56 | ||
|
3bc5aa187f | ||
|
5105d99b7f | ||
|
b422f4194d | ||
|
72230c523b | ||
|
a9b56f8289 | ||
|
f891f9f74d | ||
|
271a24e7bf | ||
|
2c1064ed2d | ||
|
55ee7b38e8 | ||
|
6674dc4269 | ||
|
96638e706c | ||
|
08e90b3cad | ||
|
ada70b4522 | ||
|
8f6e28789f | ||
|
e14aad448b | ||
|
7829bc6c9f | ||
|
32b8bb3b3b | ||
|
cc12b5c748 | ||
|
50aa457e1d | ||
|
6546605650 | ||
|
42ccca964c | ||
|
966444248f |
@@ -190,6 +190,9 @@ obj-$(CONFIG_USB_OHCI) += usb-ohci.o
|
||||
obj-y += rtl8139.o
|
||||
obj-y += e1000.o
|
||||
|
||||
# Inter-VM PCI shared memory
|
||||
obj-$(CONFIG_KVM) += ivshmem.o
|
||||
|
||||
# Hardware support
|
||||
obj-i386-y += vga.o
|
||||
obj-i386-y += mc146818rtc.o i8259.o pc.o
|
||||
|
@@ -586,6 +586,7 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
|
||||
addr >>= BDRV_SECTOR_BITS;
|
||||
|
||||
if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
|
||||
int ret;
|
||||
/* get device name */
|
||||
len = qemu_get_byte(f);
|
||||
qemu_get_buffer(f, (uint8_t *)device_name, len);
|
||||
@@ -601,9 +602,12 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
|
||||
buf = qemu_malloc(BLOCK_SIZE);
|
||||
|
||||
qemu_get_buffer(f, buf, BLOCK_SIZE);
|
||||
bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
|
||||
ret = bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
|
||||
|
||||
qemu_free(buf);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
} else if (flags & BLK_MIG_FLAG_PROGRESS) {
|
||||
if (!banner_printed) {
|
||||
printf("Receiving block device images\n");
|
||||
|
58
block.c
58
block.c
@@ -330,7 +330,7 @@ BlockDriver *bdrv_find_protocol(const char *filename)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static BlockDriver *find_image_format(const char *filename)
|
||||
static int find_image_format(const char *filename, BlockDriver **pdrv)
|
||||
{
|
||||
int ret, score, score_max;
|
||||
BlockDriver *drv1, *drv;
|
||||
@@ -338,19 +338,27 @@ static BlockDriver *find_image_format(const char *filename)
|
||||
BlockDriverState *bs;
|
||||
|
||||
ret = bdrv_file_open(&bs, filename, 0);
|
||||
if (ret < 0)
|
||||
return NULL;
|
||||
if (ret < 0) {
|
||||
*pdrv = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Return the raw BlockDriver * to scsi-generic devices or empty drives */
|
||||
if (bs->sg || !bdrv_is_inserted(bs)) {
|
||||
bdrv_delete(bs);
|
||||
return bdrv_find_format("raw");
|
||||
drv = bdrv_find_format("raw");
|
||||
if (!drv) {
|
||||
ret = -ENOENT;
|
||||
}
|
||||
*pdrv = drv;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = bdrv_pread(bs, 0, buf, sizeof(buf));
|
||||
bdrv_delete(bs);
|
||||
if (ret < 0) {
|
||||
return NULL;
|
||||
*pdrv = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
score_max = 0;
|
||||
@@ -364,7 +372,11 @@ static BlockDriver *find_image_format(const char *filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
return drv;
|
||||
if (!drv) {
|
||||
ret = -ENOENT;
|
||||
}
|
||||
*pdrv = drv;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -571,12 +583,11 @@ int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
|
||||
|
||||
/* Find the right image format driver */
|
||||
if (!drv) {
|
||||
drv = find_image_format(filename);
|
||||
ret = find_image_format(filename, &drv);
|
||||
probed = 1;
|
||||
}
|
||||
|
||||
if (!drv) {
|
||||
ret = -ENOENT;
|
||||
goto unlink_and_fail;
|
||||
}
|
||||
|
||||
@@ -732,6 +743,7 @@ int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
|
||||
int bdrv_commit(BlockDriverState *bs)
|
||||
{
|
||||
BlockDriver *drv = bs->drv;
|
||||
BlockDriver *backing_drv;
|
||||
int64_t i, total_sectors;
|
||||
int n, j, ro, open_flags;
|
||||
int ret = 0, rw_ret = 0;
|
||||
@@ -749,7 +761,8 @@ int bdrv_commit(BlockDriverState *bs)
|
||||
if (bs->backing_hd->keep_read_only) {
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
|
||||
backing_drv = bs->backing_hd->drv;
|
||||
ro = bs->backing_hd->read_only;
|
||||
strncpy(filename, bs->backing_hd->filename, sizeof(filename));
|
||||
open_flags = bs->backing_hd->open_flags;
|
||||
@@ -759,12 +772,14 @@ int bdrv_commit(BlockDriverState *bs)
|
||||
bdrv_delete(bs->backing_hd);
|
||||
bs->backing_hd = NULL;
|
||||
bs_rw = bdrv_new("");
|
||||
rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR, drv);
|
||||
rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
|
||||
backing_drv);
|
||||
if (rw_ret < 0) {
|
||||
bdrv_delete(bs_rw);
|
||||
/* try to re-open read-only */
|
||||
bs_ro = bdrv_new("");
|
||||
ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR, drv);
|
||||
ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
|
||||
backing_drv);
|
||||
if (ret < 0) {
|
||||
bdrv_delete(bs_ro);
|
||||
/* drive not functional anymore */
|
||||
@@ -816,7 +831,8 @@ ro_cleanup:
|
||||
bdrv_delete(bs->backing_hd);
|
||||
bs->backing_hd = NULL;
|
||||
bs_ro = bdrv_new("");
|
||||
ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR, drv);
|
||||
ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
|
||||
backing_drv);
|
||||
if (ret < 0) {
|
||||
bdrv_delete(bs_ro);
|
||||
/* drive not functional anymore */
|
||||
@@ -1465,10 +1481,8 @@ int bdrv_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
assert(bs->drv);
|
||||
|
||||
if (bs->drv->no_zero_init) {
|
||||
return 0;
|
||||
} else if (bs->file) {
|
||||
return bdrv_has_zero_init(bs->file);
|
||||
if (bs->drv->bdrv_has_zero_init) {
|
||||
return bs->drv->bdrv_has_zero_init(bs);
|
||||
}
|
||||
|
||||
return 1;
|
||||
@@ -1800,6 +1814,11 @@ int bdrv_can_snapshot(BlockDriverState *bs)
|
||||
return 1;
|
||||
}
|
||||
|
||||
int bdrv_is_snapshot(BlockDriverState *bs)
|
||||
{
|
||||
return !!(bs->open_flags & BDRV_O_SNAPSHOT);
|
||||
}
|
||||
|
||||
BlockDriverState *bdrv_snapshots(void)
|
||||
{
|
||||
BlockDriverState *bs;
|
||||
@@ -2502,7 +2521,7 @@ int bdrv_is_inserted(BlockDriverState *bs)
|
||||
if (!drv)
|
||||
return 0;
|
||||
if (!drv->bdrv_is_inserted)
|
||||
return 1;
|
||||
return !bs->tray_open;
|
||||
ret = drv->bdrv_is_inserted(bs);
|
||||
return ret;
|
||||
}
|
||||
@@ -2544,10 +2563,11 @@ int bdrv_eject(BlockDriverState *bs, int eject_flag)
|
||||
ret = drv->bdrv_eject(bs, eject_flag);
|
||||
}
|
||||
if (ret == -ENOTSUP) {
|
||||
if (eject_flag)
|
||||
bdrv_close(bs);
|
||||
ret = 0;
|
||||
}
|
||||
if (ret >= 0) {
|
||||
bs->tray_open = eject_flag;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
3
block.h
3
block.h
@@ -35,7 +35,7 @@ typedef struct QEMUSnapshotInfo {
|
||||
#define BDRV_O_NO_BACKING 0x0100 /* don't open the backing file */
|
||||
#define BDRV_O_NO_FLUSH 0x0200 /* disable flushing on this disk */
|
||||
|
||||
#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB)
|
||||
#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH)
|
||||
|
||||
#define BDRV_SECTOR_BITS 9
|
||||
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
|
||||
@@ -202,6 +202,7 @@ const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
|
||||
void bdrv_get_backing_filename(BlockDriverState *bs,
|
||||
char *filename, int filename_size);
|
||||
int bdrv_can_snapshot(BlockDriverState *bs);
|
||||
int bdrv_is_snapshot(BlockDriverState *bs);
|
||||
BlockDriverState *bdrv_snapshots(void);
|
||||
int bdrv_snapshot_create(BlockDriverState *bs,
|
||||
QEMUSnapshotInfo *sn_info);
|
||||
|
@@ -655,7 +655,7 @@ static int write_l2_entries(BlockDriverState *bs, uint64_t *l2_table,
|
||||
int ret;
|
||||
|
||||
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
|
||||
ret = bdrv_pwrite_sync(bs->file, l2_offset + start_offset,
|
||||
ret = bdrv_pwrite(bs->file, l2_offset + start_offset,
|
||||
&l2_table[l2_start_index], len);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
@@ -718,9 +718,17 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (i = 0; i < j; i++)
|
||||
qcow2_free_any_clusters(bs,
|
||||
be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
|
||||
/*
|
||||
* If this was a COW, we need to decrease the refcount of the old cluster.
|
||||
* Also flush bs->file to get the right order for L2 and refcount update.
|
||||
*/
|
||||
if (j != 0) {
|
||||
bdrv_flush(bs->file);
|
||||
for (i = 0; i < j; i++) {
|
||||
qcow2_free_any_clusters(bs,
|
||||
be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
err:
|
||||
|
@@ -993,6 +993,11 @@ static int hdev_create(const char *filename, QEMUOptionParameter *options)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hdev_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static BlockDriver bdrv_host_device = {
|
||||
.format_name = "host_device",
|
||||
.protocol_name = "host_device",
|
||||
@@ -1002,7 +1007,7 @@ static BlockDriver bdrv_host_device = {
|
||||
.bdrv_close = raw_close,
|
||||
.bdrv_create = hdev_create,
|
||||
.create_options = raw_create_options,
|
||||
.no_zero_init = 1,
|
||||
.bdrv_has_zero_init = hdev_has_zero_init,
|
||||
.bdrv_flush = raw_flush,
|
||||
|
||||
.bdrv_aio_readv = raw_aio_readv,
|
||||
@@ -1117,7 +1122,7 @@ static BlockDriver bdrv_host_floppy = {
|
||||
.bdrv_close = raw_close,
|
||||
.bdrv_create = hdev_create,
|
||||
.create_options = raw_create_options,
|
||||
.no_zero_init = 1,
|
||||
.bdrv_has_zero_init = hdev_has_zero_init,
|
||||
.bdrv_flush = raw_flush,
|
||||
|
||||
.bdrv_aio_readv = raw_aio_readv,
|
||||
@@ -1149,9 +1154,6 @@ static int cdrom_probe_device(const char *filename)
|
||||
int fd, ret;
|
||||
int prio = 0;
|
||||
|
||||
if (strstart(filename, "/dev/cd", NULL))
|
||||
prio = 50;
|
||||
|
||||
fd = open(filename, O_RDONLY | O_NONBLOCK);
|
||||
if (fd < 0) {
|
||||
goto out;
|
||||
@@ -1217,7 +1219,7 @@ static BlockDriver bdrv_host_cdrom = {
|
||||
.bdrv_close = raw_close,
|
||||
.bdrv_create = hdev_create,
|
||||
.create_options = raw_create_options,
|
||||
.no_zero_init = 1,
|
||||
.bdrv_has_zero_init = hdev_has_zero_init,
|
||||
.bdrv_flush = raw_flush,
|
||||
|
||||
.bdrv_aio_readv = raw_aio_readv,
|
||||
@@ -1340,7 +1342,7 @@ static BlockDriver bdrv_host_cdrom = {
|
||||
.bdrv_close = raw_close,
|
||||
.bdrv_create = hdev_create,
|
||||
.create_options = raw_create_options,
|
||||
.no_zero_init = 1,
|
||||
.bdrv_has_zero_init = hdev_has_zero_init,
|
||||
.bdrv_flush = raw_flush,
|
||||
|
||||
.bdrv_aio_readv = raw_aio_readv,
|
||||
|
@@ -394,6 +394,11 @@ static int raw_set_locked(BlockDriverState *bs, int locked)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int hdev_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static BlockDriver bdrv_host_device = {
|
||||
.format_name = "host_device",
|
||||
.protocol_name = "host_device",
|
||||
@@ -402,6 +407,7 @@ static BlockDriver bdrv_host_device = {
|
||||
.bdrv_file_open = hdev_open,
|
||||
.bdrv_close = raw_close,
|
||||
.bdrv_flush = raw_flush,
|
||||
.bdrv_has_zero_init = hdev_has_zero_init,
|
||||
|
||||
.bdrv_read = raw_read,
|
||||
.bdrv_write = raw_write,
|
||||
|
@@ -237,6 +237,11 @@ static QEMUOptionParameter raw_create_options[] = {
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static int raw_has_zero_init(BlockDriverState *bs)
|
||||
{
|
||||
return bdrv_has_zero_init(bs->file);
|
||||
}
|
||||
|
||||
static BlockDriver bdrv_raw = {
|
||||
.format_name = "raw",
|
||||
|
||||
@@ -264,6 +269,7 @@ static BlockDriver bdrv_raw = {
|
||||
|
||||
.bdrv_create = raw_create,
|
||||
.create_options = raw_create_options,
|
||||
.bdrv_has_zero_init = raw_has_zero_init,
|
||||
};
|
||||
|
||||
static void bdrv_raw_init(void)
|
||||
|
@@ -512,7 +512,7 @@ static inline uint8_t fat_chksum(const direntry_t* entry)
|
||||
for(i=0;i<11;i++) {
|
||||
unsigned char c;
|
||||
|
||||
c = (i <= 8) ? entry->name[i] : entry->extension[i-8];
|
||||
c = (i < 8) ? entry->name[i] : entry->extension[i-8];
|
||||
chksum=(((chksum&0xfe)>>1)|((chksum&0x01)?0x80:0)) + c;
|
||||
}
|
||||
|
||||
@@ -2665,6 +2665,11 @@ static int vvfat_write(BlockDriverState *bs, int64_t sector_num,
|
||||
|
||||
DLOG(checkpoint());
|
||||
|
||||
/* Check if we're operating in read-only mode */
|
||||
if (s->qcow == NULL) {
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
vvfat_close_current_file(s);
|
||||
|
||||
/*
|
||||
@@ -2763,12 +2768,12 @@ static int vvfat_is_allocated(BlockDriverState *bs,
|
||||
|
||||
static int write_target_commit(BlockDriverState *bs, int64_t sector_num,
|
||||
const uint8_t* buffer, int nb_sectors) {
|
||||
BDRVVVFATState* s = bs->opaque;
|
||||
BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque);
|
||||
return try_commit(s);
|
||||
}
|
||||
|
||||
static void write_target_close(BlockDriverState *bs) {
|
||||
BDRVVVFATState* s = bs->opaque;
|
||||
BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque);
|
||||
bdrv_delete(s->qcow);
|
||||
free(s->qcow_filename);
|
||||
}
|
||||
@@ -2783,6 +2788,7 @@ static int enable_write_target(BDRVVVFATState *s)
|
||||
{
|
||||
BlockDriver *bdrv_qcow;
|
||||
QEMUOptionParameter *options;
|
||||
int ret;
|
||||
int size = sector2cluster(s, s->sector_count);
|
||||
s->used_clusters = calloc(size, 1);
|
||||
|
||||
@@ -2798,11 +2804,16 @@ static int enable_write_target(BDRVVVFATState *s)
|
||||
|
||||
if (bdrv_create(bdrv_qcow, s->qcow_filename, options) < 0)
|
||||
return -1;
|
||||
|
||||
s->qcow = bdrv_new("");
|
||||
if (s->qcow == NULL ||
|
||||
bdrv_open(s->qcow, s->qcow_filename, BDRV_O_RDWR, bdrv_qcow) < 0)
|
||||
{
|
||||
return -1;
|
||||
if (s->qcow == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ret = bdrv_open(s->qcow, s->qcow_filename,
|
||||
BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, bdrv_qcow);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifndef _WIN32
|
||||
@@ -2811,7 +2822,8 @@ static int enable_write_target(BDRVVVFATState *s)
|
||||
|
||||
s->bs->backing_hd = calloc(sizeof(BlockDriverState), 1);
|
||||
s->bs->backing_hd->drv = &vvfat_write_target;
|
||||
s->bs->backing_hd->opaque = s;
|
||||
s->bs->backing_hd->opaque = qemu_malloc(sizeof(void*));
|
||||
*(void**)s->bs->backing_hd->opaque = s;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
12
block_int.h
12
block_int.h
@@ -127,8 +127,11 @@ struct BlockDriver {
|
||||
|
||||
void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event);
|
||||
|
||||
/* Set if newly created images are not guaranteed to contain only zeros */
|
||||
int no_zero_init;
|
||||
/*
|
||||
* Returns 1 if newly created images are guaranteed to contain only
|
||||
* zeros, 0 otherwise.
|
||||
*/
|
||||
int (*bdrv_has_zero_init)(BlockDriverState *bs);
|
||||
|
||||
QLIST_ENTRY(BlockDriver) list;
|
||||
};
|
||||
@@ -141,6 +144,7 @@ struct BlockDriverState {
|
||||
int open_flags; /* flags used to open the file, re-used for re-open */
|
||||
int removable; /* if true, the media can be removed */
|
||||
int locked; /* if true, the media cannot temporarily be ejected */
|
||||
int tray_open; /* if true, the virtual tray is open */
|
||||
int encrypted; /* if true, the media is encrypted */
|
||||
int valid_key; /* if true, a valid encryption key has been set */
|
||||
int sg; /* if true, the device is a /dev/sg* */
|
||||
@@ -243,7 +247,7 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
|
||||
_conf.logical_block_size, 512), \
|
||||
DEFINE_PROP_UINT16("physical_block_size", _state, \
|
||||
_conf.physical_block_size, 512), \
|
||||
DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 512), \
|
||||
DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 512)
|
||||
DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 0), \
|
||||
DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 0)
|
||||
|
||||
#endif /* BLOCK_INT_H */
|
||||
|
@@ -590,6 +590,7 @@ int do_change_block(Monitor *mon, const char *device,
|
||||
return -1;
|
||||
}
|
||||
bdrv_flags = bdrv_is_read_only(bs) ? 0 : BDRV_O_RDWR;
|
||||
bdrv_flags |= bdrv_is_snapshot(bs) ? BDRV_O_SNAPSHOT : 0;
|
||||
if (bdrv_open(bs, filename, bdrv_flags, drv) < 0) {
|
||||
qerror_report(QERR_OPEN_FILE_FAILED, filename);
|
||||
return -1;
|
||||
|
@@ -629,8 +629,10 @@ extern unsigned long guest_base;
|
||||
extern int have_guest_base;
|
||||
extern unsigned long reserved_va;
|
||||
#define GUEST_BASE guest_base
|
||||
#define RESERVED_VA reserved_va
|
||||
#else
|
||||
#define GUEST_BASE 0ul
|
||||
#define RESERVED_VA 0ul
|
||||
#endif
|
||||
|
||||
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
|
||||
|
@@ -40,6 +40,8 @@ static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
|
||||
}
|
||||
|
||||
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
|
||||
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
|
||||
ram_addr_t size, void *host);
|
||||
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
|
||||
void qemu_ram_free(ram_addr_t addr);
|
||||
/* This should only be used for ram local to a device. */
|
||||
|
96
docs/specs/ivshmem_device_spec.txt
Normal file
96
docs/specs/ivshmem_device_spec.txt
Normal file
@@ -0,0 +1,96 @@
|
||||
|
||||
Device Specification for Inter-VM shared memory device
|
||||
------------------------------------------------------
|
||||
|
||||
The Inter-VM shared memory device is designed to share a region of memory to
|
||||
userspace in multiple virtual guests. The memory region does not belong to any
|
||||
guest, but is a POSIX memory object on the host. Optionally, the device may
|
||||
support sending interrupts to other guests sharing the same memory region.
|
||||
|
||||
|
||||
The Inter-VM PCI device
|
||||
-----------------------
|
||||
|
||||
*BARs*
|
||||
|
||||
The device supports three BARs. BAR0 is a 1 Kbyte MMIO region to support
|
||||
registers. BAR1 is used for MSI-X when it is enabled in the device. BAR2 is
|
||||
used to map the shared memory object from the host. The size of BAR2 is
|
||||
specified when the guest is started and must be a power of 2 in size.
|
||||
|
||||
*Registers*
|
||||
|
||||
The device currently supports 4 registers of 32-bits each. Registers
|
||||
are used for synchronization between guests sharing the same memory object when
|
||||
interrupts are supported (this requires using the shared memory server).
|
||||
|
||||
The server assigns each VM an ID number and sends this ID number to the Qemu
|
||||
process when the guest starts.
|
||||
|
||||
enum ivshmem_registers {
|
||||
IntrMask = 0,
|
||||
IntrStatus = 4,
|
||||
IVPosition = 8,
|
||||
Doorbell = 12
|
||||
};
|
||||
|
||||
The first two registers are the interrupt mask and status registers. Mask and
|
||||
status are only used with pin-based interrupts. They are unused with MSI
|
||||
interrupts.
|
||||
|
||||
Status Register: The status register is set to 1 when an interrupt occurs.
|
||||
|
||||
Mask Register: The mask register is bitwise ANDed with the interrupt status
|
||||
and the result will raise an interrupt if it is non-zero. However, since 1 is
|
||||
the only value the status will be set to, it is only the first bit of the mask
|
||||
that has any effect. Therefore interrupts can be masked by setting the first
|
||||
bit to 0 and unmasked by setting the first bit to 1.
|
||||
|
||||
IVPosition Register: The IVPosition register is read-only and reports the
|
||||
guest's ID number. The guest IDs are non-negative integers. When using the
|
||||
server, since the server is a separate process, the VM ID will only be set when
|
||||
the device is ready (shared memory is received from the server and accessible via
|
||||
the device). If the device is not ready, the IVPosition will return -1.
|
||||
Applications should ensure that they have a valid VM ID before accessing the
|
||||
shared memory.
|
||||
|
||||
Doorbell Register: To interrupt another guest, a guest must write to the
|
||||
Doorbell register. The doorbell register is 32-bits, logically divided into
|
||||
two 16-bit fields. The high 16-bits are the guest ID to interrupt and the low
|
||||
16-bits are the interrupt vector to trigger. The semantics of the value
|
||||
written to the doorbell depends on whether the device is using MSI or a regular
|
||||
pin-based interrupt. In short, MSI uses vectors while regular interrupts set the
|
||||
status register.
|
||||
|
||||
Regular Interrupts
|
||||
|
||||
If regular interrupts are used (due to either a guest not supporting MSI or the
|
||||
user specifying not to use them on startup) then the value written to the lower
|
||||
16-bits of the Doorbell register results is arbitrary and will trigger an
|
||||
interrupt in the destination guest.
|
||||
|
||||
Message Signalled Interrupts
|
||||
|
||||
A ivshmem device may support multiple MSI vectors. If so, the lower 16-bits
|
||||
written to the Doorbell register must be between 0 and the maximum number of
|
||||
vectors the guest supports. The lower 16 bits written to the doorbell is the
|
||||
MSI vector that will be raised in the destination guest. The number of MSI
|
||||
vectors is configurable but it is set when the VM is started.
|
||||
|
||||
The important thing to remember with MSI is that it is only a signal, no status
|
||||
is set (since MSI interrupts are not shared). All information other than the
|
||||
interrupt itself should be communicated via the shared memory region. Devices
|
||||
supporting multiple MSI vectors can use different vectors to indicate different
|
||||
events have occurred. The semantics of interrupt vectors are left to the
|
||||
user's discretion.
|
||||
|
||||
|
||||
Usage in the Guest
|
||||
------------------
|
||||
|
||||
The shared memory device is intended to be used with the provided UIO driver.
|
||||
Very little configuration is needed. The guest should map BAR0 to access the
|
||||
registers (an array of 32-bit ints allows simple writing) and map BAR2 to
|
||||
access the shared memory region itself. The size of the shared memory region
|
||||
is specified when the guest (or shared memory server) is started. A guest may
|
||||
map the whole shared memory region or only part of it.
|
43
exec.c
43
exec.c
@@ -2808,6 +2808,49 @@ static ram_addr_t last_ram_offset(void)
|
||||
return last;
|
||||
}
|
||||
|
||||
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
|
||||
ram_addr_t size, void *host)
|
||||
{
|
||||
RAMBlock *new_block, *block;
|
||||
|
||||
size = TARGET_PAGE_ALIGN(size);
|
||||
new_block = qemu_mallocz(sizeof(*new_block));
|
||||
|
||||
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
|
||||
char *id = dev->parent_bus->info->get_dev_path(dev);
|
||||
if (id) {
|
||||
snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
|
||||
qemu_free(id);
|
||||
}
|
||||
}
|
||||
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
|
||||
|
||||
QLIST_FOREACH(block, &ram_list.blocks, next) {
|
||||
if (!strcmp(block->idstr, new_block->idstr)) {
|
||||
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
|
||||
new_block->idstr);
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
new_block->host = host;
|
||||
|
||||
new_block->offset = find_ram_offset(size);
|
||||
new_block->length = size;
|
||||
|
||||
QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
|
||||
|
||||
ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
|
||||
last_ram_offset() >> TARGET_PAGE_BITS);
|
||||
memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
|
||||
0xff, size >> TARGET_PAGE_BITS);
|
||||
|
||||
if (kvm_enabled())
|
||||
kvm_setup_guest_memory(new_block->host, size);
|
||||
|
||||
return new_block->offset;
|
||||
}
|
||||
|
||||
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
|
||||
{
|
||||
RAMBlock *new_block, *block;
|
||||
|
2
hw/hw.h
2
hw/hw.h
@@ -264,6 +264,8 @@ int register_savevm_live(DeviceState *dev,
|
||||
void *opaque);
|
||||
|
||||
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
|
||||
void register_device_unmigratable(DeviceState *dev, const char *idstr,
|
||||
void *opaque);
|
||||
|
||||
typedef void QEMUResetHandler(void *opaque);
|
||||
|
||||
|
@@ -1643,6 +1643,21 @@ static void ide_atapi_cmd(IDEState *s)
|
||||
ide_atapi_cmd_reply(s, len, max_len);
|
||||
break;
|
||||
}
|
||||
case GPCMD_GET_EVENT_STATUS_NOTIFICATION:
|
||||
max_len = ube16_to_cpu(packet + 7);
|
||||
|
||||
if (packet[1] & 0x01) { /* polling */
|
||||
/* We don't support any event class (yet). */
|
||||
cpu_to_ube16(buf, 0x00); /* No event descriptor returned */
|
||||
buf[2] = 0x80; /* No Event Available (NEA) */
|
||||
buf[3] = 0x00; /* Empty supported event classes */
|
||||
ide_atapi_cmd_reply(s, 4, max_len);
|
||||
} else { /* asynchronous mode */
|
||||
/* Only polling is supported, asynchronous mode is not. */
|
||||
ide_atapi_cmd_error(s, SENSE_ILLEGAL_REQUEST,
|
||||
ASC_INV_FIELD_IN_CMD_PACKET);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ide_atapi_cmd_error(s, SENSE_ILLEGAL_REQUEST,
|
||||
ASC_ILLEGAL_OPCODE);
|
||||
|
23
hw/ide/pci.c
23
hw/ide/pci.c
@@ -40,8 +40,27 @@ void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
|
||||
printf("%s: 0x%08x\n", __func__, val);
|
||||
#endif
|
||||
if (!(val & BM_CMD_START)) {
|
||||
/* XXX: do it better */
|
||||
ide_dma_cancel(bm);
|
||||
/*
|
||||
* We can't cancel Scatter Gather DMA in the middle of the
|
||||
* operation or a partial (not full) DMA transfer would reach
|
||||
* the storage so we wait for completion instead (we beahve
|
||||
* like if the DMA was completed by the time the guest trying
|
||||
* to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
|
||||
* set).
|
||||
*
|
||||
* In the future we'll be able to safely cancel the I/O if the
|
||||
* whole DMA operation will be submitted to disk with a single
|
||||
* aio operation with preadv/pwritev.
|
||||
*/
|
||||
if (bm->aiocb) {
|
||||
qemu_aio_flush();
|
||||
#ifdef DEBUG_IDE
|
||||
if (bm->aiocb)
|
||||
printf("ide_dma_cancel: aiocb still pending");
|
||||
if (bm->status & BM_STATUS_DMAING)
|
||||
printf("ide_dma_cancel: BM_STATUS_DMAING still pending");
|
||||
#endif
|
||||
}
|
||||
bm->cmd = val & 0x09;
|
||||
} else {
|
||||
if (!(bm->status & BM_STATUS_DMAING)) {
|
||||
|
829
hw/ivshmem.c
Normal file
829
hw/ivshmem.c
Normal file
@@ -0,0 +1,829 @@
|
||||
/*
|
||||
* Inter-VM Shared Memory PCI device.
|
||||
*
|
||||
* Author:
|
||||
* Cam Macdonell <cam@cs.ualberta.ca>
|
||||
*
|
||||
* Based On: cirrus_vga.c
|
||||
* Copyright (c) 2004 Fabrice Bellard
|
||||
* Copyright (c) 2004 Makoto Suzuki (suzu)
|
||||
*
|
||||
* and rtl8139.c
|
||||
* Copyright (c) 2006 Igor Kovalenko
|
||||
*
|
||||
* This code is licensed under the GNU GPL v2.
|
||||
*/
|
||||
#include "hw.h"
|
||||
#include "pc.h"
|
||||
#include "pci.h"
|
||||
#include "msix.h"
|
||||
#include "kvm.h"
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
#define IVSHMEM_IOEVENTFD 0
|
||||
#define IVSHMEM_MSI 1
|
||||
|
||||
#define IVSHMEM_PEER 0
|
||||
#define IVSHMEM_MASTER 1
|
||||
|
||||
#define IVSHMEM_REG_BAR_SIZE 0x100
|
||||
|
||||
//#define DEBUG_IVSHMEM
|
||||
#ifdef DEBUG_IVSHMEM
|
||||
#define IVSHMEM_DPRINTF(fmt, ...) \
|
||||
do {printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
|
||||
#else
|
||||
#define IVSHMEM_DPRINTF(fmt, ...)
|
||||
#endif
|
||||
|
||||
typedef struct Peer {
|
||||
int nb_eventfds;
|
||||
int *eventfds;
|
||||
} Peer;
|
||||
|
||||
typedef struct EventfdEntry {
|
||||
PCIDevice *pdev;
|
||||
int vector;
|
||||
} EventfdEntry;
|
||||
|
||||
typedef struct IVShmemState {
|
||||
PCIDevice dev;
|
||||
uint32_t intrmask;
|
||||
uint32_t intrstatus;
|
||||
uint32_t doorbell;
|
||||
|
||||
CharDriverState **eventfd_chr;
|
||||
CharDriverState *server_chr;
|
||||
int ivshmem_mmio_io_addr;
|
||||
|
||||
pcibus_t mmio_addr;
|
||||
pcibus_t shm_pci_addr;
|
||||
uint64_t ivshmem_offset;
|
||||
uint64_t ivshmem_size; /* size of shared memory region */
|
||||
int shm_fd; /* shared memory file descriptor */
|
||||
|
||||
Peer *peers;
|
||||
int nb_peers; /* how many guests we have space for */
|
||||
int max_peer; /* maximum numbered peer */
|
||||
|
||||
int vm_id;
|
||||
uint32_t vectors;
|
||||
uint32_t features;
|
||||
EventfdEntry *eventfd_table;
|
||||
|
||||
char * shmobj;
|
||||
char * sizearg;
|
||||
char * role;
|
||||
int role_val; /* scalar to avoid multiple string comparisons */
|
||||
} IVShmemState;
|
||||
|
||||
/* registers for the Inter-VM shared memory device */
|
||||
enum ivshmem_registers {
|
||||
INTRMASK = 0,
|
||||
INTRSTATUS = 4,
|
||||
IVPOSITION = 8,
|
||||
DOORBELL = 12,
|
||||
};
|
||||
|
||||
static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
|
||||
unsigned int feature) {
|
||||
return (ivs->features & (1 << feature));
|
||||
}
|
||||
|
||||
static inline bool is_power_of_two(uint64_t x) {
|
||||
return (x & (x - 1)) == 0;
|
||||
}
|
||||
|
||||
static void ivshmem_map(PCIDevice *pci_dev, int region_num,
|
||||
pcibus_t addr, pcibus_t size, int type)
|
||||
{
|
||||
IVShmemState *s = DO_UPCAST(IVShmemState, dev, pci_dev);
|
||||
|
||||
s->shm_pci_addr = addr;
|
||||
|
||||
if (s->ivshmem_offset > 0) {
|
||||
cpu_register_physical_memory(s->shm_pci_addr, s->ivshmem_size,
|
||||
s->ivshmem_offset);
|
||||
}
|
||||
|
||||
IVSHMEM_DPRINTF("guest pci addr = %" FMT_PCIBUS ", guest h/w addr = %"
|
||||
PRIu64 ", size = %" FMT_PCIBUS "\n", addr, s->ivshmem_offset, size);
|
||||
|
||||
}
|
||||
|
||||
/* accessing registers - based on rtl8139 */
|
||||
static void ivshmem_update_irq(IVShmemState *s, int val)
|
||||
{
|
||||
int isr;
|
||||
isr = (s->intrstatus & s->intrmask) & 0xffffffff;
|
||||
|
||||
/* don't print ISR resets */
|
||||
if (isr) {
|
||||
IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
|
||||
isr ? 1 : 0, s->intrstatus, s->intrmask);
|
||||
}
|
||||
|
||||
qemu_set_irq(s->dev.irq[0], (isr != 0));
|
||||
}
|
||||
|
||||
static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
|
||||
{
|
||||
IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
|
||||
|
||||
s->intrmask = val;
|
||||
|
||||
ivshmem_update_irq(s, val);
|
||||
}
|
||||
|
||||
static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
|
||||
{
|
||||
uint32_t ret = s->intrmask;
|
||||
|
||||
IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
|
||||
{
|
||||
IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
|
||||
|
||||
s->intrstatus = val;
|
||||
|
||||
ivshmem_update_irq(s, val);
|
||||
return;
|
||||
}
|
||||
|
||||
static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
|
||||
{
|
||||
uint32_t ret = s->intrstatus;
|
||||
|
||||
/* reading ISR clears all interrupts */
|
||||
s->intrstatus = 0;
|
||||
|
||||
ivshmem_update_irq(s, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ivshmem_io_writew(void *opaque, target_phys_addr_t addr,
|
||||
uint32_t val)
|
||||
{
|
||||
|
||||
IVSHMEM_DPRINTF("We shouldn't be writing words\n");
|
||||
}
|
||||
|
||||
/*
 * 32-bit write to the register BAR (BAR0).
 *
 * Layout: INTRMASK / INTRSTATUS control the legacy interrupt pair;
 * DOORBELL takes the destination VM ID in the high 16 bits and the
 * vector number in the low byte, and kicks the matching peer eventfd.
 */
static void ivshmem_io_writel(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    IVShmemState *s = opaque;

    uint64_t write_one = 1;
    uint16_t dest = val >> 16;
    uint16_t vector = val & 0xff;

    addr &= 0xfc;

    IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
    switch (addr)
    {
        case INTRMASK:
            ivshmem_IntrMask_write(s, val);
            break;

        case INTRSTATUS:
            ivshmem_IntrStatus_write(s, val);
            break;

        case DOORBELL:
            /* check that dest VM ID is reasonable */
            if (dest > s->max_peer) {
                IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                break;
            }

            /* check doorbell range */
            if (vector < s->peers[dest].nb_eventfds) {
                IVSHMEM_DPRINTF("Writing %" PRId64 " to VM %d on vector %d\n",
                                                  write_one, dest, vector);
                /* an eventfd counter is always a 64-bit write; use sizeof
                 * rather than the magic constant 8 */
                if (write(s->peers[dest].eventfds[vector],
                        &(write_one), sizeof(write_one)) != sizeof(write_one)) {
                    IVSHMEM_DPRINTF("error writing to eventfd\n");
                }
            }
            break;
        default:
            /* fixed: this is an unknown register offset, not a bad
             * doorbell destination — the old message was misleading */
            IVSHMEM_DPRINTF("Invalid write to addr " TARGET_FMT_plx "\n", addr);
    }
}
|
||||
|
||||
static void ivshmem_io_writeb(void *opaque, target_phys_addr_t addr,
|
||||
uint32_t val)
|
||||
{
|
||||
IVSHMEM_DPRINTF("We shouldn't be writing bytes\n");
|
||||
}
|
||||
|
||||
/* 16-bit register reads are not supported; log and return 0. */
static uint32_t ivshmem_io_readw(void *opaque, target_phys_addr_t addr)
{
    IVSHMEM_DPRINTF("We shouldn't be reading words\n");
    return 0;
}
|
||||
|
||||
/*
 * 32-bit read from the register BAR (BAR0).
 * IVPOSITION returns this guest's VM ID once the shared region has been
 * received and mapped, -1 before that.
 */
static uint32_t ivshmem_io_readl(void *opaque, target_phys_addr_t addr)
{
    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr) {
    case INTRMASK:
        ret = ivshmem_IntrMask_read(s);
        break;

    case INTRSTATUS:
        ret = ivshmem_IntrStatus_read(s);
        break;

    case IVPOSITION:
        /* return my VM ID if the memory is mapped */
        ret = (s->shm_fd > 0) ? s->vm_id : -1;
        break;

    default:
        IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
        ret = 0;
    }

    return ret;
}
|
||||
|
||||
/* Byte-wide register reads are not supported; log and return 0. */
static uint32_t ivshmem_io_readb(void *opaque, target_phys_addr_t addr)
{
    IVSHMEM_DPRINTF("We shouldn't be reading bytes\n");
    return 0;
}
|
||||
|
||||
static CPUReadMemoryFunc * const ivshmem_mmio_read[3] = {
|
||||
ivshmem_io_readb,
|
||||
ivshmem_io_readw,
|
||||
ivshmem_io_readl,
|
||||
};
|
||||
|
||||
static CPUWriteMemoryFunc * const ivshmem_mmio_write[3] = {
|
||||
ivshmem_io_writeb,
|
||||
ivshmem_io_writew,
|
||||
ivshmem_io_writel,
|
||||
};
|
||||
|
||||
static void ivshmem_receive(void *opaque, const uint8_t *buf, int size)
|
||||
{
|
||||
IVShmemState *s = opaque;
|
||||
|
||||
ivshmem_IntrStatus_write(s, *buf);
|
||||
|
||||
IVSHMEM_DPRINTF("ivshmem_receive 0x%02x\n", *buf);
|
||||
}
|
||||
|
||||
/* Chardev flow control: we can always accept one 8-byte eventfd counter. */
static int ivshmem_can_receive(void * opaque)
{
    return 8;
}
|
||||
|
||||
/* Chardev event callback; nothing to do beyond debug logging. */
static void ivshmem_event(void *opaque, int event)
{
    IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
}
|
||||
|
||||
static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
|
||||
|
||||
EventfdEntry *entry = opaque;
|
||||
PCIDevice *pdev = entry->pdev;
|
||||
|
||||
IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, entry->vector);
|
||||
msix_notify(pdev, entry->vector);
|
||||
}
|
||||
|
||||
static CharDriverState* create_eventfd_chr_device(void * opaque, int eventfd,
|
||||
int vector)
|
||||
{
|
||||
/* create a event character device based on the passed eventfd */
|
||||
IVShmemState *s = opaque;
|
||||
CharDriverState * chr;
|
||||
|
||||
chr = qemu_chr_open_eventfd(eventfd);
|
||||
|
||||
if (chr == NULL) {
|
||||
fprintf(stderr, "creating eventfd for eventfd %d failed\n", eventfd);
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* if MSI is supported we need multiple interrupts */
|
||||
if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
|
||||
s->eventfd_table[vector].pdev = &s->dev;
|
||||
s->eventfd_table[vector].vector = vector;
|
||||
|
||||
qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
|
||||
ivshmem_event, &s->eventfd_table[vector]);
|
||||
} else {
|
||||
qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
|
||||
ivshmem_event, s);
|
||||
}
|
||||
|
||||
return chr;
|
||||
|
||||
}
|
||||
|
||||
/*
 * Verify that the guest will not map more memory than the shared object
 * actually provides.  Returns 0 when the object is large enough, -1 on
 * error.
 */
static int check_shm_size(IVShmemState *s, int fd) {

    struct stat buf;

    /* fixed: fstat() failure previously went unnoticed and a garbage
     * st_size was compared against the requested size */
    if (fstat(fd, &buf) < 0) {
        fprintf(stderr, "IVSHMEM ERROR: could not stat shared object: %s\n",
                strerror(errno));
        return -1;
    }

    if (s->ivshmem_size > buf.st_size) {
        fprintf(stderr,
                "IVSHMEM ERROR: Requested memory size greater"
                " than shared object size (%" PRIu64 " > %" PRIu64")\n",
                s->ivshmem_size, (uint64_t)buf.st_size);
        return -1;
    } else {
        return 0;
    }
}
|
||||
|
||||
/* create the shared memory BAR when we are not using the server, so we can
|
||||
* create the BAR and map the memory immediately */
|
||||
/* create the shared memory BAR when we are not using the server, so we can
 * create the BAR and map the memory immediately */
static void create_shared_memory_BAR(IVShmemState *s, int fd) {

    void * ptr;

    ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    /* fixed: MAP_FAILED was previously handed straight to
     * qemu_ram_alloc_from_ptr(), registering an invalid region */
    if (ptr == MAP_FAILED) {
        fprintf(stderr, "ivshmem: could not mmap shared object: %s\n",
                strerror(errno));
        exit(-1);
    }

    /* only record the fd once the mapping has succeeded */
    s->shm_fd = fd;

    s->ivshmem_offset = qemu_ram_alloc_from_ptr(&s->dev.qdev, "ivshmem.bar2",
                                                s->ivshmem_size, ptr);

    /* region for shared memory */
    pci_register_bar(&s->dev, 2, s->ivshmem_size,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, ivshmem_map);
}
|
||||
|
||||
/* Tear down all eventfds belonging to the peer at @posn: deassign any
 * kernel ioeventfd bindings, close the descriptors, and free the array. */
static void close_guest_eventfds(IVShmemState *s, int posn)
{
    int i;
    int guest_curr_max = s->peers[posn].nb_eventfds;

    for (i = 0; i < guest_curr_max; i++) {
        kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
                    s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
        close(s->peers[posn].eventfds[i]);
    }

    qemu_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}
|
||||
|
||||
/* (Re-)assign kernel ioeventfd bindings for every known peer vector,
 * matching the (peer << 16 | vector) encoding used by doorbell writes. */
static void setup_ioeventfds(IVShmemState *s) {

    int i, j;

    for (i = 0; i <= s->max_peer; i++) {
        for (j = 0; j < s->peers[i].nb_eventfds; j++) {
            kvm_set_ioeventfd_mmio_long(s->peers[i].eventfds[j],
                    s->mmio_addr + DOORBELL, (i << 16) | j, 1);
        }
    }
}
|
||||
|
||||
/* this function increase the dynamic storage need to store data about other
|
||||
* guests */
|
||||
/* Grow the peers array (by doubling) until it can hold index
 * @new_min_size; newly added slots are zero-initialized. */
static void increase_dynamic_storage(IVShmemState *s, int new_min_size) {

    int j, old_nb_alloc;

    old_nb_alloc = s->nb_peers;

    /* fixed: doubling from zero never terminates — start from 1 so the
     * loop below always makes progress */
    if (s->nb_peers == 0) {
        s->nb_peers = 1;
    }

    while (new_min_size >= s->nb_peers)
        s->nb_peers = s->nb_peers * 2;

    IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
    s->peers = qemu_realloc(s->peers, s->nb_peers * sizeof(Peer));

    /* zero out new pointers */
    for (j = old_nb_alloc; j < s->nb_peers; j++) {
        s->peers[j].eventfds = NULL;
        s->peers[j].nb_eventfds = 0;
    }
}
|
||||
|
||||
static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
|
||||
{
|
||||
IVShmemState *s = opaque;
|
||||
int incoming_fd, tmp_fd;
|
||||
int guest_max_eventfd;
|
||||
long incoming_posn;
|
||||
|
||||
memcpy(&incoming_posn, buf, sizeof(long));
|
||||
/* pick off s->server_chr->msgfd and store it, posn should accompany msg */
|
||||
tmp_fd = qemu_chr_get_msgfd(s->server_chr);
|
||||
IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, tmp_fd);
|
||||
|
||||
/* make sure we have enough space for this guest */
|
||||
if (incoming_posn >= s->nb_peers) {
|
||||
increase_dynamic_storage(s, incoming_posn);
|
||||
}
|
||||
|
||||
if (tmp_fd == -1) {
|
||||
/* if posn is positive and unseen before then this is our posn*/
|
||||
if ((incoming_posn >= 0) &&
|
||||
(s->peers[incoming_posn].eventfds == NULL)) {
|
||||
/* receive our posn */
|
||||
s->vm_id = incoming_posn;
|
||||
return;
|
||||
} else {
|
||||
/* otherwise an fd == -1 means an existing guest has gone away */
|
||||
IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
|
||||
close_guest_eventfds(s, incoming_posn);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* because of the implementation of get_msgfd, we need a dup */
|
||||
incoming_fd = dup(tmp_fd);
|
||||
|
||||
if (incoming_fd == -1) {
|
||||
fprintf(stderr, "could not allocate file descriptor %s\n",
|
||||
strerror(errno));
|
||||
return;
|
||||
}
|
||||
|
||||
/* if the position is -1, then it's shared memory region fd */
|
||||
if (incoming_posn == -1) {
|
||||
|
||||
void * map_ptr;
|
||||
|
||||
s->max_peer = 0;
|
||||
|
||||
if (check_shm_size(s, incoming_fd) == -1) {
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
/* mmap the region and map into the BAR2 */
|
||||
map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
|
||||
incoming_fd, 0);
|
||||
s->ivshmem_offset = qemu_ram_alloc_from_ptr(&s->dev.qdev,
|
||||
"ivshmem.bar2", s->ivshmem_size, map_ptr);
|
||||
|
||||
IVSHMEM_DPRINTF("guest pci addr = %" FMT_PCIBUS ", guest h/w addr = %"
|
||||
PRIu64 ", size = %" PRIu64 "\n", s->shm_pci_addr,
|
||||
s->ivshmem_offset, s->ivshmem_size);
|
||||
|
||||
if (s->shm_pci_addr > 0) {
|
||||
/* map memory into BAR2 */
|
||||
cpu_register_physical_memory(s->shm_pci_addr, s->ivshmem_size,
|
||||
s->ivshmem_offset);
|
||||
}
|
||||
|
||||
/* only store the fd if it is successfully mapped */
|
||||
s->shm_fd = incoming_fd;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* each guest has an array of eventfds, and we keep track of how many
|
||||
* guests for each VM */
|
||||
guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;
|
||||
|
||||
if (guest_max_eventfd == 0) {
|
||||
/* one eventfd per MSI vector */
|
||||
s->peers[incoming_posn].eventfds = (int *) qemu_malloc(s->vectors *
|
||||
sizeof(int));
|
||||
}
|
||||
|
||||
/* this is an eventfd for a particular guest VM */
|
||||
IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
|
||||
guest_max_eventfd, incoming_fd);
|
||||
s->peers[incoming_posn].eventfds[guest_max_eventfd] = incoming_fd;
|
||||
|
||||
/* increment count for particular guest */
|
||||
s->peers[incoming_posn].nb_eventfds++;
|
||||
|
||||
/* keep track of the maximum VM ID */
|
||||
if (incoming_posn > s->max_peer) {
|
||||
s->max_peer = incoming_posn;
|
||||
}
|
||||
|
||||
if (incoming_posn == s->vm_id) {
|
||||
s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
|
||||
s->peers[s->vm_id].eventfds[guest_max_eventfd],
|
||||
guest_max_eventfd);
|
||||
}
|
||||
|
||||
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
|
||||
if (kvm_set_ioeventfd_mmio_long(incoming_fd, s->mmio_addr + DOORBELL,
|
||||
(incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
|
||||
fprintf(stderr, "ivshmem: ioeventfd not available\n");
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* qdev reset hook: clear pending interrupt status. */
static void ivshmem_reset(DeviceState *d)
{
    IVShmemState *s = DO_UPCAST(IVShmemState, dev.qdev, d);

    s->intrstatus = 0;
}
|
||||
|
||||
/* BAR0 mapping callback: register the MMIO register window at the guest
 * physical address chosen by the PCI layer, then (re)bind any kernel
 * ioeventfds, which are keyed off the new doorbell address. */
static void ivshmem_mmio_map(PCIDevice *pci_dev, int region_num,
                       pcibus_t addr, pcibus_t size, int type)
{
    IVShmemState *s = DO_UPCAST(IVShmemState, dev, pci_dev);

    s->mmio_addr = addr;
    cpu_register_physical_memory(addr + 0, IVSHMEM_REG_BAR_SIZE,
                                                s->ivshmem_mmio_io_addr);

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        setup_ioeventfds(s);
    }
}
|
||||
|
||||
/* Parse the "size" property ("<n>", "<n>M" or "<n>G", case-insensitive;
 * a bare number means megabytes).  Exits on a malformed value or a size
 * that is not a power of two (a PCI BAR requirement). */
static uint64_t ivshmem_get_size(IVShmemState * s) {

    char *suffix;
    uint64_t value = strtoull(s->sizearg, &suffix, 10);

    switch (*suffix) {
    case 0: case 'M': case 'm':
        value <<= 20;
        break;
    case 'G': case 'g':
        value <<= 30;
        break;
    default:
        fprintf(stderr, "qemu: invalid ram size: %s\n", s->sizearg);
        exit(1);
    }

    /* BARs must be a power of 2 */
    if (!is_power_of_two(value)) {
        fprintf(stderr, "ivshmem: size must be power of 2\n");
        exit(1);
    }

    return value;
}
|
||||
|
||||
/* Initialize MSI-X: register BAR1 for the vector table, mark every
 * vector in use, and allocate the per-vector eventfd bookkeeping.
 * Exits if MSI-X setup fails. */
static void ivshmem_setup_msi(IVShmemState * s) {

    int i;

    /* allocate the MSI-X vectors */
    if (msix_init(&s->dev, s->vectors, 1, 0) != 0) {
        IVSHMEM_DPRINTF("msix initialization failed\n");
        exit(1);
    }

    pci_register_bar(&s->dev, 1,
                     msix_bar_size(&s->dev),
                     PCI_BASE_ADDRESS_SPACE_MEMORY,
                     msix_mmio_map);
    IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);

    /* 'activate' the vectors */
    for (i = 0; i < s->vectors; i++) {
        msix_vector_use(&s->dev, i);
    }

    /* allocate Qemu char devices for receiving interrupts */
    s->eventfd_table = qemu_mallocz(s->vectors * sizeof(EventfdEntry));
}
|
||||
|
||||
/* Migration save: PCI config first, then either the MSI-X state or the
 * legacy interrupt registers, mirroring ivshmem_load. */
static void ivshmem_save(QEMUFile* f, void *opaque)
{
    IVShmemState *proxy = opaque;

    IVSHMEM_DPRINTF("ivshmem_save\n");
    pci_device_save(&proxy->dev, f);

    if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
        msix_save(&proxy->dev, f);
    } else {
        qemu_put_be32(f, proxy->intrstatus);
        qemu_put_be32(f, proxy->intrmask);
    }
}
|
||||
|
||||
/* Migration load counterpart of ivshmem_save.  Only version 0 streams
 * are understood, and 'peer' role devices refuse migration outright. */
static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
{
    IVShmemState *proxy = opaque;
    int ret, i;

    IVSHMEM_DPRINTF("ivshmem_load\n");

    if (version_id > 0) {
        return -EINVAL;
    }

    if (proxy->role_val == IVSHMEM_PEER) {
        fprintf(stderr, "ivshmem: 'peer' devices are not migratable\n");
        return -EINVAL;
    }

    ret = pci_device_load(&proxy->dev, f);
    if (ret) {
        return ret;
    }

    if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
        msix_load(&proxy->dev, f);
        for (i = 0; i < proxy->vectors; i++) {
            msix_vector_use(&proxy->dev, i);
        }
    } else {
        proxy->intrstatus = qemu_get_be32(f);
        proxy->intrmask = qemu_get_be32(f);
    }

    return 0;
}
|
||||
|
||||
/*
 * qdev init hook for the ivshmem PCI device.
 *
 * Two modes of operation:
 *   - chardev given (a unix: socket): talk to the ivshmem server, which
 *     supplies the shared memory fd and peer eventfds asynchronously
 *     (see ivshmem_read);
 *   - shm given: shm_open()/map the POSIX shared memory object directly.
 * Exits on fatal configuration errors, per this file's convention.
 */
static int pci_ivshmem_init(PCIDevice *dev)
{
    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
    uint8_t *pci_conf;

    if (s->sizearg == NULL)
        s->ivshmem_size = 4 << 20; /* 4 MB default */
    else {
        s->ivshmem_size = ivshmem_get_size(s);
    }

    register_savevm(&s->dev.qdev, "ivshmem", 0, 0, ivshmem_save, ivshmem_load,
                                                                        dev);

    /* IRQFD requires MSI */
    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
        !ivshmem_has_feature(s, IVSHMEM_MSI)) {
        fprintf(stderr, "ivshmem: ioeventfd/irqfd requires MSI\n");
        exit(1);
    }

    /* check that role is reasonable */
    if (s->role) {
        if (strncmp(s->role, "peer", 5) == 0) {
            s->role_val = IVSHMEM_PEER;
        } else if (strncmp(s->role, "master", 7) == 0) {
            s->role_val = IVSHMEM_MASTER;
        } else {
            fprintf(stderr, "ivshmem: 'role' must be 'peer' or 'master'\n");
            exit(1);
        }
    } else {
        s->role_val = IVSHMEM_MASTER; /* default */
    }

    if (s->role_val == IVSHMEM_PEER) {
        register_device_unmigratable(&s->dev.qdev, "ivshmem", s);
    }

    pci_conf = s->dev.config;
    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REDHAT_QUMRANET);
    /* device ID 0x1110, little-endian */
    pci_conf[0x02] = 0x10;
    pci_conf[0x03] = 0x11;
    pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
    pci_config_set_class(pci_conf, PCI_CLASS_MEMORY_RAM);
    pci_conf[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL;

    pci_config_set_interrupt_pin(pci_conf, 1);

    s->shm_pci_addr = 0;
    s->ivshmem_offset = 0;
    s->shm_fd = 0;

    s->ivshmem_mmio_io_addr = cpu_register_io_memory(ivshmem_mmio_read,
                                    ivshmem_mmio_write, s);
    /* region for registers*/
    pci_register_bar(&s->dev, 0, IVSHMEM_REG_BAR_SIZE,
                           PCI_BASE_ADDRESS_SPACE_MEMORY, ivshmem_mmio_map);

    if ((s->server_chr != NULL) &&
                        (strncmp(s->server_chr->filename, "unix:", 5) == 0)) {
        /* if we get a UNIX socket as the parameter we will talk
         * to the ivshmem server to receive the memory region */

        if (s->shmobj != NULL) {
            fprintf(stderr, "WARNING: do not specify both 'chardev' "
                                                "and 'shm' with ivshmem\n");
        }

        IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
                                                    s->server_chr->filename);

        if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
            ivshmem_setup_msi(s);
        }

        /* we allocate enough space for 16 guests and grow as needed */
        s->nb_peers = 16;
        s->vm_id = -1;

        /* allocate/initialize space for interrupt handling */
        s->peers = qemu_mallocz(s->nb_peers * sizeof(Peer));

        pci_register_bar(&s->dev, 2, s->ivshmem_size,
                                PCI_BASE_ADDRESS_SPACE_MEMORY, ivshmem_map);

        s->eventfd_chr = qemu_mallocz(s->vectors * sizeof(CharDriverState *));

        qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
                     ivshmem_event, s);
    } else {
        /* just map the file immediately, we're not using a server */
        int fd;

        if (s->shmobj == NULL) {
            fprintf(stderr, "Must specify 'chardev' or 'shm' to ivshmem\n");
            /* fixed: previously fell through and called shm_open(NULL) */
            exit(1);
        }

        IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);

        /* try opening with O_EXCL and if it succeeds zero the memory
         * by truncating to 0 */
        if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
                        S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
           /* truncate file to length PCI device's memory */
            if (ftruncate(fd, s->ivshmem_size) != 0) {
                fprintf(stderr, "ivshmem: could not truncate shared file\n");
            }

        } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
                        S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
            fprintf(stderr, "ivshmem: could not open shared file\n");
            exit(-1);
        }

        if (check_shm_size(s, fd) == -1) {
            exit(-1);
        }

        create_shared_memory_BAR(s, fd);
    }

    return 0;
}
|
||||
|
||||
/* qdev exit hook: release the MMIO region and savevm registration. */
static int pci_ivshmem_uninit(PCIDevice *dev)
{
    IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);

    cpu_unregister_io_memory(s->ivshmem_mmio_io_addr);
    unregister_savevm(&dev->qdev, "ivshmem", s);

    return 0;
}
|
||||
|
||||
static PCIDeviceInfo ivshmem_info = {
|
||||
.qdev.name = "ivshmem",
|
||||
.qdev.size = sizeof(IVShmemState),
|
||||
.qdev.reset = ivshmem_reset,
|
||||
.init = pci_ivshmem_init,
|
||||
.exit = pci_ivshmem_uninit,
|
||||
.qdev.props = (Property[]) {
|
||||
DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
|
||||
DEFINE_PROP_STRING("size", IVShmemState, sizearg),
|
||||
DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
|
||||
DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
|
||||
DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
|
||||
DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
|
||||
DEFINE_PROP_STRING("role", IVShmemState, role),
|
||||
DEFINE_PROP_END_OF_LIST(),
|
||||
}
|
||||
};
|
||||
|
||||
static void ivshmem_register_devices(void)
|
||||
{
|
||||
pci_qdev_register(&ivshmem_info);
|
||||
}
|
||||
|
||||
device_init(ivshmem_register_devices)
|
@@ -485,16 +485,26 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
|
||||
return buflen;
|
||||
}
|
||||
|
||||
static int mode_sense_page(SCSIRequest *req, int page, uint8_t *p)
|
||||
static int mode_sense_page(SCSIRequest *req, int page, uint8_t *p,
|
||||
int page_control)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
|
||||
BlockDriverState *bdrv = s->bs;
|
||||
int cylinders, heads, secs;
|
||||
|
||||
/*
|
||||
* If Changeable Values are requested, a mask denoting those mode parameters
|
||||
* that are changeable shall be returned. As we currently don't support
|
||||
* parameter changes via MODE_SELECT all bits are returned set to zero.
|
||||
* The buffer was already menset to zero by the caller of this function.
|
||||
*/
|
||||
switch (page) {
|
||||
case 4: /* Rigid disk device geometry page. */
|
||||
p[0] = 4;
|
||||
p[1] = 0x16;
|
||||
if (page_control == 1) { /* Changeable Values */
|
||||
return p[1] + 2;
|
||||
}
|
||||
/* if a geometry hint is available, use it */
|
||||
bdrv_get_geometry_hint(bdrv, &cylinders, &heads, &secs);
|
||||
p[2] = (cylinders >> 16) & 0xff;
|
||||
@@ -519,11 +529,14 @@ static int mode_sense_page(SCSIRequest *req, int page, uint8_t *p)
|
||||
/* Medium rotation rate [rpm], 5400 rpm */
|
||||
p[20] = (5400 >> 8) & 0xff;
|
||||
p[21] = 5400 & 0xff;
|
||||
return 0x16;
|
||||
return p[1] + 2;
|
||||
|
||||
case 5: /* Flexible disk device geometry page. */
|
||||
p[0] = 5;
|
||||
p[1] = 0x1e;
|
||||
if (page_control == 1) { /* Changeable Values */
|
||||
return p[1] + 2;
|
||||
}
|
||||
/* Transfer rate [kbit/s], 5Mbit/s */
|
||||
p[2] = 5000 >> 8;
|
||||
p[3] = 5000 & 0xff;
|
||||
@@ -555,21 +568,27 @@ static int mode_sense_page(SCSIRequest *req, int page, uint8_t *p)
|
||||
/* Medium rotation rate [rpm], 5400 rpm */
|
||||
p[28] = (5400 >> 8) & 0xff;
|
||||
p[29] = 5400 & 0xff;
|
||||
return 0x1e;
|
||||
return p[1] + 2;
|
||||
|
||||
case 8: /* Caching page. */
|
||||
p[0] = 8;
|
||||
p[1] = 0x12;
|
||||
if (page_control == 1) { /* Changeable Values */
|
||||
return p[1] + 2;
|
||||
}
|
||||
if (bdrv_enable_write_cache(s->bs)) {
|
||||
p[2] = 4; /* WCE */
|
||||
}
|
||||
return 20;
|
||||
return p[1] + 2;
|
||||
|
||||
case 0x2a: /* CD Capabilities and Mechanical Status page. */
|
||||
if (bdrv_get_type_hint(bdrv) != BDRV_TYPE_CDROM)
|
||||
return 0;
|
||||
p[0] = 0x2a;
|
||||
p[1] = 0x14;
|
||||
if (page_control == 1) { /* Changeable Values */
|
||||
return p[1] + 2;
|
||||
}
|
||||
p[2] = 3; // CD-R & CD-RW read
|
||||
p[3] = 0; // Writing not supported
|
||||
p[4] = 0x7f; /* Audio, composite, digital out,
|
||||
@@ -593,7 +612,7 @@ static int mode_sense_page(SCSIRequest *req, int page, uint8_t *p)
|
||||
p[19] = (16 * 176) & 0xff;
|
||||
p[20] = (16 * 176) >> 8; // 16x write speed current
|
||||
p[21] = (16 * 176) & 0xff;
|
||||
return 22;
|
||||
return p[1] + 2;
|
||||
|
||||
default:
|
||||
return 0;
|
||||
@@ -604,29 +623,46 @@ static int scsi_disk_emulate_mode_sense(SCSIRequest *req, uint8_t *outbuf)
|
||||
{
|
||||
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
|
||||
uint64_t nb_sectors;
|
||||
int page, dbd, buflen;
|
||||
int page, dbd, buflen, page_control;
|
||||
uint8_t *p;
|
||||
uint8_t dev_specific_param;
|
||||
|
||||
dbd = req->cmd.buf[1] & 0x8;
|
||||
page = req->cmd.buf[2] & 0x3f;
|
||||
DPRINTF("Mode Sense (page %d, len %zd)\n", page, req->cmd.xfer);
|
||||
page_control = (req->cmd.buf[2] & 0xc0) >> 6;
|
||||
DPRINTF("Mode Sense(%d) (page %d, len %d, page_control %d)\n",
|
||||
(req->cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, len, page_control);
|
||||
memset(outbuf, 0, req->cmd.xfer);
|
||||
p = outbuf;
|
||||
|
||||
p[1] = 0; /* Default media type. */
|
||||
p[3] = 0; /* Block descriptor length. */
|
||||
if (bdrv_is_read_only(s->bs)) {
|
||||
p[2] = 0x80; /* Readonly. */
|
||||
dev_specific_param = 0x80; /* Readonly. */
|
||||
} else {
|
||||
dev_specific_param = 0x00;
|
||||
}
|
||||
|
||||
if (req->cmd.buf[0] == MODE_SENSE) {
|
||||
p[1] = 0; /* Default media type. */
|
||||
p[2] = dev_specific_param;
|
||||
p[3] = 0; /* Block descriptor length. */
|
||||
p += 4;
|
||||
} else { /* MODE_SENSE_10 */
|
||||
p[2] = 0; /* Default media type. */
|
||||
p[3] = dev_specific_param;
|
||||
p[6] = p[7] = 0; /* Block descriptor length. */
|
||||
p += 8;
|
||||
}
|
||||
p += 4;
|
||||
|
||||
bdrv_get_geometry(s->bs, &nb_sectors);
|
||||
if ((~dbd) & nb_sectors) {
|
||||
outbuf[3] = 8; /* Block descriptor length */
|
||||
if (!dbd && nb_sectors) {
|
||||
if (req->cmd.buf[0] == MODE_SENSE) {
|
||||
outbuf[3] = 8; /* Block descriptor length */
|
||||
} else { /* MODE_SENSE_10 */
|
||||
outbuf[7] = 8; /* Block descriptor length */
|
||||
}
|
||||
nb_sectors /= s->cluster_size;
|
||||
nb_sectors--;
|
||||
if (nb_sectors > 0xffffff)
|
||||
nb_sectors = 0xffffff;
|
||||
nb_sectors = 0;
|
||||
p[0] = 0; /* media density code */
|
||||
p[1] = (nb_sectors >> 16) & 0xff;
|
||||
p[2] = (nb_sectors >> 8) & 0xff;
|
||||
@@ -638,21 +674,37 @@ static int scsi_disk_emulate_mode_sense(SCSIRequest *req, uint8_t *outbuf)
|
||||
p += 8;
|
||||
}
|
||||
|
||||
if (page_control == 3) { /* Saved Values */
|
||||
return -1; /* ILLEGAL_REQUEST */
|
||||
}
|
||||
|
||||
switch (page) {
|
||||
case 0x04:
|
||||
case 0x05:
|
||||
case 0x08:
|
||||
case 0x2a:
|
||||
p += mode_sense_page(req, page, p);
|
||||
p += mode_sense_page(req, page, p, page_control);
|
||||
break;
|
||||
case 0x3f:
|
||||
p += mode_sense_page(req, 0x08, p);
|
||||
p += mode_sense_page(req, 0x2a, p);
|
||||
p += mode_sense_page(req, 0x08, p, page_control);
|
||||
p += mode_sense_page(req, 0x2a, p, page_control);
|
||||
break;
|
||||
default:
|
||||
return -1; /* ILLEGAL_REQUEST */
|
||||
}
|
||||
|
||||
buflen = p - outbuf;
|
||||
outbuf[0] = buflen - 4;
|
||||
/*
|
||||
* The mode data length field specifies the length in bytes of the
|
||||
* following data that is available to be transferred. The mode data
|
||||
* length does not include itself.
|
||||
*/
|
||||
if (req->cmd.buf[0] == MODE_SENSE) {
|
||||
outbuf[0] = buflen - 1;
|
||||
} else { /* MODE_SENSE_10 */
|
||||
outbuf[0] = ((buflen - 2) >> 8) & 0xff;
|
||||
outbuf[1] = (buflen - 2) & 0xff;
|
||||
}
|
||||
if (buflen > req->cmd.xfer)
|
||||
buflen = req->cmd.xfer;
|
||||
return buflen;
|
||||
|
@@ -28,6 +28,7 @@ typedef struct VirtIOBlock
|
||||
BlockConf *conf;
|
||||
unsigned short sector_mask;
|
||||
char sn[BLOCK_SERIAL_STRLEN];
|
||||
DeviceState *qdev;
|
||||
} VirtIOBlock;
|
||||
|
||||
static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
|
||||
@@ -479,6 +480,11 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
|
||||
qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
|
||||
req->next = s->rq;
|
||||
s->rq = req;
|
||||
|
||||
virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
|
||||
req->elem.in_num, 1);
|
||||
virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
|
||||
req->elem.out_num, 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -522,9 +528,16 @@ VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf)
|
||||
s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
|
||||
|
||||
qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
|
||||
s->qdev = dev;
|
||||
register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
|
||||
virtio_blk_save, virtio_blk_load, s);
|
||||
bdrv_set_removable(s->bs, 0);
|
||||
|
||||
return &s->vdev;
|
||||
}
|
||||
|
||||
void virtio_blk_exit(VirtIODevice *vdev)
|
||||
{
|
||||
VirtIOBlock *s = to_virtio_blk(vdev);
|
||||
unregister_savevm(s->qdev, "virtio-blk", s);
|
||||
}
|
||||
|
@@ -569,6 +569,7 @@ static int virtio_blk_exit_pci(PCIDevice *pci_dev)
|
||||
{
|
||||
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
|
||||
|
||||
virtio_blk_exit(proxy->vdev);
|
||||
blockdev_mark_auto_del(proxy->block.bs);
|
||||
return virtio_exit_pci(pci_dev);
|
||||
}
|
||||
|
40
hw/virtio.c
40
hw/virtio.c
@@ -360,11 +360,26 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
|
||||
size_t num_sg, int is_write)
|
||||
{
|
||||
unsigned int i;
|
||||
target_phys_addr_t len;
|
||||
|
||||
for (i = 0; i < num_sg; i++) {
|
||||
len = sg[i].iov_len;
|
||||
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
|
||||
if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
|
||||
fprintf(stderr, "virtio: trying to map MMIO memory\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
|
||||
{
|
||||
unsigned int i, head, max;
|
||||
target_phys_addr_t desc_pa = vq->vring.desc;
|
||||
target_phys_addr_t len;
|
||||
|
||||
if (!virtqueue_num_heads(vq, vq->last_avail_idx))
|
||||
return 0;
|
||||
@@ -388,29 +403,20 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
|
||||
i = 0;
|
||||
}
|
||||
|
||||
/* Collect all the descriptors */
|
||||
do {
|
||||
struct iovec *sg;
|
||||
int is_write = 0;
|
||||
|
||||
if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
|
||||
elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
|
||||
sg = &elem->in_sg[elem->in_num++];
|
||||
is_write = 1;
|
||||
} else
|
||||
} else {
|
||||
elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
|
||||
sg = &elem->out_sg[elem->out_num++];
|
||||
|
||||
/* Grab the first descriptor, and check it's OK. */
|
||||
sg->iov_len = vring_desc_len(desc_pa, i);
|
||||
len = sg->iov_len;
|
||||
|
||||
sg->iov_base = cpu_physical_memory_map(vring_desc_addr(desc_pa, i),
|
||||
&len, is_write);
|
||||
|
||||
if (sg->iov_base == NULL || len != sg->iov_len) {
|
||||
fprintf(stderr, "virtio: trying to map MMIO memory\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
sg->iov_len = vring_desc_len(desc_pa, i);
|
||||
|
||||
/* If we've got too many, that implies a descriptor loop. */
|
||||
if ((elem->in_num + elem->out_num) > max) {
|
||||
fprintf(stderr, "Looped descriptor");
|
||||
@@ -418,6 +424,10 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
|
||||
}
|
||||
} while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
|
||||
|
||||
/* Now map what we have collected */
|
||||
virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
|
||||
virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);
|
||||
|
||||
elem->index = head;
|
||||
|
||||
vq->inuse++;
|
||||
|
@@ -81,6 +81,7 @@ typedef struct VirtQueueElement
|
||||
unsigned int out_num;
|
||||
unsigned int in_num;
|
||||
target_phys_addr_t in_addr[VIRTQUEUE_MAX_SIZE];
|
||||
target_phys_addr_t out_addr[VIRTQUEUE_MAX_SIZE];
|
||||
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
|
||||
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
|
||||
} VirtQueueElement;
|
||||
@@ -142,6 +143,8 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count);
|
||||
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
|
||||
unsigned int len, unsigned int idx);
|
||||
|
||||
void virtqueue_map_sg(struct iovec *sg, target_phys_addr_t *addr,
|
||||
size_t num_sg, int is_write);
|
||||
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
|
||||
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
|
||||
|
||||
@@ -194,6 +197,7 @@ VirtIODevice *virtio_9p_init(DeviceState *dev, V9fsConf *conf);
|
||||
|
||||
|
||||
void virtio_net_exit(VirtIODevice *vdev);
|
||||
void virtio_blk_exit(VirtIODevice *vdev);
|
||||
|
||||
#define DEFINE_VIRTIO_COMMON_FEATURES(_state, _field) \
|
||||
DEFINE_PROP_BIT("indirect_desc", _state, _field, \
|
||||
|
32
kvm-all.c
32
kvm-all.c
@@ -1241,6 +1241,38 @@ int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
|
||||
return r;
|
||||
}
|
||||
|
||||
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
|
||||
{
|
||||
#ifdef KVM_IOEVENTFD
|
||||
int ret;
|
||||
struct kvm_ioeventfd iofd;
|
||||
|
||||
iofd.datamatch = val;
|
||||
iofd.addr = addr;
|
||||
iofd.len = 4;
|
||||
iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
|
||||
iofd.fd = fd;
|
||||
|
||||
if (!kvm_enabled()) {
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
if (!assign) {
|
||||
iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
|
||||
}
|
||||
|
||||
ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
|
||||
|
||||
if (ret < 0) {
|
||||
return -errno;
|
||||
}
|
||||
|
||||
return 0;
|
||||
#else
|
||||
return -ENOSYS;
|
||||
#endif
|
||||
}
|
||||
|
||||
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
|
||||
{
|
||||
#ifdef KVM_IOEVENTFD
|
||||
|
@@ -136,3 +136,8 @@ int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
1
kvm.h
1
kvm.h
@@ -175,6 +175,7 @@ static inline void cpu_synchronize_post_init(CPUState *env)
|
||||
}
|
||||
|
||||
#endif
|
||||
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign);
|
||||
|
||||
int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
|
||||
#endif
|
||||
|
@@ -225,13 +225,13 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
|
||||
int prot;
|
||||
int looped = 0;
|
||||
|
||||
if (size > reserved_va) {
|
||||
if (size > RESERVED_VA) {
|
||||
return (abi_ulong)-1;
|
||||
}
|
||||
|
||||
last_addr = start;
|
||||
for (addr = start; last_addr + size != addr; addr += qemu_host_page_size) {
|
||||
if (last_addr + size >= reserved_va
|
||||
if (last_addr + size >= RESERVED_VA
|
||||
|| (abi_ulong)(last_addr + size) < last_addr) {
|
||||
if (looped) {
|
||||
return (abi_ulong)-1;
|
||||
@@ -271,7 +271,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
|
||||
|
||||
size = HOST_PAGE_ALIGN(size);
|
||||
|
||||
if (reserved_va) {
|
||||
if (RESERVED_VA) {
|
||||
return mmap_find_vma_reserved(start, size);
|
||||
}
|
||||
|
||||
@@ -651,7 +651,7 @@ int target_munmap(abi_ulong start, abi_ulong len)
|
||||
ret = 0;
|
||||
/* unmap what we can */
|
||||
if (real_start < real_end) {
|
||||
if (reserved_va) {
|
||||
if (RESERVED_VA) {
|
||||
mmap_reserve(real_start, real_end - real_start);
|
||||
} else {
|
||||
ret = munmap(g2h(real_start), real_end - real_start);
|
||||
@@ -679,7 +679,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
|
||||
flags,
|
||||
g2h(new_addr));
|
||||
|
||||
if (reserved_va && host_addr != MAP_FAILED) {
|
||||
if (RESERVED_VA && host_addr != MAP_FAILED) {
|
||||
/* If new and old addresses overlap then the above mremap will
|
||||
already have failed with EINVAL. */
|
||||
mmap_reserve(old_addr, old_size);
|
||||
@@ -701,7 +701,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
|
||||
}
|
||||
} else {
|
||||
int prot = 0;
|
||||
if (reserved_va && old_size < new_size) {
|
||||
if (RESERVED_VA && old_size < new_size) {
|
||||
abi_ulong addr;
|
||||
for (addr = old_addr + old_size;
|
||||
addr < old_addr + new_size;
|
||||
@@ -711,7 +711,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
|
||||
}
|
||||
if (prot == 0) {
|
||||
host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
|
||||
if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
|
||||
if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
|
||||
mmap_reserve(old_addr + old_size, new_size - old_size);
|
||||
}
|
||||
} else {
|
||||
|
@@ -599,6 +599,7 @@ BlockDriverAIOCB *paio_ioctl(BlockDriverState *bs, int fd,
|
||||
acb->aio_type = QEMU_AIO_IOCTL;
|
||||
acb->aio_fildes = fd;
|
||||
acb->ev_signo = SIGUSR2;
|
||||
acb->async_context_id = get_async_context_id();
|
||||
acb->aio_offset = 0;
|
||||
acb->aio_ioctl_buf = buf;
|
||||
acb->aio_ioctl_cmd = req;
|
||||
|
@@ -2087,6 +2087,12 @@ static void tcp_chr_read(void *opaque)
|
||||
}
|
||||
}
|
||||
|
||||
CharDriverState *qemu_chr_open_eventfd(int eventfd){
|
||||
|
||||
return qemu_chr_open_fd(eventfd, eventfd);
|
||||
|
||||
}
|
||||
|
||||
static void tcp_chr_connect(void *opaque)
|
||||
{
|
||||
CharDriverState *chr = opaque;
|
||||
|
@@ -94,6 +94,9 @@ void qemu_chr_info_print(Monitor *mon, const QObject *ret_data);
|
||||
void qemu_chr_info(Monitor *mon, QObject **ret_data);
|
||||
CharDriverState *qemu_chr_find(const char *name);
|
||||
|
||||
/* add an eventfd to the qemu devices that are polled */
|
||||
CharDriverState *qemu_chr_open_eventfd(int eventfd);
|
||||
|
||||
extern int term_escape_char;
|
||||
|
||||
/* async I/O support */
|
||||
|
@@ -706,6 +706,49 @@ Using the @option{-net socket} option, it is possible to make VLANs
|
||||
that span several QEMU instances. See @ref{sec_invocation} to have a
|
||||
basic example.
|
||||
|
||||
@section Other Devices
|
||||
|
||||
@subsection Inter-VM Shared Memory device
|
||||
|
||||
With KVM enabled on a Linux host, a shared memory device is available. Guests
|
||||
map a POSIX shared memory region into the guest as a PCI device that enables
|
||||
zero-copy communication to the application level of the guests. The basic
|
||||
syntax is:
|
||||
|
||||
@example
|
||||
qemu -device ivshmem,size=<size in format accepted by -m>[,shm=<shm name>]
|
||||
@end example
|
||||
|
||||
If desired, interrupts can be sent between guest VMs accessing the same shared
|
||||
memory region. Interrupt support requires using a shared memory server and
|
||||
using a chardev socket to connect to it. The code for the shared memory server
|
||||
is qemu.git/contrib/ivshmem-server. An example syntax when using the shared
|
||||
memory server is:
|
||||
|
||||
@example
|
||||
qemu -device ivshmem,size=<size in format accepted by -m>[,chardev=<id>]
|
||||
[,msi=on][,ioeventfd=on][,vectors=n][,role=peer|master]
|
||||
qemu -chardev socket,path=<path>,id=<id>
|
||||
@end example
|
||||
|
||||
When using the server, the guest will be assigned a VM ID (>=0) that allows guests
|
||||
using the same server to communicate via interrupts. Guests can read their
|
||||
VM ID from a device register (see example code). Since receiving the shared
|
||||
memory region from the server is asynchronous, there is a (small) chance the
|
||||
guest may boot before the shared memory is attached. To allow an application
|
||||
to ensure shared memory is attached, the VM ID register will return -1 (an
|
||||
invalid VM ID) until the memory is attached. Once the shared memory is
|
||||
attached, the VM ID will return the guest's valid VM ID. With these semantics,
|
||||
the guest application can check to ensure the shared memory is attached to the
|
||||
guest before proceeding.
|
||||
|
||||
The @option{role} argument can be set to either master or peer and will affect
|
||||
how the shared memory is migrated. With @option{role=master}, the guest will
|
||||
copy the shared memory on migration to the destination host. With
|
||||
@option{role=peer}, the guest will not be able to migrate with the device attached.
|
||||
With the @option{peer} case, the device should be detached and then reattached
|
||||
after migration using the PCI hotplug support.
|
||||
|
||||
@node direct_linux_boot
|
||||
@section Direct Linux Boot
|
||||
|
||||
|
@@ -783,7 +783,8 @@ static int img_convert(int argc, char **argv)
|
||||
goto out;
|
||||
}
|
||||
|
||||
out_bs = bdrv_new_open(out_filename, out_fmt, BDRV_O_FLAGS | BDRV_O_RDWR);
|
||||
out_bs = bdrv_new_open(out_filename, out_fmt,
|
||||
BDRV_O_FLAGS | BDRV_O_RDWR | BDRV_O_NO_FLUSH);
|
||||
if (!out_bs) {
|
||||
ret = -1;
|
||||
goto out;
|
||||
@@ -1286,7 +1287,7 @@ static int img_rebase(int argc, char **argv)
|
||||
}
|
||||
|
||||
bs_new_backing = bdrv_new("new_backing");
|
||||
ret = bdrv_open(bs_new_backing, out_baseimg, BDRV_O_FLAGS | BDRV_O_RDWR,
|
||||
ret = bdrv_open(bs_new_backing, out_baseimg, BDRV_O_FLAGS,
|
||||
new_backing_drv);
|
||||
if (ret) {
|
||||
error("Could not open new backing file '%s'", out_baseimg);
|
||||
|
@@ -118,7 +118,7 @@ ETEXI
|
||||
DEF("drive", HAS_ARG, QEMU_OPTION_drive,
|
||||
"-drive [file=file][,if=type][,bus=n][,unit=m][,media=d][,index=i]\n"
|
||||
" [,cyls=c,heads=h,secs=s[,trans=t]][,snapshot=on|off]\n"
|
||||
" [,cache=writethrough|writeback|unsafe|none][,format=f]\n"
|
||||
" [,cache=writethrough|writeback|none|unsafe][,format=f]\n"
|
||||
" [,serial=s][,addr=A][,id=name][,aio=threads|native]\n"
|
||||
" [,readonly=on|off]\n"
|
||||
" use 'file' as a drive image\n", QEMU_ARCH_ALL)
|
||||
|
50
savevm.c
50
savevm.c
@@ -1018,6 +1018,7 @@ typedef struct SaveStateEntry {
|
||||
const VMStateDescription *vmsd;
|
||||
void *opaque;
|
||||
CompatEntry *compat;
|
||||
int no_migrate;
|
||||
} SaveStateEntry;
|
||||
|
||||
|
||||
@@ -1081,6 +1082,7 @@ int register_savevm_live(DeviceState *dev,
|
||||
se->load_state = load_state;
|
||||
se->opaque = opaque;
|
||||
se->vmsd = NULL;
|
||||
se->no_migrate = 0;
|
||||
|
||||
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
|
||||
char *id = dev->parent_bus->info->get_dev_path(dev);
|
||||
@@ -1139,11 +1141,39 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
|
||||
QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) {
|
||||
if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
|
||||
QTAILQ_REMOVE(&savevm_handlers, se, entry);
|
||||
if (se->compat) {
|
||||
qemu_free(se->compat);
|
||||
}
|
||||
qemu_free(se);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* mark a device as not to be migrated, that is the device should be
|
||||
unplugged before migration */
|
||||
void register_device_unmigratable(DeviceState *dev, const char *idstr,
|
||||
void *opaque)
|
||||
{
|
||||
SaveStateEntry *se;
|
||||
char id[256] = "";
|
||||
|
||||
if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
|
||||
char *path = dev->parent_bus->info->get_dev_path(dev);
|
||||
if (path) {
|
||||
pstrcpy(id, sizeof(id), path);
|
||||
pstrcat(id, sizeof(id), "/");
|
||||
qemu_free(path);
|
||||
}
|
||||
}
|
||||
pstrcat(id, sizeof(id), idstr);
|
||||
|
||||
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
|
||||
if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
|
||||
se->no_migrate = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
|
||||
const VMStateDescription *vmsd,
|
||||
void *opaque, int alias_id,
|
||||
@@ -1206,6 +1236,9 @@ void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
|
||||
QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) {
|
||||
if (se->vmsd == vmsd && se->opaque == opaque) {
|
||||
QTAILQ_REMOVE(&savevm_handlers, se, entry);
|
||||
if (se->compat) {
|
||||
qemu_free(se->compat);
|
||||
}
|
||||
qemu_free(se);
|
||||
}
|
||||
}
|
||||
@@ -1347,13 +1380,19 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
|
||||
return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
|
||||
}
|
||||
|
||||
static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
|
||||
static int vmstate_save(QEMUFile *f, SaveStateEntry *se)
|
||||
{
|
||||
if (se->no_migrate) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!se->vmsd) { /* Old style */
|
||||
se->save_state(f, se->opaque);
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
vmstate_save_state(f,se->vmsd, se->opaque);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define QEMU_VM_FILE_MAGIC 0x5145564d
|
||||
@@ -1448,6 +1487,7 @@ int qemu_savevm_state_iterate(Monitor *mon, QEMUFile *f)
|
||||
int qemu_savevm_state_complete(Monitor *mon, QEMUFile *f)
|
||||
{
|
||||
SaveStateEntry *se;
|
||||
int r;
|
||||
|
||||
cpu_synchronize_all_states();
|
||||
|
||||
@@ -1480,7 +1520,11 @@ int qemu_savevm_state_complete(Monitor *mon, QEMUFile *f)
|
||||
qemu_put_be32(f, se->instance_id);
|
||||
qemu_put_be32(f, se->version_id);
|
||||
|
||||
vmstate_save(f, se);
|
||||
r = vmstate_save(f, se);
|
||||
if (r < 0) {
|
||||
monitor_printf(mon, "cannot migrate with device '%s'\n", se->idstr);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
qemu_put_byte(f, QEMU_VM_EOF);
|
||||
|
Reference in New Issue
Block a user