Compare commits

124 Commits

pull-ui-20 ... pull-ui-20
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 58aa7d8e44 | ||
|  | a6ccabd676 | ||
|  | 46d921bebe | ||
|  | e0d2bd5195 | ||
|  | 91ec41dc3f | ||
|  | 1464ad45cd | ||
|  | 48eb62a74f | ||
|  | b1918fbb1c | ||
|  | 10f759079e | ||
|  | b5a1b44318 | ||
|  | 0399293e5b | ||
|  | f194a1ae53 | ||
|  | 9ee86b8526 | ||
|  | 4d91e9115c | ||
|  | c81200b014 | ||
|  | 14f00c6c49 | ||
|  | 96a1616c85 | ||
|  | e55250c6cb | ||
|  | 3c0f12df65 | ||
|  | ba63cf47a9 | ||
|  | a55c910e0b | ||
|  | 9776f63645 | ||
|  | 7ef295ea5b | ||
|  | 140b7ce5ff | ||
|  | 04ae712a9f | ||
|  | e334bd3190 | ||
|  | 9886ecdf31 | ||
|  | 91cca2cda9 | ||
|  | aa6489da4e | ||
|  | dacf0a2ff7 | ||
|  | 12dcc3217d | ||
|  | 73462dddf6 | ||
|  | c3ae85fc8f | ||
|  | 9c5a746038 | ||
|  | b2e62d9a7b | ||
|  | ed50ff7875 | ||
|  | f9fd40ebe4 | ||
|  | 49017bd8b4 | ||
|  | a0e1e6d705 | ||
|  | eab713941a | ||
|  | 4824a61a6d | ||
|  | 738a5d9fbb | ||
|  | 16f4a8dc5c | ||
|  | 76151cacfe | ||
|  | 83ec1923cd | ||
|  | 8b41c30525 | ||
|  | 0719e71e52 | ||
|  | 71c2768433 | ||
|  | 8c4f0eb94c | ||
|  | 2d3b7c0164 | ||
|  | f8693c2cd0 | ||
|  | 60253ed1e6 | ||
|  | 9f14b0add1 | ||
|  | 74074e8a7c | ||
|  | 3c52ddcdc5 | ||
|  | 750cf86932 | ||
|  | ed6128ebbd | ||
|  | 4ade0541de | ||
|  | 3d211d9f4d | ||
|  | b23197f9cf | ||
|  | bc9beb47c7 | ||
|  | 5d4e1a1081 | ||
|  | 1bcea73e13 | ||
|  | 56797b1fbc | ||
|  | 3596f524d4 | ||
|  | 62cb4145bb | ||
|  | 4779dc1d19 | ||
|  | 23d92d68e7 | ||
|  | 2c140f5f2c | ||
|  | 6411dd1334 | ||
|  | 9c279bec75 | ||
|  | 646fd16865 | ||
|  | ce350f32e4 | ||
|  | d90527178c | ||
|  | 6aaa681c9b | ||
|  | ce1307e180 | ||
|  | bc994b74ea | ||
|  | 562f5e0b97 | ||
|  | 8581c115d2 | ||
|  | a28d8391e3 | ||
|  | 99abd0d6f7 | ||
|  | fe345a3d5d | ||
|  | 8777f6abdb | ||
|  | c5b2ee4c7a | ||
|  | 5ab0e547bf | ||
|  | 66fb2d5467 | ||
|  | 0b85d73583 | ||
|  | d9c7737e57 | ||
|  | 9c74a85304 | ||
|  | fee5b753ff | ||
|  | 6575ccddf4 | ||
|  | 01df51432e | ||
|  | f22d0af076 | ||
|  | b98d26e333 | ||
|  | f2c1d54c18 | ||
|  | 05fa1c742f | ||
|  | d2ba7ecb34 | ||
|  | cc199b16cf | ||
|  | 4c9bca7e39 | ||
|  | 16096a4d47 | ||
|  | 21cd917ff5 | ||
|  | e5b43573e2 | ||
|  | 04a3615860 | ||
|  | 939901dcd2 | ||
|  | b189346eb1 | ||
|  | 1bff960642 | ||
|  | 60390a2192 | ||
|  | eab8eb8db3 | ||
|  | 7725b8bf12 | ||
|  | 071608b519 | ||
|  | 1da90c34c9 | ||
|  | 3ff430aa91 | ||
|  | 35227e6a09 | ||
|  | e8ce12d9ea | ||
|  | beded0ff7f | ||
|  | a005b3ef50 | ||
|  | 902c053d83 | ||
|  | 09b5e30da5 | ||
|  | 2d7d06d847 | ||
|  | cba0e7796b | ||
|  | ce266b75fe | ||
|  | d4a63ac8b1 | ||
|  | 3d0db3e74d | ||
|  | e6915b5f3a | ||

MAINTAINERS (12)

							| @@ -656,12 +656,6 @@ F: hw/*/grlib* | ||||
|  | ||||
| S390 Machines | ||||
| ------------- | ||||
| S390 Virtio | ||||
| M: Alexander Graf <agraf@suse.de> | ||||
| S: Maintained | ||||
| F: hw/s390x/s390-*.c | ||||
| X: hw/s390x/*pci*.[hc] | ||||
|  | ||||
| S390 Virtio-ccw | ||||
| M: Cornelia Huck <cornelia.huck@de.ibm.com> | ||||
| M: Christian Borntraeger <borntraeger@de.ibm.com> | ||||
| @@ -669,7 +663,6 @@ M: Alexander Graf <agraf@suse.de> | ||||
| S: Supported | ||||
| F: hw/char/sclp*.[hc] | ||||
| F: hw/s390x/ | ||||
| X: hw/s390x/s390-virtio-bus.[ch] | ||||
| F: include/hw/s390x/ | ||||
| F: pc-bios/s390-ccw/ | ||||
| F: hw/watchdog/wdt_diag288.c | ||||
| @@ -857,6 +850,10 @@ M: Gerd Hoffmann <kraxel@redhat.com> | ||||
| S: Maintained | ||||
| F: hw/usb/* | ||||
| F: tests/usb-*-test.c | ||||
| F: docs/usb2.txt | ||||
| F: docs/usb-storage.txt | ||||
| F: include/hw/usb.h | ||||
| F: include/hw/usb/ | ||||
|  | ||||
| USB (serial adapter) | ||||
| M: Gerd Hoffmann <kraxel@redhat.com> | ||||
| @@ -924,6 +921,7 @@ M: Amit Shah <amit.shah@redhat.com> | ||||
| S: Supported | ||||
| F: hw/virtio/virtio-rng.c | ||||
| F: include/hw/virtio/virtio-rng.h | ||||
| F: include/sysemu/rng*.h | ||||
| F: backends/rng*.c | ||||
|  | ||||
| nvme | ||||
|   | ||||
| @@ -567,7 +567,7 @@ static CharDriverState *chr_baum_init(const char *id, | ||||
|                                       ChardevReturn *ret, | ||||
|                                       Error **errp) | ||||
| { | ||||
|     ChardevCommon *common = qapi_ChardevDummy_base(backend->u.braille); | ||||
|     ChardevCommon *common = backend->u.braille; | ||||
|     BaumDriverState *baum; | ||||
|     CharDriverState *chr; | ||||
|     brlapi_handle_t *handle; | ||||
|   | ||||
| @@ -68,7 +68,7 @@ static CharDriverState *qemu_chr_open_msmouse(const char *id, | ||||
|                                               ChardevReturn *ret, | ||||
|                                               Error **errp) | ||||
| { | ||||
|     ChardevCommon *common = qapi_ChardevDummy_base(backend->u.msmouse); | ||||
|     ChardevCommon *common = backend->u.msmouse; | ||||
|     CharDriverState *chr; | ||||
|  | ||||
|     chr = qemu_chr_alloc(common, errp); | ||||
|   | ||||
| @@ -25,33 +25,12 @@ typedef struct RngEgd | ||||
|  | ||||
|     CharDriverState *chr; | ||||
|     char *chr_name; | ||||
|  | ||||
|     GSList *requests; | ||||
| } RngEgd; | ||||
|  | ||||
| typedef struct RngRequest | ||||
| { | ||||
|     EntropyReceiveFunc *receive_entropy; | ||||
|     uint8_t *data; | ||||
|     void *opaque; | ||||
|     size_t offset; | ||||
|     size_t size; | ||||
| } RngRequest; | ||||
|  | ||||
| static void rng_egd_request_entropy(RngBackend *b, size_t size, | ||||
|                                     EntropyReceiveFunc *receive_entropy, | ||||
|                                     void *opaque) | ||||
| static void rng_egd_request_entropy(RngBackend *b, RngRequest *req) | ||||
| { | ||||
|     RngEgd *s = RNG_EGD(b); | ||||
|     RngRequest *req; | ||||
|  | ||||
|     req = g_malloc(sizeof(*req)); | ||||
|  | ||||
|     req->offset = 0; | ||||
|     req->size = size; | ||||
|     req->receive_entropy = receive_entropy; | ||||
|     req->opaque = opaque; | ||||
|     req->data = g_malloc(req->size); | ||||
|     size_t size = req->size; | ||||
|  | ||||
|     while (size > 0) { | ||||
|         uint8_t header[2]; | ||||
| @@ -65,14 +44,6 @@ static void rng_egd_request_entropy(RngBackend *b, size_t size, | ||||
|  | ||||
|         size -= len; | ||||
|     } | ||||
|  | ||||
|     s->requests = g_slist_append(s->requests, req); | ||||
| } | ||||
|  | ||||
| static void rng_egd_free_request(RngRequest *req) | ||||
| { | ||||
|     g_free(req->data); | ||||
|     g_free(req); | ||||
| } | ||||
|  | ||||
| static int rng_egd_chr_can_read(void *opaque) | ||||
| @@ -81,7 +52,7 @@ static int rng_egd_chr_can_read(void *opaque) | ||||
|     GSList *i; | ||||
|     int size = 0; | ||||
|  | ||||
|     for (i = s->requests; i; i = i->next) { | ||||
|     for (i = s->parent.requests; i; i = i->next) { | ||||
|         RngRequest *req = i->data; | ||||
|         size += req->size - req->offset; | ||||
|     } | ||||
| @@ -94,8 +65,8 @@ static void rng_egd_chr_read(void *opaque, const uint8_t *buf, int size) | ||||
|     RngEgd *s = RNG_EGD(opaque); | ||||
|     size_t buf_offset = 0; | ||||
|  | ||||
|     while (size > 0 && s->requests) { | ||||
|         RngRequest *req = s->requests->data; | ||||
|     while (size > 0 && s->parent.requests) { | ||||
|         RngRequest *req = s->parent.requests->data; | ||||
|         int len = MIN(size, req->size - req->offset); | ||||
|  | ||||
|         memcpy(req->data + req->offset, buf + buf_offset, len); | ||||
| @@ -104,38 +75,13 @@ static void rng_egd_chr_read(void *opaque, const uint8_t *buf, int size) | ||||
|         size -= len; | ||||
|  | ||||
|         if (req->offset == req->size) { | ||||
|             s->requests = g_slist_remove_link(s->requests, s->requests); | ||||
|  | ||||
|             req->receive_entropy(req->opaque, req->data, req->size); | ||||
|  | ||||
|             rng_egd_free_request(req); | ||||
|             rng_backend_finalize_request(&s->parent, req); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void rng_egd_free_requests(RngEgd *s) | ||||
| { | ||||
|     GSList *i; | ||||
|  | ||||
|     for (i = s->requests; i; i = i->next) { | ||||
|         rng_egd_free_request(i->data); | ||||
|     } | ||||
|  | ||||
|     g_slist_free(s->requests); | ||||
|     s->requests = NULL; | ||||
| } | ||||
|  | ||||
| static void rng_egd_cancel_requests(RngBackend *b) | ||||
| { | ||||
|     RngEgd *s = RNG_EGD(b); | ||||
|  | ||||
|     /* We simply delete the list of pending requests.  If there is data in the  | ||||
|      * queue waiting to be read, this is okay, because there will always be | ||||
|      * more data than we requested originally | ||||
|      */ | ||||
|     rng_egd_free_requests(s); | ||||
| } | ||||
|  | ||||
| static void rng_egd_opened(RngBackend *b, Error **errp) | ||||
| { | ||||
|     RngEgd *s = RNG_EGD(b); | ||||
| @@ -204,8 +150,6 @@ static void rng_egd_finalize(Object *obj) | ||||
|     } | ||||
|  | ||||
|     g_free(s->chr_name); | ||||
|  | ||||
|     rng_egd_free_requests(s); | ||||
| } | ||||
|  | ||||
| static void rng_egd_class_init(ObjectClass *klass, void *data) | ||||
| @@ -213,7 +157,6 @@ static void rng_egd_class_init(ObjectClass *klass, void *data) | ||||
|     RngBackendClass *rbc = RNG_BACKEND_CLASS(klass); | ||||
|  | ||||
|     rbc->request_entropy = rng_egd_request_entropy; | ||||
|     rbc->cancel_requests = rng_egd_cancel_requests; | ||||
|     rbc->opened = rng_egd_opened; | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -22,10 +22,6 @@ struct RndRandom | ||||
|  | ||||
|     int fd; | ||||
|     char *filename; | ||||
|  | ||||
|     EntropyReceiveFunc *receive_func; | ||||
|     void *opaque; | ||||
|     size_t size; | ||||
| }; | ||||
|  | ||||
| /** | ||||
| @@ -38,36 +34,35 @@ struct RndRandom | ||||
| static void entropy_available(void *opaque) | ||||
| { | ||||
|     RndRandom *s = RNG_RANDOM(opaque); | ||||
|     uint8_t buffer[s->size]; | ||||
|     ssize_t len; | ||||
|  | ||||
|     len = read(s->fd, buffer, s->size); | ||||
|     if (len < 0 && errno == EAGAIN) { | ||||
|         return; | ||||
|     while (s->parent.requests != NULL) { | ||||
|         RngRequest *req = s->parent.requests->data; | ||||
|         ssize_t len; | ||||
|  | ||||
|         len = read(s->fd, req->data, req->size); | ||||
|         if (len < 0 && errno == EAGAIN) { | ||||
|             return; | ||||
|         } | ||||
|         g_assert(len != -1); | ||||
|  | ||||
|         req->receive_entropy(req->opaque, req->data, len); | ||||
|  | ||||
|         rng_backend_finalize_request(&s->parent, req); | ||||
|     } | ||||
|     g_assert(len != -1); | ||||
|  | ||||
|     s->receive_func(s->opaque, buffer, len); | ||||
|     s->receive_func = NULL; | ||||
|  | ||||
|     /* We've drained all requests, the fd handler can be reset. */ | ||||
|     qemu_set_fd_handler(s->fd, NULL, NULL, NULL); | ||||
| } | ||||
|  | ||||
| static void rng_random_request_entropy(RngBackend *b, size_t size, | ||||
|                                         EntropyReceiveFunc *receive_entropy, | ||||
|                                         void *opaque) | ||||
| static void rng_random_request_entropy(RngBackend *b, RngRequest *req) | ||||
| { | ||||
|     RndRandom *s = RNG_RANDOM(b); | ||||
|  | ||||
|     if (s->receive_func) { | ||||
|         s->receive_func(s->opaque, NULL, 0); | ||||
|     if (s->parent.requests == NULL) { | ||||
|         /* If there are no pending requests yet, we need to | ||||
|          * install our fd handler. */ | ||||
|         qemu_set_fd_handler(s->fd, entropy_available, NULL, s); | ||||
|     } | ||||
|  | ||||
|     s->receive_func = receive_entropy; | ||||
|     s->opaque = opaque; | ||||
|     s->size = size; | ||||
|  | ||||
|     qemu_set_fd_handler(s->fd, entropy_available, NULL, s); | ||||
| } | ||||
|  | ||||
| static void rng_random_opened(RngBackend *b, Error **errp) | ||||
|   | ||||
| @@ -20,18 +20,20 @@ void rng_backend_request_entropy(RngBackend *s, size_t size, | ||||
|                                  void *opaque) | ||||
| { | ||||
|     RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); | ||||
|     RngRequest *req; | ||||
|  | ||||
|     if (k->request_entropy) { | ||||
|         k->request_entropy(s, size, receive_entropy, opaque); | ||||
|     } | ||||
| } | ||||
|         req = g_malloc(sizeof(*req)); | ||||
|  | ||||
| void rng_backend_cancel_requests(RngBackend *s) | ||||
| { | ||||
|     RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); | ||||
|         req->offset = 0; | ||||
|         req->size = size; | ||||
|         req->receive_entropy = receive_entropy; | ||||
|         req->opaque = opaque; | ||||
|         req->data = g_malloc(req->size); | ||||
|  | ||||
|     if (k->cancel_requests) { | ||||
|         k->cancel_requests(s); | ||||
|         k->request_entropy(s, req); | ||||
|  | ||||
|         s->requests = g_slist_append(s->requests, req); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -73,6 +75,30 @@ static void rng_backend_prop_set_opened(Object *obj, bool value, Error **errp) | ||||
|     s->opened = true; | ||||
| } | ||||
|  | ||||
| static void rng_backend_free_request(RngRequest *req) | ||||
| { | ||||
|     g_free(req->data); | ||||
|     g_free(req); | ||||
| } | ||||
|  | ||||
| static void rng_backend_free_requests(RngBackend *s) | ||||
| { | ||||
|     GSList *i; | ||||
|  | ||||
|     for (i = s->requests; i; i = i->next) { | ||||
|         rng_backend_free_request(i->data); | ||||
|     } | ||||
|  | ||||
|     g_slist_free(s->requests); | ||||
|     s->requests = NULL; | ||||
| } | ||||
|  | ||||
| void rng_backend_finalize_request(RngBackend *s, RngRequest *req) | ||||
| { | ||||
|     s->requests = g_slist_remove(s->requests, req); | ||||
|     rng_backend_free_request(req); | ||||
| } | ||||
|  | ||||
| static void rng_backend_init(Object *obj) | ||||
| { | ||||
|     object_property_add_bool(obj, "opened", | ||||
| @@ -81,6 +107,13 @@ static void rng_backend_init(Object *obj) | ||||
|                              NULL); | ||||
| } | ||||
|  | ||||
| static void rng_backend_finalize(Object *obj) | ||||
| { | ||||
|     RngBackend *s = RNG_BACKEND(obj); | ||||
|  | ||||
|     rng_backend_free_requests(s); | ||||
| } | ||||
|  | ||||
| static void rng_backend_class_init(ObjectClass *oc, void *data) | ||||
| { | ||||
|     UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); | ||||
| @@ -93,6 +126,7 @@ static const TypeInfo rng_backend_info = { | ||||
|     .parent = TYPE_OBJECT, | ||||
|     .instance_size = sizeof(RngBackend), | ||||
|     .instance_init = rng_backend_init, | ||||
|     .instance_finalize = rng_backend_finalize, | ||||
|     .class_size = sizeof(RngBackendClass), | ||||
|     .class_init = rng_backend_class_init, | ||||
|     .abstract = true, | ||||
|   | ||||
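
The three rng backend patches above move request-queue bookkeeping out of the individual backends and into the common RngBackend code: rng_backend_request_entropy() now allocates the RngRequest, calls the per-backend request_entropy hook with it, and then appends it to the backend's request list; a backend hands a finished request back with rng_backend_finalize_request(), which unlinks and frees it. Below is a minimal sketch of a backend written against that new contract. It is illustrative only: the backend name is invented, and it assumes QEMU's "qemu/osdep.h" and "sysemu/rng.h" headers as modified by these patches.

```c
#include "qemu/osdep.h"
#include "sysemu/rng.h"

/* Hypothetical backend hook: the common code in rng.c allocates req (with
 * req->data of req->size bytes), calls this hook, and then queues req on the
 * backend's request list, so usually all a backend does here is arm whatever
 * event source will eventually produce the data. */
static void rng_example_request_entropy(RngBackend *b, RngRequest *req)
{
    /* e.g. install an fd handler or timer here; nothing to do in this toy. */
}

/* Later, when data is available, complete the oldest pending request and
 * hand it back to the common code, which unlinks and frees it. */
static void rng_example_complete_one(RngBackend *b)
{
    RngRequest *req = b->requests->data;   /* oldest pending request */

    memset(req->data, 0, req->size);       /* toy "entropy" */
    req->receive_entropy(req->opaque, req->data, req->size);
    rng_backend_finalize_request(b, req);
}
```

Compare this with the removed code above, where rng-egd kept its own request list and both rng-egd and rng-random duplicated the free/cancel logic that now lives once in rng.c.
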
| @@ -21,10 +21,7 @@ | ||||
| #include "qemu/ratelimit.h" | ||||
| #include "sysemu/block-backend.h" | ||||
|  | ||||
| #define BACKUP_CLUSTER_BITS 16 | ||||
| #define BACKUP_CLUSTER_SIZE (1 << BACKUP_CLUSTER_BITS) | ||||
| #define BACKUP_SECTORS_PER_CLUSTER (BACKUP_CLUSTER_SIZE / BDRV_SECTOR_SIZE) | ||||
|  | ||||
| #define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16) | ||||
| #define SLICE_TIME 100000000ULL /* ns */ | ||||
|  | ||||
| typedef struct CowRequest { | ||||
| @@ -46,9 +43,16 @@ typedef struct BackupBlockJob { | ||||
|     CoRwlock flush_rwlock; | ||||
|     uint64_t sectors_read; | ||||
|     HBitmap *bitmap; | ||||
|     int64_t cluster_size; | ||||
|     QLIST_HEAD(, CowRequest) inflight_reqs; | ||||
| } BackupBlockJob; | ||||
|  | ||||
| /* Size of a cluster in sectors, instead of bytes. */ | ||||
| static inline int64_t cluster_size_sectors(BackupBlockJob *job) | ||||
| { | ||||
|   return job->cluster_size / BDRV_SECTOR_SIZE; | ||||
| } | ||||
|  | ||||
| /* See if in-flight requests overlap and wait for them to complete */ | ||||
| static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job, | ||||
|                                                        int64_t start, | ||||
| @@ -97,13 +101,14 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs, | ||||
|     QEMUIOVector bounce_qiov; | ||||
|     void *bounce_buffer = NULL; | ||||
|     int ret = 0; | ||||
|     int64_t sectors_per_cluster = cluster_size_sectors(job); | ||||
|     int64_t start, end; | ||||
|     int n; | ||||
|  | ||||
|     qemu_co_rwlock_rdlock(&job->flush_rwlock); | ||||
|  | ||||
|     start = sector_num / BACKUP_SECTORS_PER_CLUSTER; | ||||
|     end = DIV_ROUND_UP(sector_num + nb_sectors, BACKUP_SECTORS_PER_CLUSTER); | ||||
|     start = sector_num / sectors_per_cluster; | ||||
|     end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster); | ||||
|  | ||||
|     trace_backup_do_cow_enter(job, start, sector_num, nb_sectors); | ||||
|  | ||||
| @@ -118,12 +123,12 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs, | ||||
|  | ||||
|         trace_backup_do_cow_process(job, start); | ||||
|  | ||||
|         n = MIN(BACKUP_SECTORS_PER_CLUSTER, | ||||
|         n = MIN(sectors_per_cluster, | ||||
|                 job->common.len / BDRV_SECTOR_SIZE - | ||||
|                 start * BACKUP_SECTORS_PER_CLUSTER); | ||||
|                 start * sectors_per_cluster); | ||||
|  | ||||
|         if (!bounce_buffer) { | ||||
|             bounce_buffer = qemu_blockalign(bs, BACKUP_CLUSTER_SIZE); | ||||
|             bounce_buffer = qemu_blockalign(bs, job->cluster_size); | ||||
|         } | ||||
|         iov.iov_base = bounce_buffer; | ||||
|         iov.iov_len = n * BDRV_SECTOR_SIZE; | ||||
| @@ -131,10 +136,10 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs, | ||||
|  | ||||
|         if (is_write_notifier) { | ||||
|             ret = bdrv_co_readv_no_serialising(bs, | ||||
|                                            start * BACKUP_SECTORS_PER_CLUSTER, | ||||
|                                            start * sectors_per_cluster, | ||||
|                                            n, &bounce_qiov); | ||||
|         } else { | ||||
|             ret = bdrv_co_readv(bs, start * BACKUP_SECTORS_PER_CLUSTER, n, | ||||
|             ret = bdrv_co_readv(bs, start * sectors_per_cluster, n, | ||||
|                                 &bounce_qiov); | ||||
|         } | ||||
|         if (ret < 0) { | ||||
| @@ -147,11 +152,11 @@ static int coroutine_fn backup_do_cow(BlockDriverState *bs, | ||||
|  | ||||
|         if (buffer_is_zero(iov.iov_base, iov.iov_len)) { | ||||
|             ret = bdrv_co_write_zeroes(job->target, | ||||
|                                        start * BACKUP_SECTORS_PER_CLUSTER, | ||||
|                                        start * sectors_per_cluster, | ||||
|                                        n, BDRV_REQ_MAY_UNMAP); | ||||
|         } else { | ||||
|             ret = bdrv_co_writev(job->target, | ||||
|                                  start * BACKUP_SECTORS_PER_CLUSTER, n, | ||||
|                                  start * sectors_per_cluster, n, | ||||
|                                  &bounce_qiov); | ||||
|         } | ||||
|         if (ret < 0) { | ||||
| @@ -322,21 +327,22 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) | ||||
|     int64_t cluster; | ||||
|     int64_t end; | ||||
|     int64_t last_cluster = -1; | ||||
|     int64_t sectors_per_cluster = cluster_size_sectors(job); | ||||
|     BlockDriverState *bs = job->common.bs; | ||||
|     HBitmapIter hbi; | ||||
|  | ||||
|     granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap); | ||||
|     clusters_per_iter = MAX((granularity / BACKUP_CLUSTER_SIZE), 1); | ||||
|     clusters_per_iter = MAX((granularity / job->cluster_size), 1); | ||||
|     bdrv_dirty_iter_init(job->sync_bitmap, &hbi); | ||||
|  | ||||
|     /* Find the next dirty sector(s) */ | ||||
|     while ((sector = hbitmap_iter_next(&hbi)) != -1) { | ||||
|         cluster = sector / BACKUP_SECTORS_PER_CLUSTER; | ||||
|         cluster = sector / sectors_per_cluster; | ||||
|  | ||||
|         /* Fake progress updates for any clusters we skipped */ | ||||
|         if (cluster != last_cluster + 1) { | ||||
|             job->common.offset += ((cluster - last_cluster - 1) * | ||||
|                                    BACKUP_CLUSTER_SIZE); | ||||
|                                    job->cluster_size); | ||||
|         } | ||||
|  | ||||
|         for (end = cluster + clusters_per_iter; cluster < end; cluster++) { | ||||
| @@ -344,8 +350,8 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) | ||||
|                 if (yield_and_check(job)) { | ||||
|                     return ret; | ||||
|                 } | ||||
|                 ret = backup_do_cow(bs, cluster * BACKUP_SECTORS_PER_CLUSTER, | ||||
|                                     BACKUP_SECTORS_PER_CLUSTER, &error_is_read, | ||||
|                 ret = backup_do_cow(bs, cluster * sectors_per_cluster, | ||||
|                                     sectors_per_cluster, &error_is_read, | ||||
|                                     false); | ||||
|                 if ((ret < 0) && | ||||
|                     backup_error_action(job, error_is_read, -ret) == | ||||
| @@ -357,17 +363,17 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job) | ||||
|  | ||||
|         /* If the bitmap granularity is smaller than the backup granularity, | ||||
|          * we need to advance the iterator pointer to the next cluster. */ | ||||
|         if (granularity < BACKUP_CLUSTER_SIZE) { | ||||
|             bdrv_set_dirty_iter(&hbi, cluster * BACKUP_SECTORS_PER_CLUSTER); | ||||
|         if (granularity < job->cluster_size) { | ||||
|             bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster); | ||||
|         } | ||||
|  | ||||
|         last_cluster = cluster - 1; | ||||
|     } | ||||
|  | ||||
|     /* Play some final catchup with the progress meter */ | ||||
|     end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE); | ||||
|     end = DIV_ROUND_UP(job->common.len, job->cluster_size); | ||||
|     if (last_cluster + 1 < end) { | ||||
|         job->common.offset += ((end - last_cluster - 1) * BACKUP_CLUSTER_SIZE); | ||||
|         job->common.offset += ((end - last_cluster - 1) * job->cluster_size); | ||||
|     } | ||||
|  | ||||
|     return ret; | ||||
| @@ -384,13 +390,14 @@ static void coroutine_fn backup_run(void *opaque) | ||||
|         .notify = backup_before_write_notify, | ||||
|     }; | ||||
|     int64_t start, end; | ||||
|     int64_t sectors_per_cluster = cluster_size_sectors(job); | ||||
|     int ret = 0; | ||||
|  | ||||
|     QLIST_INIT(&job->inflight_reqs); | ||||
|     qemu_co_rwlock_init(&job->flush_rwlock); | ||||
|  | ||||
|     start = 0; | ||||
|     end = DIV_ROUND_UP(job->common.len, BACKUP_CLUSTER_SIZE); | ||||
|     end = DIV_ROUND_UP(job->common.len, job->cluster_size); | ||||
|  | ||||
|     job->bitmap = hbitmap_alloc(end, 0); | ||||
|  | ||||
| @@ -427,7 +434,7 @@ static void coroutine_fn backup_run(void *opaque) | ||||
|                 /* Check to see if these blocks are already in the | ||||
|                  * backing file. */ | ||||
|  | ||||
|                 for (i = 0; i < BACKUP_SECTORS_PER_CLUSTER;) { | ||||
|                 for (i = 0; i < sectors_per_cluster;) { | ||||
|                     /* bdrv_is_allocated() only returns true/false based | ||||
|                      * on the first set of sectors it comes across that | ||||
|                      * are are all in the same state. | ||||
| @@ -436,8 +443,8 @@ static void coroutine_fn backup_run(void *opaque) | ||||
|                      * needed but at some point that is always the case. */ | ||||
|                     alloced = | ||||
|                         bdrv_is_allocated(bs, | ||||
|                                 start * BACKUP_SECTORS_PER_CLUSTER + i, | ||||
|                                 BACKUP_SECTORS_PER_CLUSTER - i, &n); | ||||
|                                 start * sectors_per_cluster + i, | ||||
|                                 sectors_per_cluster - i, &n); | ||||
|                     i += n; | ||||
|  | ||||
|                     if (alloced == 1 || n == 0) { | ||||
| @@ -452,8 +459,8 @@ static void coroutine_fn backup_run(void *opaque) | ||||
|                 } | ||||
|             } | ||||
|             /* FULL sync mode we copy the whole drive. */ | ||||
|             ret = backup_do_cow(bs, start * BACKUP_SECTORS_PER_CLUSTER, | ||||
|                     BACKUP_SECTORS_PER_CLUSTER, &error_is_read, false); | ||||
|             ret = backup_do_cow(bs, start * sectors_per_cluster, | ||||
|                                 sectors_per_cluster, &error_is_read, false); | ||||
|             if (ret < 0) { | ||||
|                 /* Depending on error action, fail now or retry cluster */ | ||||
|                 BlockErrorAction action = | ||||
| @@ -494,6 +501,8 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target, | ||||
|                   BlockJobTxn *txn, Error **errp) | ||||
| { | ||||
|     int64_t len; | ||||
|     BlockDriverInfo bdi; | ||||
|     int ret; | ||||
|  | ||||
|     assert(bs); | ||||
|     assert(target); | ||||
| @@ -563,14 +572,32 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target, | ||||
|         goto error; | ||||
|     } | ||||
|  | ||||
|     bdrv_op_block_all(target, job->common.blocker); | ||||
|  | ||||
|     job->on_source_error = on_source_error; | ||||
|     job->on_target_error = on_target_error; | ||||
|     job->target = target; | ||||
|     job->sync_mode = sync_mode; | ||||
|     job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ? | ||||
|                        sync_bitmap : NULL; | ||||
|  | ||||
|     /* If there is no backing file on the target, we cannot rely on COW if our | ||||
|      * backup cluster size is smaller than the target cluster size. Even for | ||||
|      * targets with a backing file, try to avoid COW if possible. */ | ||||
|     ret = bdrv_get_info(job->target, &bdi); | ||||
|     if (ret < 0 && !target->backing) { | ||||
|         error_setg_errno(errp, -ret, | ||||
|             "Couldn't determine the cluster size of the target image, " | ||||
|             "which has no backing file"); | ||||
|         error_append_hint(errp, | ||||
|             "Aborting, since this may create an unusable destination image\n"); | ||||
|         goto error; | ||||
|     } else if (ret < 0 && target->backing) { | ||||
|         /* Not fatal; just trudge on ahead. */ | ||||
|         job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT; | ||||
|     } else { | ||||
|         job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size); | ||||
|     } | ||||
|  | ||||
|     bdrv_op_block_all(target, job->common.blocker); | ||||
|     job->common.len = len; | ||||
|     job->common.co = qemu_coroutine_create(backup_run); | ||||
|     block_job_txn_add_job(txn, &job->common); | ||||
|   | ||||
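
The block/backup.c change above replaces the fixed 64 KiB backup cluster with one derived from the target image: job->cluster_size becomes MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size), falling back to the default when bdrv_get_info() fails on a target that has a backing file, and all sector arithmetic goes through cluster_size_sectors(). A standalone sketch of that selection, with made-up target values (BDRV_SECTOR_SIZE is 512 bytes in QEMU):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE            512
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define MAX(a, b)                   ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Example targets: cluster size unknown (0), 64 KiB clusters, 2 MiB clusters. */
    int64_t reported[] = { 0, 65536, 2 * 1024 * 1024 };

    for (int i = 0; i < 3; i++) {
        int64_t cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, reported[i]);
        printf("target cluster %8" PRId64 " -> backup cluster %8" PRId64
               " bytes, %5" PRId64 " sectors\n",
               reported[i], cluster_size, cluster_size / BDRV_SECTOR_SIZE);
    }
    return 0;
}
```
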
							
								
								
									
block/curl.c (66)

							| @@ -27,6 +27,7 @@ | ||||
| #include "block/block_int.h" | ||||
| #include "qapi/qmp/qbool.h" | ||||
| #include "qapi/qmp/qstring.h" | ||||
| #include "crypto/secret.h" | ||||
| #include <curl/curl.h> | ||||
|  | ||||
| // #define DEBUG_CURL | ||||
| @@ -78,6 +79,10 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle, | ||||
| #define CURL_BLOCK_OPT_SSLVERIFY "sslverify" | ||||
| #define CURL_BLOCK_OPT_TIMEOUT "timeout" | ||||
| #define CURL_BLOCK_OPT_COOKIE    "cookie" | ||||
| #define CURL_BLOCK_OPT_USERNAME "username" | ||||
| #define CURL_BLOCK_OPT_PASSWORD_SECRET "password-secret" | ||||
| #define CURL_BLOCK_OPT_PROXY_USERNAME "proxy-username" | ||||
| #define CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET "proxy-password-secret" | ||||
|  | ||||
| struct BDRVCURLState; | ||||
|  | ||||
| @@ -120,6 +125,10 @@ typedef struct BDRVCURLState { | ||||
|     char *cookie; | ||||
|     bool accept_range; | ||||
|     AioContext *aio_context; | ||||
|     char *username; | ||||
|     char *password; | ||||
|     char *proxyusername; | ||||
|     char *proxypassword; | ||||
| } BDRVCURLState; | ||||
|  | ||||
| static void curl_clean_state(CURLState *s); | ||||
| @@ -419,6 +428,21 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s) | ||||
|         curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg); | ||||
|         curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1); | ||||
|  | ||||
|         if (s->username) { | ||||
|             curl_easy_setopt(state->curl, CURLOPT_USERNAME, s->username); | ||||
|         } | ||||
|         if (s->password) { | ||||
|             curl_easy_setopt(state->curl, CURLOPT_PASSWORD, s->password); | ||||
|         } | ||||
|         if (s->proxyusername) { | ||||
|             curl_easy_setopt(state->curl, | ||||
|                              CURLOPT_PROXYUSERNAME, s->proxyusername); | ||||
|         } | ||||
|         if (s->proxypassword) { | ||||
|             curl_easy_setopt(state->curl, | ||||
|                              CURLOPT_PROXYPASSWORD, s->proxypassword); | ||||
|         } | ||||
|  | ||||
|         /* Restrict supported protocols to avoid security issues in the more | ||||
|          * obscure protocols.  For example, do not allow POP3/SMTP/IMAP see | ||||
|          * CVE-2013-0249. | ||||
| @@ -525,10 +549,31 @@ static QemuOptsList runtime_opts = { | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "Pass the cookie or list of cookies with each request" | ||||
|         }, | ||||
|         { | ||||
|             .name = CURL_BLOCK_OPT_USERNAME, | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "Username for HTTP auth" | ||||
|         }, | ||||
|         { | ||||
|             .name = CURL_BLOCK_OPT_PASSWORD_SECRET, | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "ID of secret used as password for HTTP auth", | ||||
|         }, | ||||
|         { | ||||
|             .name = CURL_BLOCK_OPT_PROXY_USERNAME, | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "Username for HTTP proxy auth" | ||||
|         }, | ||||
|         { | ||||
|             .name = CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET, | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "ID of secret used as password for HTTP proxy auth", | ||||
|         }, | ||||
|         { /* end of list */ } | ||||
|     }, | ||||
| }; | ||||
|  | ||||
|  | ||||
| static int curl_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|                      Error **errp) | ||||
| { | ||||
| @@ -539,6 +584,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|     const char *file; | ||||
|     const char *cookie; | ||||
|     double d; | ||||
|     const char *secretid; | ||||
|  | ||||
|     static int inited = 0; | ||||
|  | ||||
| @@ -580,6 +626,26 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|         goto out_noclean; | ||||
|     } | ||||
|  | ||||
|     s->username = g_strdup(qemu_opt_get(opts, CURL_BLOCK_OPT_USERNAME)); | ||||
|     secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PASSWORD_SECRET); | ||||
|  | ||||
|     if (secretid) { | ||||
|         s->password = qcrypto_secret_lookup_as_utf8(secretid, errp); | ||||
|         if (!s->password) { | ||||
|             goto out_noclean; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     s->proxyusername = g_strdup( | ||||
|         qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_USERNAME)); | ||||
|     secretid = qemu_opt_get(opts, CURL_BLOCK_OPT_PROXY_PASSWORD_SECRET); | ||||
|     if (secretid) { | ||||
|         s->proxypassword = qcrypto_secret_lookup_as_utf8(secretid, errp); | ||||
|         if (!s->proxypassword) { | ||||
|             goto out_noclean; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (!inited) { | ||||
|         curl_global_init(CURL_GLOBAL_ALL); | ||||
|         inited = 1; | ||||
|   | ||||
| @@ -39,6 +39,7 @@ | ||||
| #include "sysemu/sysemu.h" | ||||
| #include "qmp-commands.h" | ||||
| #include "qapi/qmp/qstring.h" | ||||
| #include "crypto/secret.h" | ||||
|  | ||||
| #include <iscsi/iscsi.h> | ||||
| #include <iscsi/scsi-lowlevel.h> | ||||
| @@ -1080,6 +1081,8 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target, | ||||
|     QemuOpts *opts; | ||||
|     const char *user = NULL; | ||||
|     const char *password = NULL; | ||||
|     const char *secretid; | ||||
|     char *secret = NULL; | ||||
|  | ||||
|     list = qemu_find_opts("iscsi"); | ||||
|     if (!list) { | ||||
| @@ -1099,8 +1102,20 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target, | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     secretid = qemu_opt_get(opts, "password-secret"); | ||||
|     password = qemu_opt_get(opts, "password"); | ||||
|     if (!password) { | ||||
|     if (secretid && password) { | ||||
|         error_setg(errp, "'password' and 'password-secret' properties are " | ||||
|                    "mutually exclusive"); | ||||
|         return; | ||||
|     } | ||||
|     if (secretid) { | ||||
|         secret = qcrypto_secret_lookup_as_utf8(secretid, errp); | ||||
|         if (!secret) { | ||||
|             return; | ||||
|         } | ||||
|         password = secret; | ||||
|     } else if (!password) { | ||||
|         error_setg(errp, "CHAP username specified but no password was given"); | ||||
|         return; | ||||
|     } | ||||
| @@ -1108,6 +1123,8 @@ static void parse_chap(struct iscsi_context *iscsi, const char *target, | ||||
|     if (iscsi_set_initiator_username_pwd(iscsi, user, password)) { | ||||
|         error_setg(errp, "Failed to set initiator username and password"); | ||||
|     } | ||||
|  | ||||
|     g_free(secret); | ||||
| } | ||||
|  | ||||
| static void parse_header_digest(struct iscsi_context *iscsi, const char *target, | ||||
| @@ -1857,6 +1874,11 @@ static QemuOptsList qemu_iscsi_opts = { | ||||
|             .name = "password", | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "password for CHAP authentication to target", | ||||
|         },{ | ||||
|             .name = "password-secret", | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "ID of the secret providing password for CHAP " | ||||
|                     "authentication to target", | ||||
|         },{ | ||||
|             .name = "header-digest", | ||||
|             .type = QEMU_OPT_STRING, | ||||
|   | ||||
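
Both the curl and iscsi patches above wire a new "password-secret" option to QEMU's secret objects instead of taking a plaintext password. The shared pattern is to resolve the secret ID with qcrypto_secret_lookup_as_utf8(), which returns a newly allocated string that the caller owns and must g_free(), or NULL with *errp set on failure. A sketch of that pattern, with an invented helper name, assuming QEMU's "crypto/secret.h" and option APIs:

```c
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/option.h"
#include "crypto/secret.h"

/* Illustrative helper: turn a "password-secret" option into a password
 * string.  Returns a newly allocated string (caller must g_free() it),
 * NULL if the option was absent, or NULL with *errp set on lookup failure. */
static char *example_resolve_password(QemuOpts *opts, Error **errp)
{
    const char *secretid = qemu_opt_get(opts, "password-secret");

    if (!secretid) {
        return NULL;    /* no secret configured; caller may fall back */
    }
    return qcrypto_secret_lookup_as_utf8(secretid, errp);
}
```

The secret object itself would typically be created on the command line with something like "-object secret,id=...,data=...", which is what the "ID of secret" help strings above refer to.
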
							
								
								
									
block/mirror.c (353)

							| @@ -47,7 +47,6 @@ typedef struct MirrorBlockJob { | ||||
|     BlockdevOnError on_source_error, on_target_error; | ||||
|     bool synced; | ||||
|     bool should_complete; | ||||
|     int64_t sector_num; | ||||
|     int64_t granularity; | ||||
|     size_t buf_size; | ||||
|     int64_t bdev_length; | ||||
| @@ -64,6 +63,8 @@ typedef struct MirrorBlockJob { | ||||
|     int ret; | ||||
|     bool unmap; | ||||
|     bool waiting_for_io; | ||||
|     int target_cluster_sectors; | ||||
|     int max_iov; | ||||
| } MirrorBlockJob; | ||||
|  | ||||
| typedef struct MirrorOp { | ||||
| @@ -159,115 +160,84 @@ static void mirror_read_complete(void *opaque, int ret) | ||||
|                     mirror_write_complete, op); | ||||
| } | ||||
|  | ||||
| static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||||
| /* Round sector_num and/or nb_sectors to target cluster if COW is needed, and | ||||
|  * return the offset of the adjusted tail sector against original. */ | ||||
| static int mirror_cow_align(MirrorBlockJob *s, | ||||
|                             int64_t *sector_num, | ||||
|                             int *nb_sectors) | ||||
| { | ||||
|     bool need_cow; | ||||
|     int ret = 0; | ||||
|     int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS; | ||||
|     int64_t align_sector_num = *sector_num; | ||||
|     int align_nb_sectors = *nb_sectors; | ||||
|     int max_sectors = chunk_sectors * s->max_iov; | ||||
|  | ||||
|     need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap); | ||||
|     need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors, | ||||
|                           s->cow_bitmap); | ||||
|     if (need_cow) { | ||||
|         bdrv_round_to_clusters(s->target, *sector_num, *nb_sectors, | ||||
|                                &align_sector_num, &align_nb_sectors); | ||||
|     } | ||||
|  | ||||
|     if (align_nb_sectors > max_sectors) { | ||||
|         align_nb_sectors = max_sectors; | ||||
|         if (need_cow) { | ||||
|             align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors, | ||||
|                                                s->target_cluster_sectors); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors); | ||||
|     *sector_num = align_sector_num; | ||||
|     *nb_sectors = align_nb_sectors; | ||||
|     assert(ret >= 0); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static inline void mirror_wait_for_io(MirrorBlockJob *s) | ||||
| { | ||||
|     assert(!s->waiting_for_io); | ||||
|     s->waiting_for_io = true; | ||||
|     qemu_coroutine_yield(); | ||||
|     s->waiting_for_io = false; | ||||
| } | ||||
|  | ||||
| /* Submit async read while handling COW. | ||||
|  * Returns: nb_sectors if no alignment is necessary, or | ||||
|  *          (new_end - sector_num) if tail is rounded up or down due to | ||||
|  *          alignment or buffer limit. | ||||
|  */ | ||||
| static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num, | ||||
|                           int nb_sectors) | ||||
| { | ||||
|     BlockDriverState *source = s->common.bs; | ||||
|     int nb_sectors, sectors_per_chunk, nb_chunks, max_iov; | ||||
|     int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector; | ||||
|     uint64_t delay_ns = 0; | ||||
|     int sectors_per_chunk, nb_chunks; | ||||
|     int ret = nb_sectors; | ||||
|     MirrorOp *op; | ||||
|     int pnum; | ||||
|     int64_t ret; | ||||
|     BlockDriverState *file; | ||||
|  | ||||
|     max_iov = MIN(source->bl.max_iov, s->target->bl.max_iov); | ||||
|  | ||||
|     s->sector_num = hbitmap_iter_next(&s->hbi); | ||||
|     if (s->sector_num < 0) { | ||||
|         bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi); | ||||
|         s->sector_num = hbitmap_iter_next(&s->hbi); | ||||
|         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); | ||||
|         assert(s->sector_num >= 0); | ||||
|     } | ||||
|  | ||||
|     hbitmap_next_sector = s->sector_num; | ||||
|     sector_num = s->sector_num; | ||||
|     sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; | ||||
|     end = s->bdev_length / BDRV_SECTOR_SIZE; | ||||
|  | ||||
|     /* Extend the QEMUIOVector to include all adjacent blocks that will | ||||
|      * be copied in this operation. | ||||
|      * | ||||
|      * We have to do this if we have no backing file yet in the destination, | ||||
|      * and the cluster size is very large.  Then we need to do COW ourselves. | ||||
|      * The first time a cluster is copied, copy it entirely.  Note that, | ||||
|      * because both the granularity and the cluster size are powers of two, | ||||
|      * the number of sectors to copy cannot exceed one cluster. | ||||
|      * | ||||
|      * We also want to extend the QEMUIOVector to include more adjacent | ||||
|      * dirty blocks if possible, to limit the number of I/O operations and | ||||
|      * run efficiently even with a small granularity. | ||||
|      */ | ||||
|     nb_chunks = 0; | ||||
|     nb_sectors = 0; | ||||
|     next_sector = sector_num; | ||||
|     next_chunk = sector_num / sectors_per_chunk; | ||||
|     /* We can only handle as much as buf_size at a time. */ | ||||
|     nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors); | ||||
|     assert(nb_sectors); | ||||
|  | ||||
|     /* Wait for I/O to this cluster (from a previous iteration) to be done.  */ | ||||
|     while (test_bit(next_chunk, s->in_flight_bitmap)) { | ||||
|         trace_mirror_yield_in_flight(s, sector_num, s->in_flight); | ||||
|         s->waiting_for_io = true; | ||||
|         qemu_coroutine_yield(); | ||||
|         s->waiting_for_io = false; | ||||
|     if (s->cow_bitmap) { | ||||
|     ret += mirror_cow_align(s, &sector_num, &nb_sectors); | ||||
|     } | ||||
|     assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size); | ||||
|     /* The sector range must meet granularity because: | ||||
|      * 1) Caller passes in aligned values; | ||||
|      * 2) mirror_cow_align is used only when target cluster is larger. */ | ||||
|     assert(!(nb_sectors % sectors_per_chunk)); | ||||
|     assert(!(sector_num % sectors_per_chunk)); | ||||
|     nb_chunks = nb_sectors / sectors_per_chunk; | ||||
|  | ||||
|     do { | ||||
|         int added_sectors, added_chunks; | ||||
|  | ||||
|         if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) || | ||||
|             test_bit(next_chunk, s->in_flight_bitmap)) { | ||||
|             assert(nb_sectors > 0); | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         added_sectors = sectors_per_chunk; | ||||
|         if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) { | ||||
|             bdrv_round_to_clusters(s->target, | ||||
|                                    next_sector, added_sectors, | ||||
|                                    &next_sector, &added_sectors); | ||||
|  | ||||
|             /* On the first iteration, the rounding may make us copy | ||||
|              * sectors before the first dirty one. | ||||
|              */ | ||||
|             if (next_sector < sector_num) { | ||||
|                 assert(nb_sectors == 0); | ||||
|                 sector_num = next_sector; | ||||
|                 next_chunk = next_sector / sectors_per_chunk; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors)); | ||||
|         added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk; | ||||
|  | ||||
|         /* When doing COW, it may happen that there is not enough space for | ||||
|          * a full cluster.  Wait if that is the case. | ||||
|          */ | ||||
|         while (nb_chunks == 0 && s->buf_free_count < added_chunks) { | ||||
|             trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight); | ||||
|             s->waiting_for_io = true; | ||||
|             qemu_coroutine_yield(); | ||||
|             s->waiting_for_io = false; | ||||
|         } | ||||
|         if (s->buf_free_count < nb_chunks + added_chunks) { | ||||
|             trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight); | ||||
|             break; | ||||
|         } | ||||
|         if (max_iov < nb_chunks + added_chunks) { | ||||
|             trace_mirror_break_iov_max(s, nb_chunks, added_chunks); | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         /* We have enough free space to copy these sectors.  */ | ||||
|         bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks); | ||||
|  | ||||
|         nb_sectors += added_sectors; | ||||
|         nb_chunks += added_chunks; | ||||
|         next_sector += added_sectors; | ||||
|         next_chunk += added_chunks; | ||||
|         if (!s->synced && s->common.speed) { | ||||
|             delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors); | ||||
|         } | ||||
|     } while (delay_ns == 0 && next_sector < end); | ||||
|     while (s->buf_free_count < nb_chunks) { | ||||
|         trace_mirror_yield_in_flight(s, sector_num, s->in_flight); | ||||
|         mirror_wait_for_io(s); | ||||
|     } | ||||
|  | ||||
|     /* Allocate a MirrorOp that is used as an AIO callback.  */ | ||||
|     op = g_new(MirrorOp, 1); | ||||
| @@ -279,47 +249,151 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||||
|      * from s->buf_free. | ||||
|      */ | ||||
|     qemu_iovec_init(&op->qiov, nb_chunks); | ||||
|     next_sector = sector_num; | ||||
|     while (nb_chunks-- > 0) { | ||||
|         MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free); | ||||
|         size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size; | ||||
|         size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size; | ||||
|  | ||||
|         QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next); | ||||
|         s->buf_free_count--; | ||||
|         qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining)); | ||||
|  | ||||
|         /* Advance the HBitmapIter in parallel, so that we do not examine | ||||
|          * the same sector twice. | ||||
|          */ | ||||
|         if (next_sector > hbitmap_next_sector | ||||
|             && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) { | ||||
|             hbitmap_next_sector = hbitmap_iter_next(&s->hbi); | ||||
|         } | ||||
|  | ||||
|         next_sector += sectors_per_chunk; | ||||
|     } | ||||
|  | ||||
|     bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors); | ||||
|  | ||||
|     /* Copy the dirty cluster.  */ | ||||
|     s->in_flight++; | ||||
|     s->sectors_in_flight += nb_sectors; | ||||
|     trace_mirror_one_iteration(s, sector_num, nb_sectors); | ||||
|  | ||||
|     ret = bdrv_get_block_status_above(source, NULL, sector_num, | ||||
|                                       nb_sectors, &pnum, &file); | ||||
|     if (ret < 0 || pnum < nb_sectors || | ||||
|             (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) { | ||||
|         bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors, | ||||
|                        mirror_read_complete, op); | ||||
|     } else if (ret & BDRV_BLOCK_ZERO) { | ||||
|     bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors, | ||||
|                    mirror_read_complete, op); | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static void mirror_do_zero_or_discard(MirrorBlockJob *s, | ||||
|                                       int64_t sector_num, | ||||
|                                       int nb_sectors, | ||||
|                                       bool is_discard) | ||||
| { | ||||
|     MirrorOp *op; | ||||
|  | ||||
|     /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed | ||||
|      * so the freeing in mirror_iteration_done is nop. */ | ||||
|     op = g_new0(MirrorOp, 1); | ||||
|     op->s = s; | ||||
|     op->sector_num = sector_num; | ||||
|     op->nb_sectors = nb_sectors; | ||||
|  | ||||
|     s->in_flight++; | ||||
|     s->sectors_in_flight += nb_sectors; | ||||
|     if (is_discard) { | ||||
|         bdrv_aio_discard(s->target, sector_num, op->nb_sectors, | ||||
|                          mirror_write_complete, op); | ||||
|     } else { | ||||
|         bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors, | ||||
|                               s->unmap ? BDRV_REQ_MAY_UNMAP : 0, | ||||
|                               mirror_write_complete, op); | ||||
|     } else { | ||||
|         assert(!(ret & BDRV_BLOCK_DATA)); | ||||
|         bdrv_aio_discard(s->target, sector_num, op->nb_sectors, | ||||
|                          mirror_write_complete, op); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s) | ||||
| { | ||||
|     BlockDriverState *source = s->common.bs; | ||||
|     int64_t sector_num; | ||||
|     uint64_t delay_ns = 0; | ||||
|     /* At least the first dirty chunk is mirrored in one iteration. */ | ||||
|     int nb_chunks = 1; | ||||
|     int64_t end = s->bdev_length / BDRV_SECTOR_SIZE; | ||||
|     int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS; | ||||
|  | ||||
|     sector_num = hbitmap_iter_next(&s->hbi); | ||||
|     if (sector_num < 0) { | ||||
|         bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi); | ||||
|         sector_num = hbitmap_iter_next(&s->hbi); | ||||
|         trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap)); | ||||
|         assert(sector_num >= 0); | ||||
|     } | ||||
|  | ||||
|     /* Find the number of consective dirty chunks following the first dirty | ||||
|      * one, and wait for in flight requests in them. */ | ||||
|     while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) { | ||||
|         int64_t hbitmap_next; | ||||
|         int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk; | ||||
|         int64_t next_chunk = next_sector / sectors_per_chunk; | ||||
|         if (next_sector >= end || | ||||
|             !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) { | ||||
|             break; | ||||
|         } | ||||
|         if (test_bit(next_chunk, s->in_flight_bitmap)) { | ||||
|             if (nb_chunks > 0) { | ||||
|                 break; | ||||
|             } | ||||
|             trace_mirror_yield_in_flight(s, next_sector, s->in_flight); | ||||
|             mirror_wait_for_io(s); | ||||
|             /* Now retry.  */ | ||||
|         } else { | ||||
|             hbitmap_next = hbitmap_iter_next(&s->hbi); | ||||
|             assert(hbitmap_next == next_sector); | ||||
|             nb_chunks++; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     /* Clear dirty bits before querying the block status, because | ||||
|      * calling bdrv_get_block_status_above could yield - if some blocks are | ||||
|      * marked dirty in this window, we need to know. | ||||
|      */ | ||||
|     bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, | ||||
|                             nb_chunks * sectors_per_chunk); | ||||
|     bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks); | ||||
|     while (nb_chunks > 0 && sector_num < end) { | ||||
|         int ret; | ||||
|         int io_sectors; | ||||
|         BlockDriverState *file; | ||||
|         enum MirrorMethod { | ||||
|             MIRROR_METHOD_COPY, | ||||
|             MIRROR_METHOD_ZERO, | ||||
|             MIRROR_METHOD_DISCARD | ||||
|         } mirror_method = MIRROR_METHOD_COPY; | ||||
|  | ||||
|         assert(!(sector_num % sectors_per_chunk)); | ||||
|         ret = bdrv_get_block_status_above(source, NULL, sector_num, | ||||
|                                           nb_chunks * sectors_per_chunk, | ||||
|                                           &io_sectors, &file); | ||||
|         if (ret < 0) { | ||||
|             io_sectors = nb_chunks * sectors_per_chunk; | ||||
|         } | ||||
|  | ||||
|         io_sectors -= io_sectors % sectors_per_chunk; | ||||
|         if (io_sectors < sectors_per_chunk) { | ||||
|             io_sectors = sectors_per_chunk; | ||||
|         } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) { | ||||
|             int64_t target_sector_num; | ||||
|             int target_nb_sectors; | ||||
|             bdrv_round_to_clusters(s->target, sector_num, io_sectors, | ||||
|                                    &target_sector_num, &target_nb_sectors); | ||||
|             if (target_sector_num == sector_num && | ||||
|                 target_nb_sectors == io_sectors) { | ||||
|                 mirror_method = ret & BDRV_BLOCK_ZERO ? | ||||
|                                     MIRROR_METHOD_ZERO : | ||||
|                                     MIRROR_METHOD_DISCARD; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         switch (mirror_method) { | ||||
|         case MIRROR_METHOD_COPY: | ||||
|             io_sectors = mirror_do_read(s, sector_num, io_sectors); | ||||
|             break; | ||||
|         case MIRROR_METHOD_ZERO: | ||||
|             mirror_do_zero_or_discard(s, sector_num, io_sectors, false); | ||||
|             break; | ||||
|         case MIRROR_METHOD_DISCARD: | ||||
|             mirror_do_zero_or_discard(s, sector_num, io_sectors, true); | ||||
|             break; | ||||
|         default: | ||||
|             abort(); | ||||
|         } | ||||
|         assert(io_sectors); | ||||
|         sector_num += io_sectors; | ||||
|         nb_chunks -= io_sectors / sectors_per_chunk; | ||||
|         delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors); | ||||
|     } | ||||
|     return delay_ns; | ||||
| } | ||||
| @@ -344,9 +418,7 @@ static void mirror_free_init(MirrorBlockJob *s) | ||||
| static void mirror_drain(MirrorBlockJob *s) | ||||
| { | ||||
|     while (s->in_flight > 0) { | ||||
|         s->waiting_for_io = true; | ||||
|         qemu_coroutine_yield(); | ||||
|         s->waiting_for_io = false; | ||||
|         mirror_wait_for_io(s); | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -420,6 +492,7 @@ static void coroutine_fn mirror_run(void *opaque) | ||||
|                                  checking for a NULL string */ | ||||
|     int ret = 0; | ||||
|     int n; | ||||
|     int target_cluster_size = BDRV_SECTOR_SIZE; | ||||
|  | ||||
|     if (block_job_is_cancelled(&s->common)) { | ||||
|         goto immediate_exit; | ||||
| @@ -449,16 +522,16 @@ static void coroutine_fn mirror_run(void *opaque) | ||||
|      */ | ||||
|     bdrv_get_backing_filename(s->target, backing_filename, | ||||
|                               sizeof(backing_filename)); | ||||
|     if (backing_filename[0] && !s->target->backing) { | ||||
|         ret = bdrv_get_info(s->target, &bdi); | ||||
|         if (ret < 0) { | ||||
|             goto immediate_exit; | ||||
|         } | ||||
|         if (s->granularity < bdi.cluster_size) { | ||||
|             s->buf_size = MAX(s->buf_size, bdi.cluster_size); | ||||
|             s->cow_bitmap = bitmap_new(length); | ||||
|         } | ||||
|     if (!bdrv_get_info(s->target, &bdi) && bdi.cluster_size) { | ||||
|         target_cluster_size = bdi.cluster_size; | ||||
|     } | ||||
|     if (backing_filename[0] && !s->target->backing | ||||
|         && s->granularity < target_cluster_size) { | ||||
|         s->buf_size = MAX(s->buf_size, target_cluster_size); | ||||
|         s->cow_bitmap = bitmap_new(length); | ||||
|     } | ||||
|     s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS; | ||||
|     s->max_iov = MIN(s->common.bs->bl.max_iov, s->target->bl.max_iov); | ||||
|  | ||||
|     end = s->bdev_length / BDRV_SECTOR_SIZE; | ||||
|     s->buf = qemu_try_blockalign(bs, s->buf_size); | ||||
| @@ -533,9 +606,7 @@ static void coroutine_fn mirror_run(void *opaque) | ||||
|             if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 || | ||||
|                 (cnt == 0 && s->in_flight > 0)) { | ||||
|                 trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt); | ||||
|                 s->waiting_for_io = true; | ||||
|                 qemu_coroutine_yield(); | ||||
|                 s->waiting_for_io = false; | ||||
|                 mirror_wait_for_io(s); | ||||
|                 continue; | ||||
|             } else if (cnt != 0) { | ||||
|                 delay_ns = mirror_iteration(s); | ||||
|   | ||||
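
In the mirror patch above, mirror_cow_align() widens a request to target-cluster boundaries whenever the COW bitmap says the covered clusters have not been copied yet, and returns how far the tail moved so the caller can adjust its accounting. Below is a standalone toy model of that rounding with assumed numbers; it stands in for bdrv_round_to_clusters() with a plain align-down/align-up, whereas the real code also clips against the buffer and max_iov limits:

```c
#include <stdint.h>
#include <stdio.h>

/* Same semantics as QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP for non-negative n. */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
#define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

int main(void)
{
    int64_t cluster    = 4096;   /* target cluster in sectors (2 MiB), assumed */
    int64_t sector_num = 1000;   /* first dirty sector, assumed */
    int64_t nb         = 128;    /* request length in sectors, assumed */

    int64_t aligned_start = ALIGN_DOWN(sector_num, cluster);
    int64_t aligned_end   = ALIGN_UP(sector_num + nb, cluster);
    int64_t tail_delta    = aligned_end - (sector_num + nb);

    printf("[%ld, +%ld) widens to [%ld, +%ld), tail moved by %ld sectors\n",
           (long)sector_num, (long)nb, (long)aligned_start,
           (long)(aligned_end - aligned_start), (long)tail_delta);
    return 0;
}
```
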
							
								
								
									
block/nbd.c (14)

							| @@ -204,18 +204,20 @@ static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, char **export, | ||||
|     saddr = g_new0(SocketAddress, 1); | ||||
|  | ||||
|     if (qdict_haskey(options, "path")) { | ||||
|         UnixSocketAddress *q_unix; | ||||
|         saddr->type = SOCKET_ADDRESS_KIND_UNIX; | ||||
|         saddr->u.q_unix = g_new0(UnixSocketAddress, 1); | ||||
|         saddr->u.q_unix->path = g_strdup(qdict_get_str(options, "path")); | ||||
|         q_unix = saddr->u.q_unix = g_new0(UnixSocketAddress, 1); | ||||
|         q_unix->path = g_strdup(qdict_get_str(options, "path")); | ||||
|         qdict_del(options, "path"); | ||||
|     } else { | ||||
|         InetSocketAddress *inet; | ||||
|         saddr->type = SOCKET_ADDRESS_KIND_INET; | ||||
|         saddr->u.inet = g_new0(InetSocketAddress, 1); | ||||
|         saddr->u.inet->host = g_strdup(qdict_get_str(options, "host")); | ||||
|         inet = saddr->u.inet = g_new0(InetSocketAddress, 1); | ||||
|         inet->host = g_strdup(qdict_get_str(options, "host")); | ||||
|         if (!qdict_get_try_str(options, "port")) { | ||||
|             saddr->u.inet->port = g_strdup_printf("%d", NBD_DEFAULT_PORT); | ||||
|             inet->port = g_strdup_printf("%d", NBD_DEFAULT_PORT); | ||||
|         } else { | ||||
|             saddr->u.inet->port = g_strdup(qdict_get_str(options, "port")); | ||||
|             inet->port = g_strdup(qdict_get_str(options, "port")); | ||||
|         } | ||||
|         qdict_del(options, "host"); | ||||
|         qdict_del(options, "port"); | ||||
|   | ||||
block/nfs.c | 12
							| @@ -36,6 +36,7 @@ | ||||
| #include <nfsc/libnfs.h> | ||||
|  | ||||
| #define QEMU_NFS_MAX_READAHEAD_SIZE 1048576 | ||||
| #define QEMU_NFS_MAX_DEBUG_LEVEL 2 | ||||
|  | ||||
| typedef struct NFSClient { | ||||
|     struct nfs_context *context; | ||||
| @@ -333,6 +334,17 @@ static int64_t nfs_client_open(NFSClient *client, const char *filename, | ||||
|                 val = QEMU_NFS_MAX_READAHEAD_SIZE; | ||||
|             } | ||||
|             nfs_set_readahead(client->context, val); | ||||
| #endif | ||||
| #ifdef LIBNFS_FEATURE_DEBUG | ||||
|         } else if (!strcmp(qp->p[i].name, "debug")) { | ||||
|             /* limit the maximum debug level to avoid potential flooding | ||||
|              * of our log files. */ | ||||
|             if (val > QEMU_NFS_MAX_DEBUG_LEVEL) { | ||||
|                 error_report("NFS Warning: Limiting NFS debug level" | ||||
|                              " to %d", QEMU_NFS_MAX_DEBUG_LEVEL); | ||||
|                 val = QEMU_NFS_MAX_DEBUG_LEVEL; | ||||
|             } | ||||
|             nfs_set_debug(client->context, val); | ||||
| #endif | ||||
|         } else { | ||||
|             error_setg(errp, "Unknown NFS parameter name: %s", | ||||
|   | ||||
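With this hunk the NFS driver accepts a "debug" URI query parameter and clamps it to QEMU_NFS_MAX_DEBUG_LEVEL. A rough usage sketch (server address and export path are illustrative):

    $ qemu-system-x86_64 \
        -drive file=nfs://192.0.2.1/export/disk.img?debug=2,format=raw

Requesting a higher level, say debug=99, still works, but the driver prints the "Limiting NFS debug level" warning and caps the value at 2.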
block/rbd.c | 47
							| @@ -16,6 +16,7 @@ | ||||
| #include "qemu-common.h" | ||||
| #include "qemu/error-report.h" | ||||
| #include "block/block_int.h" | ||||
| #include "crypto/secret.h" | ||||
|  | ||||
| #include <rbd/librbd.h> | ||||
|  | ||||
| @@ -228,6 +229,27 @@ static char *qemu_rbd_parse_clientname(const char *conf, char *clientname) | ||||
|     return NULL; | ||||
| } | ||||
|  | ||||
|  | ||||
| static int qemu_rbd_set_auth(rados_t cluster, const char *secretid, | ||||
|                              Error **errp) | ||||
| { | ||||
|     if (secretid == 0) { | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     gchar *secret = qcrypto_secret_lookup_as_base64(secretid, | ||||
|                                                     errp); | ||||
|     if (!secret) { | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     rados_conf_set(cluster, "key", secret); | ||||
|     g_free(secret); | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
|  | ||||
| static int qemu_rbd_set_conf(rados_t cluster, const char *conf, | ||||
|                              bool only_read_conf_file, | ||||
|                              Error **errp) | ||||
| @@ -299,10 +321,13 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp) | ||||
|     char conf[RBD_MAX_CONF_SIZE]; | ||||
|     char clientname_buf[RBD_MAX_CONF_SIZE]; | ||||
|     char *clientname; | ||||
|     const char *secretid; | ||||
|     rados_t cluster; | ||||
|     rados_ioctx_t io_ctx; | ||||
|     int ret; | ||||
|  | ||||
|     secretid = qemu_opt_get(opts, "password-secret"); | ||||
|  | ||||
|     if (qemu_rbd_parsename(filename, pool, sizeof(pool), | ||||
|                            snap_buf, sizeof(snap_buf), | ||||
|                            name, sizeof(name), | ||||
| @@ -350,6 +375,11 @@ static int qemu_rbd_create(const char *filename, QemuOpts *opts, Error **errp) | ||||
|         return -EIO; | ||||
|     } | ||||
|  | ||||
|     if (qemu_rbd_set_auth(cluster, secretid, errp) < 0) { | ||||
|         rados_shutdown(cluster); | ||||
|         return -EIO; | ||||
|     } | ||||
|  | ||||
|     if (rados_connect(cluster) < 0) { | ||||
|         error_setg(errp, "error connecting"); | ||||
|         rados_shutdown(cluster); | ||||
| @@ -423,6 +453,11 @@ static QemuOptsList runtime_opts = { | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "Specification of the rbd image", | ||||
|         }, | ||||
|         { | ||||
|             .name = "password-secret", | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "ID of secret providing the password", | ||||
|         }, | ||||
|         { /* end of list */ } | ||||
|     }, | ||||
| }; | ||||
| @@ -436,6 +471,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|     char conf[RBD_MAX_CONF_SIZE]; | ||||
|     char clientname_buf[RBD_MAX_CONF_SIZE]; | ||||
|     char *clientname; | ||||
|     const char *secretid; | ||||
|     QemuOpts *opts; | ||||
|     Error *local_err = NULL; | ||||
|     const char *filename; | ||||
| @@ -450,6 +486,7 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|     } | ||||
|  | ||||
|     filename = qemu_opt_get(opts, "filename"); | ||||
|     secretid = qemu_opt_get(opts, "password-secret"); | ||||
|  | ||||
|     if (qemu_rbd_parsename(filename, pool, sizeof(pool), | ||||
|                            snap_buf, sizeof(snap_buf), | ||||
| @@ -488,6 +525,11 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (qemu_rbd_set_auth(s->cluster, secretid, errp) < 0) { | ||||
|         r = -EIO; | ||||
|         goto failed_shutdown; | ||||
|     } | ||||
|  | ||||
|     /* | ||||
|      * Fallback to more conservative semantics if setting cache | ||||
|      * options fails. Ignore errors from setting rbd_cache because the | ||||
| @@ -919,6 +961,11 @@ static QemuOptsList qemu_rbd_create_opts = { | ||||
|             .type = QEMU_OPT_SIZE, | ||||
|             .help = "RBD object size" | ||||
|         }, | ||||
|         { | ||||
|             .name = "password-secret", | ||||
|             .type = QEMU_OPT_STRING, | ||||
|             .help = "ID of secret providing the password", | ||||
|         }, | ||||
|         { /* end of list */ } | ||||
|     } | ||||
| }; | ||||
|   | ||||
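The new password-secret option lets the cephx key be supplied through a QCryptoSecret object instead of being embedded in the image filename. A hedged usage sketch (the object id, pool, image and user names are illustrative):

    $ qemu-system-x86_64 \
        -object secret,id=cephpass,file=/etc/qemu/rbd.password \
        -drive file=rbd:pool/image:id=admin,password-secret=cephpass,format=raw

qemu_rbd_set_auth() then looks the secret up by id and hands it to librados as the "key" configuration value.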
block/sheepdog.c | 125
							| @@ -284,6 +284,12 @@ static inline bool is_snapshot(struct SheepdogInode *inode) | ||||
|     return !!inode->snap_ctime; | ||||
| } | ||||
|  | ||||
| static inline size_t count_data_objs(const struct SheepdogInode *inode) | ||||
| { | ||||
|     return DIV_ROUND_UP(inode->vdi_size, | ||||
|                         (1UL << inode->block_size_shift)); | ||||
| } | ||||
|  | ||||
| #undef DPRINTF | ||||
| #ifdef DEBUG_SDOG | ||||
| #define DPRINTF(fmt, args...)                                       \ | ||||
| @@ -2478,13 +2484,128 @@ out: | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| #define NR_BATCHED_DISCARD 128 | ||||
|  | ||||
| static bool remove_objects(BDRVSheepdogState *s) | ||||
| { | ||||
|     int fd, i = 0, nr_objs = 0; | ||||
|     Error *local_err = NULL; | ||||
|     int ret = 0; | ||||
|     bool result = true; | ||||
|     SheepdogInode *inode = &s->inode; | ||||
|  | ||||
|     fd = connect_to_sdog(s, &local_err); | ||||
|     if (fd < 0) { | ||||
|         error_report_err(local_err); | ||||
|         return false; | ||||
|     } | ||||
|  | ||||
|     nr_objs = count_data_objs(inode); | ||||
|     while (i < nr_objs) { | ||||
|         int start_idx, nr_filled_idx; | ||||
|  | ||||
|         while (i < nr_objs && !inode->data_vdi_id[i]) { | ||||
|             i++; | ||||
|         } | ||||
|         start_idx = i; | ||||
|  | ||||
|         nr_filled_idx = 0; | ||||
|         while (i < nr_objs && nr_filled_idx < NR_BATCHED_DISCARD) { | ||||
|             if (inode->data_vdi_id[i]) { | ||||
|                 inode->data_vdi_id[i] = 0; | ||||
|                 nr_filled_idx++; | ||||
|             } | ||||
|  | ||||
|             i++; | ||||
|         } | ||||
|  | ||||
|         ret = write_object(fd, s->aio_context, | ||||
|                            (char *)&inode->data_vdi_id[start_idx], | ||||
|                            vid_to_vdi_oid(s->inode.vdi_id), inode->nr_copies, | ||||
|                            (i - start_idx) * sizeof(uint32_t), | ||||
|                            offsetof(struct SheepdogInode, | ||||
|                                     data_vdi_id[start_idx]), | ||||
|                            false, s->cache_flags); | ||||
|         if (ret < 0) { | ||||
|             error_report("failed to discard snapshot inode."); | ||||
|             result = false; | ||||
|             goto out; | ||||
|         } | ||||
|     } | ||||
|  | ||||
| out: | ||||
|     closesocket(fd); | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| static int sd_snapshot_delete(BlockDriverState *bs, | ||||
|                               const char *snapshot_id, | ||||
|                               const char *name, | ||||
|                               Error **errp) | ||||
| { | ||||
|     /* FIXME: Delete specified snapshot id.  */ | ||||
|     return 0; | ||||
|     uint32_t snap_id = 0; | ||||
|     char snap_tag[SD_MAX_VDI_TAG_LEN]; | ||||
|     Error *local_err = NULL; | ||||
|     int fd, ret; | ||||
|     char buf[SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN]; | ||||
|     BDRVSheepdogState *s = bs->opaque; | ||||
|     unsigned int wlen = SD_MAX_VDI_LEN + SD_MAX_VDI_TAG_LEN, rlen = 0; | ||||
|     uint32_t vid; | ||||
|     SheepdogVdiReq hdr = { | ||||
|         .opcode = SD_OP_DEL_VDI, | ||||
|         .data_length = wlen, | ||||
|         .flags = SD_FLAG_CMD_WRITE, | ||||
|     }; | ||||
|     SheepdogVdiRsp *rsp = (SheepdogVdiRsp *)&hdr; | ||||
|  | ||||
|     if (!remove_objects(s)) { | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     memset(buf, 0, sizeof(buf)); | ||||
|     memset(snap_tag, 0, sizeof(snap_tag)); | ||||
|     pstrcpy(buf, SD_MAX_VDI_LEN, s->name); | ||||
|     if (qemu_strtoul(snapshot_id, NULL, 10, (unsigned long *)&snap_id)) { | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     if (snap_id) { | ||||
|         hdr.snapid = snap_id; | ||||
|     } else { | ||||
|         pstrcpy(snap_tag, sizeof(snap_tag), snapshot_id); | ||||
|         pstrcpy(buf + SD_MAX_VDI_LEN, SD_MAX_VDI_TAG_LEN, snap_tag); | ||||
|     } | ||||
|  | ||||
|     ret = find_vdi_name(s, s->name, snap_id, snap_tag, &vid, true, | ||||
|                         &local_err); | ||||
|     if (ret) { | ||||
|         return ret; | ||||
|     } | ||||
|  | ||||
|     fd = connect_to_sdog(s, &local_err); | ||||
|     if (fd < 0) { | ||||
|         error_report_err(local_err); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     ret = do_req(fd, s->aio_context, (SheepdogReq *)&hdr, | ||||
|                  buf, &wlen, &rlen); | ||||
|     closesocket(fd); | ||||
|     if (ret) { | ||||
|         return ret; | ||||
|     } | ||||
|  | ||||
|     switch (rsp->result) { | ||||
|     case SD_RES_NO_VDI: | ||||
|         error_report("%s was already deleted", s->name); | ||||
|     case SD_RES_SUCCESS: | ||||
|         break; | ||||
|     default: | ||||
|         error_report("%s, %s", sd_strerror(rsp->result), s->name); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static int sd_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab) | ||||
|   | ||||
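As a worked example of the batching above (sizes are illustrative, not taken from the patch): a 4 GiB VDI with sheepdog's typical 4 MiB object size (block_size_shift of 22) makes count_data_objs() return DIV_ROUND_UP(4 GiB, 4 MiB) = 1024 inode entries, so if every object is allocated, remove_objects() clears and writes back the data_vdi_id[] ranges in ceil(1024 / 128) = 8 batched write_object() calls.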
block/vhdx.c | 18
							| @@ -264,10 +264,10 @@ static void vhdx_region_unregister_all(BDRVVHDXState *s) | ||||
|  | ||||
| static void vhdx_set_shift_bits(BDRVVHDXState *s) | ||||
| { | ||||
|     s->logical_sector_size_bits = 31 - clz32(s->logical_sector_size); | ||||
|     s->sectors_per_block_bits =   31 - clz32(s->sectors_per_block); | ||||
|     s->chunk_ratio_bits =         63 - clz64(s->chunk_ratio); | ||||
|     s->block_size_bits =          31 - clz32(s->block_size); | ||||
|     s->logical_sector_size_bits = ctz32(s->logical_sector_size); | ||||
|     s->sectors_per_block_bits =   ctz32(s->sectors_per_block); | ||||
|     s->chunk_ratio_bits =         ctz64(s->chunk_ratio); | ||||
|     s->block_size_bits =          ctz32(s->block_size); | ||||
| } | ||||
|  | ||||
| /* | ||||
| @@ -857,14 +857,8 @@ static void vhdx_calc_bat_entries(BDRVVHDXState *s) | ||||
| { | ||||
|     uint32_t data_blocks_cnt, bitmap_blocks_cnt; | ||||
|  | ||||
|     data_blocks_cnt = s->virtual_disk_size >> s->block_size_bits; | ||||
|     if (s->virtual_disk_size - (data_blocks_cnt << s->block_size_bits)) { | ||||
|         data_blocks_cnt++; | ||||
|     } | ||||
|     bitmap_blocks_cnt = data_blocks_cnt >> s->chunk_ratio_bits; | ||||
|     if (data_blocks_cnt - (bitmap_blocks_cnt << s->chunk_ratio_bits)) { | ||||
|         bitmap_blocks_cnt++; | ||||
|     } | ||||
|     data_blocks_cnt = DIV_ROUND_UP(s->virtual_disk_size, s->block_size); | ||||
|     bitmap_blocks_cnt = DIV_ROUND_UP(data_blocks_cnt, s->chunk_ratio); | ||||
|  | ||||
|     if (s->parent_entries) { | ||||
|         s->bat_entries = bitmap_blocks_cnt * (s->chunk_ratio + 1); | ||||
|   | ||||
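Both hunks rely on these fields being powers of two: for such values 31 - clz32(x) and ctz32(x) are the same log2(x), and the removed shift-and-test round-up is exactly DIV_ROUND_UP. A small self-contained sketch of the equivalence (clz32/ctz32 are approximated with GCC builtins here; QEMU's own versions live in qemu/host-utils.h, and the sizes are illustrative):

    #include <assert.h>
    #include <stdint.h>

    static inline uint32_t clz32(uint32_t v) { return v ? __builtin_clz(v) : 32; }
    static inline uint32_t ctz32(uint32_t v) { return v ? __builtin_ctz(v) : 32; }
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint32_t logical_sector_size = 512;        /* power of two, as VHDX requires */
        uint64_t virtual_disk_size = 10ULL << 30;  /* 10 GiB */
        uint32_t block_size = 32u << 20;           /* 32 MiB */

        /* log2 either way: both expressions evaluate to 9 for 512 */
        assert(31 - clz32(logical_sector_size) == ctz32(logical_sector_size));
        /* DIV_ROUND_UP replaces the open-coded "shift, compare, maybe add one" */
        assert(DIV_ROUND_UP(virtual_disk_size, block_size) == 320);
        return 0;
    }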
blockdev.c | 31
							| @@ -1202,15 +1202,11 @@ void hmp_commit(Monitor *mon, const QDict *qdict) | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void blockdev_do_action(TransactionActionKind type, void *data, | ||||
|                                Error **errp) | ||||
| static void blockdev_do_action(TransactionAction *action, Error **errp) | ||||
| { | ||||
|     TransactionAction action; | ||||
|     TransactionActionList list; | ||||
|  | ||||
|     action.type = type; | ||||
|     action.u.data = data; | ||||
|     list.value = &action; | ||||
|     list.value = action; | ||||
|     list.next = NULL; | ||||
|     qmp_transaction(&list, false, NULL, errp); | ||||
| } | ||||
| @@ -1236,8 +1232,11 @@ void qmp_blockdev_snapshot_sync(bool has_device, const char *device, | ||||
|         .has_mode = has_mode, | ||||
|         .mode = mode, | ||||
|     }; | ||||
|     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC, | ||||
|                        &snapshot, errp); | ||||
|     TransactionAction action = { | ||||
|         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC, | ||||
|         .u.blockdev_snapshot_sync = &snapshot, | ||||
|     }; | ||||
|     blockdev_do_action(&action, errp); | ||||
| } | ||||
|  | ||||
| void qmp_blockdev_snapshot(const char *node, const char *overlay, | ||||
| @@ -1247,9 +1246,11 @@ void qmp_blockdev_snapshot(const char *node, const char *overlay, | ||||
|         .node = (char *) node, | ||||
|         .overlay = (char *) overlay | ||||
|     }; | ||||
|  | ||||
|     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT, | ||||
|                        &snapshot_data, errp); | ||||
|     TransactionAction action = { | ||||
|         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT, | ||||
|         .u.blockdev_snapshot = &snapshot_data, | ||||
|     }; | ||||
|     blockdev_do_action(&action, errp); | ||||
| } | ||||
|  | ||||
| void qmp_blockdev_snapshot_internal_sync(const char *device, | ||||
| @@ -1260,9 +1261,11 @@ void qmp_blockdev_snapshot_internal_sync(const char *device, | ||||
|         .device = (char *) device, | ||||
|         .name = (char *) name | ||||
|     }; | ||||
|  | ||||
|     blockdev_do_action(TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC, | ||||
|                        &snapshot, errp); | ||||
|     TransactionAction action = { | ||||
|         .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC, | ||||
|         .u.blockdev_snapshot_internal_sync = &snapshot, | ||||
|     }; | ||||
|     blockdev_do_action(&action, errp); | ||||
| } | ||||
|  | ||||
| SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, | ||||
|   | ||||
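For context, blockdev_do_action() wraps a single action into the same one-element list that a QMP client would send via the transaction command; on the wire the equivalent request looks roughly like this (device and file names are illustrative):

    -> { "execute": "transaction",
         "arguments": { "actions": [
           { "type": "blockdev-snapshot-sync",
             "data": { "device": "drive0",
                       "snapshot-file": "/tmp/overlay.qcow2",
                       "format": "qcow2" } } ] } }
    <- { "return": {} }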
| @@ -1,7 +1,7 @@ | ||||
| = How to use the QAPI code generator = | ||||
|  | ||||
| Copyright IBM Corp. 2011 | ||||
| Copyright (C) 2012-2015 Red Hat, Inc. | ||||
| Copyright (C) 2012-2016 Red Hat, Inc. | ||||
|  | ||||
| This work is licensed under the terms of the GNU GPL, version 2 or | ||||
| later. See the COPYING file in the top-level directory. | ||||
| @@ -52,7 +52,7 @@ schema.  The documentation is delimited between two lines of ##, then | ||||
| the first line names the expression, an optional overview is provided, | ||||
| then individual documentation about each member of 'data' is provided, | ||||
| and finally, a 'Since: x.y.z' tag lists the release that introduced | ||||
| the expression.  Optional fields are tagged with the phrase | ||||
| the expression.  Optional members are tagged with the phrase | ||||
| '#optional', often with their default value; and extensions added | ||||
| after the expression was first released are also given a '(since | ||||
| x.y.z)' comment.  For example: | ||||
| @@ -108,15 +108,15 @@ user-defined type names, while built-in types are lowercase. Type | ||||
| definitions should not end in 'Kind', as this namespace is used for | ||||
| creating implicit C enums for visiting union types, or in 'List', as | ||||
| this namespace is used for creating array types.  Command names, | ||||
| and field names within a type, should be all lower case with words | ||||
| and member names within a type, should be all lower case with words | ||||
| separated by a hyphen.  However, some existing older commands and | ||||
| complex types use underscore; when extending such expressions, | ||||
| consistency is preferred over blindly avoiding underscore.  Event | ||||
| names should be ALL_CAPS with words separated by underscore.  Field | ||||
| names should be ALL_CAPS with words separated by underscore.  Member | ||||
| names cannot start with 'has-' or 'has_', as this is reserved for | ||||
| tracking optional fields. | ||||
| tracking optional members. | ||||
|  | ||||
| Any name (command, event, type, field, or enum value) beginning with | ||||
| Any name (command, event, type, member, or enum value) beginning with | ||||
| "x-" is marked experimental, and may be withdrawn or changed | ||||
| incompatibly in a future release.  All names must begin with a letter, | ||||
| and contain only ASCII letters, digits, dash, and underscore.  There | ||||
| @@ -127,7 +127,7 @@ the vendor), even if the rest of the name uses dash (example: | ||||
| __com.redhat_drive-mirror).  Names beginning with 'q_' are reserved | ||||
| for the generator: QMP names that resemble C keywords or other | ||||
| problematic strings will be munged in C to use this prefix.  For | ||||
| example, a field named "default" in qapi becomes "q_default" in the | ||||
| example, a member named "default" in qapi becomes "q_default" in the | ||||
| generated C code. | ||||
|  | ||||
| In the rest of this document, usage lines are given for each | ||||
| @@ -217,17 +217,18 @@ and must continue to work). | ||||
|  | ||||
| On output structures (only mentioned in the 'returns' side of a command), | ||||
| changing from mandatory to optional is in general unsafe (older clients may be | ||||
| expecting the field, and could crash if it is missing), although it can be done | ||||
| if the only way that the optional argument will be omitted is when it is | ||||
| triggered by the presence of a new input flag to the command that older clients | ||||
| don't know to send.  Changing from optional to mandatory is safe. | ||||
| expecting the member, and could crash if it is missing), although it | ||||
| can be done if the only way that the optional argument will be omitted | ||||
| is when it is triggered by the presence of a new input flag to the | ||||
| command that older clients don't know to send.  Changing from optional | ||||
| to mandatory is safe. | ||||
|  | ||||
| A structure that is used in both input and output of various commands | ||||
| must consider the backwards compatibility constraints of both directions | ||||
| of use. | ||||
|  | ||||
| A struct definition can specify another struct as its base. | ||||
| In this case, the fields of the base type are included as top-level fields | ||||
| In this case, the members of the base type are included as top-level members | ||||
| of the new struct's dictionary in the Client JSON Protocol wire | ||||
| format. An example definition is: | ||||
|  | ||||
| @@ -237,7 +238,7 @@ format. An example definition is: | ||||
|    'data': { '*backing': 'str' } } | ||||
|  | ||||
| An example BlockdevOptionsGenericCOWFormat object on the wire could use | ||||
| both fields like this: | ||||
| both members like this: | ||||
|  | ||||
|  { "file": "/some/place/my-image", | ||||
|    "backing": "/some/place/my-backing-file" } | ||||
| @@ -262,7 +263,7 @@ The enum constants will be named by using a heuristic to turn the | ||||
| type name into a set of underscore separated words. For the example | ||||
| above, 'MyEnum' will turn into 'MY_ENUM' giving a constant name | ||||
| of 'MY_ENUM_VALUE1' for the first value. If the default heuristic | ||||
| does not result in a desirable name, the optional 'prefix' field | ||||
| does not result in a desirable name, the optional 'prefix' member | ||||
| can be used when defining the enum. | ||||
|  | ||||
| The enumeration values are passed as strings over the Client JSON | ||||
| @@ -275,9 +276,9 @@ converting between strings and enum values.  Since the wire format | ||||
| always passes by name, it is acceptable to reorder or add new | ||||
| enumeration members in any location without breaking clients of Client | ||||
| JSON Protocol; however, removing enum values would break | ||||
| compatibility.  For any struct that has a field that will only contain | ||||
| a finite set of string values, using an enum type for that field is | ||||
| better than open-coding the field to be type 'str'. | ||||
| compatibility.  For any struct that has a member that will only contain | ||||
| a finite set of string values, using an enum type for that member is | ||||
| better than open-coding the member to be type 'str'. | ||||
|  | ||||
|  | ||||
| === Union types === | ||||
| @@ -305,8 +306,8 @@ values to data types like in this example: | ||||
|              'qcow2': 'Qcow2Options' } } | ||||
|  | ||||
| In the Client JSON Protocol, a simple union is represented by a | ||||
| dictionary that contains the 'type' field as a discriminator, and a | ||||
| 'data' field that is of the specified data type corresponding to the | ||||
| dictionary that contains the 'type' member as a discriminator, and a | ||||
| 'data' member that is of the specified data type corresponding to the | ||||
| discriminator value, as in these examples: | ||||
|  | ||||
|  { "type": "file", "data" : { "filename": "/some/place/my-image" } } | ||||
| @@ -321,14 +322,14 @@ enum.  The value for each branch can be of any type. | ||||
|  | ||||
| A flat union definition specifies a struct as its base, and | ||||
| avoids nesting on the wire.  All branches of the union must be | ||||
| complex types, and the top-level fields of the union dictionary on | ||||
| the wire will be combination of fields from both the base type and the | ||||
| complex types, and the top-level members of the union dictionary on | ||||
| the wire will be a combination of members from both the base type and the | ||||
| appropriate branch type (when merging two dictionaries, there must be | ||||
| no keys in common).  The 'discriminator' field must be the name of an | ||||
| no keys in common).  The 'discriminator' member must be the name of an | ||||
| enum-typed member of the base struct. | ||||
|  | ||||
| The following example enhances the above simple union example by | ||||
| adding a common field 'readonly', renaming the discriminator to | ||||
| adding a common member 'readonly', renaming the discriminator to | ||||
| something more applicable, and reducing the number of {} required on | ||||
| the wire: | ||||
|  | ||||
| @@ -353,8 +354,8 @@ the user, but because it must map to a base member with enum type, the | ||||
| code generator can ensure that branches exist for all values of the | ||||
| enum (although the order of the keys need not match the declaration of | ||||
| the enum).  In the resulting generated C data types, a flat union is | ||||
| represented as a struct with the base member fields included directly, | ||||
| and then a union of structures for each branch of the struct. | ||||
| represented as a struct with the base members included directly, and | ||||
| then a union of structures for each branch of the struct. | ||||
|  | ||||
| A simple union can always be re-written as a flat union where the base | ||||
| class has a single member named 'type', and where each branch of the | ||||
| @@ -424,10 +425,10 @@ string name of a complex type, or a dictionary that declares an | ||||
| anonymous type with the same semantics as a 'struct' expression, with | ||||
| one exception noted below when 'gen' is used. | ||||
|  | ||||
| The 'returns' member describes what will appear in the "return" field | ||||
| The 'returns' member describes what will appear in the "return" member | ||||
| of a Client JSON Protocol reply on successful completion of a command. | ||||
| The member is optional from the command declaration; if absent, the | ||||
| "return" field will be an empty dictionary.  If 'returns' is present, | ||||
| "return" member will be an empty dictionary.  If 'returns' is present, | ||||
| it must be the string name of a complex or built-in type, a | ||||
| one-element array containing the name of a complex or built-in type, | ||||
| with one exception noted below when 'gen' is used.  Although it is | ||||
| @@ -435,7 +436,7 @@ permitted to have the 'returns' member name a built-in type or an | ||||
| array of built-in types, any command that does this cannot be extended | ||||
| to return additional information in the future; thus, new commands | ||||
| should strongly consider returning a dictionary-based type or an array | ||||
| of dictionaries, even if the dictionary only contains one field at the | ||||
| of dictionaries, even if the dictionary only contains one member at the | ||||
| present. | ||||
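A sketch of the guidance above, with illustrative names that are not part of any real schema:

    # Extensible: a dictionary-based return type can grow new members later
    { 'struct': 'FooInfo',
      'data': { 'count': 'int' } }
    { 'command': 'query-foo', 'returns': 'FooInfo' }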
|  | ||||
| All commands in Client JSON Protocol use a dictionary to report | ||||
| @@ -478,7 +479,7 @@ response is not possible (although the command will still return a | ||||
| normal dictionary error on failure).  When a successful reply is not | ||||
| possible, the command expression should include the optional key | ||||
| 'success-response' with boolean value false.  So far, only QGA makes | ||||
| use of this field. | ||||
| use of this member. | ||||
|  | ||||
|  | ||||
| === Events === | ||||
| @@ -656,7 +657,7 @@ Union types | ||||
|  | ||||
|     { "name": "BlockdevOptions", "meta-type": "object", | ||||
|       "members": [ | ||||
|           { "name": "kind", "type": "BlockdevOptionsKind" } ], | ||||
|           { "name": "type", "type": "BlockdevOptionsKind" } ], | ||||
|       "tag": "type", | ||||
|       "variants": [ | ||||
|           { "case": "file", "type": ":obj-FileOptions-wrapper" }, | ||||
| @@ -722,33 +723,38 @@ the names of built-in types.  Clients should examine member | ||||
|  | ||||
| == Code generation == | ||||
|  | ||||
| Schemas are fed into four scripts to generate all the code/files that, | ||||
| Schemas are fed into five scripts to generate all the code/files that, | ||||
| paired with the core QAPI libraries, comprise everything required to | ||||
| take JSON commands read in by a Client JSON Protocol server, unmarshal | ||||
| the arguments into the underlying C types, call into the corresponding | ||||
| C function, and map the response back to a Client JSON Protocol | ||||
| response to be returned to the user. | ||||
| C function, map the response back to a Client JSON Protocol response | ||||
| to be returned to the user, and introspect the commands. | ||||
|  | ||||
| As an example, we'll use the following schema, which describes a single | ||||
| complex user-defined type (which will produce a C struct, along with a list | ||||
| node structure that can be used to chain together a list of such types in | ||||
| case we want to accept/return a list of this type with a command), and a | ||||
| command which takes that type as a parameter and returns the same type: | ||||
| As an example, we'll use the following schema, which describes a | ||||
| single complex user-defined type, along with a command which takes a | ||||
| list of that type as a parameter, and returns a single element of that | ||||
| type.  The user is responsible for writing the implementation of | ||||
| qmp_my_command(); everything else is produced by the generator. | ||||
|  | ||||
|     $ cat example-schema.json | ||||
|     { 'struct': 'UserDefOne', | ||||
|       'data': { 'integer': 'int', 'string': 'str' } } | ||||
|       'data': { 'integer': 'int', '*string': 'str' } } | ||||
|  | ||||
|     { 'command': 'my-command', | ||||
|       'data':    {'arg1': 'UserDefOne'}, | ||||
|       'data': { 'arg1': ['UserDefOne'] }, | ||||
|       'returns': 'UserDefOne' } | ||||
|  | ||||
|     { 'event': 'MY_EVENT' } | ||||
|  | ||||
| For a more thorough look at generated code, the testsuite includes | ||||
| tests/qapi-schema/qapi-schema-tests.json that covers more examples of | ||||
| what the generator will accept, and compiles the resulting C code as | ||||
| part of 'make check-unit'. | ||||
|  | ||||
| === scripts/qapi-types.py === | ||||
|  | ||||
| Used to generate the C types defined by a schema. The following files are | ||||
| created: | ||||
| Used to generate the C types defined by a schema, along with | ||||
| supporting code. The following files are created: | ||||
|  | ||||
| $(prefix)qapi-types.h - C types corresponding to types defined in | ||||
|                         the schema you pass in | ||||
| @@ -763,38 +769,6 @@ Example: | ||||
|  | ||||
|     $ python scripts/qapi-types.py --output-dir="qapi-generated" \ | ||||
|     --prefix="example-" example-schema.json | ||||
|     $ cat qapi-generated/example-qapi-types.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     void qapi_free_UserDefOne(UserDefOne *obj) | ||||
|     { | ||||
|         QapiDeallocVisitor *qdv; | ||||
|         Visitor *v; | ||||
|  | ||||
|         if (!obj) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOne(v, &obj, NULL, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|  | ||||
|     void qapi_free_UserDefOneList(UserDefOneList *obj) | ||||
|     { | ||||
|         QapiDeallocVisitor *qdv; | ||||
|         Visitor *v; | ||||
|  | ||||
|         if (!obj) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOneList(v, &obj, NULL, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|     $ cat qapi-generated/example-qapi-types.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
| @@ -809,29 +783,59 @@ Example: | ||||
|  | ||||
|     struct UserDefOne { | ||||
|         int64_t integer; | ||||
|         bool has_string; | ||||
|         char *string; | ||||
|     }; | ||||
|  | ||||
|     void qapi_free_UserDefOne(UserDefOne *obj); | ||||
|  | ||||
|     struct UserDefOneList { | ||||
|         union { | ||||
|             UserDefOne *value; | ||||
|             uint64_t padding; | ||||
|         }; | ||||
|         UserDefOneList *next; | ||||
|         UserDefOne *value; | ||||
|     }; | ||||
|  | ||||
|     void qapi_free_UserDefOneList(UserDefOneList *obj); | ||||
|  | ||||
|     #endif | ||||
|     $ cat qapi-generated/example-qapi-types.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     void qapi_free_UserDefOne(UserDefOne *obj) | ||||
|     { | ||||
|         QapiDeallocVisitor *qdv; | ||||
|         Visitor *v; | ||||
|  | ||||
|         if (!obj) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOne(v, NULL, &obj, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|  | ||||
|     void qapi_free_UserDefOneList(UserDefOneList *obj) | ||||
|     { | ||||
|         QapiDeallocVisitor *qdv; | ||||
|         Visitor *v; | ||||
|  | ||||
|         if (!obj) { | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOneList(v, NULL, &obj, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|  | ||||
| === scripts/qapi-visit.py === | ||||
|  | ||||
| Used to generate the visitor functions used to walk through and convert | ||||
| a QObject (as provided by QMP) to a native C data structure and | ||||
| vice-versa, as well as the visitor function used to dealloc a complex | ||||
| schema-defined C type. | ||||
| Used to generate the visitor functions used to walk through and | ||||
| convert between a native QAPI C data structure and some other format | ||||
| (such as QObject); the generated functions are named visit_type_FOO() | ||||
| and visit_type_FOO_members(). | ||||
|  | ||||
| The following files are generated: | ||||
|  | ||||
| @@ -848,41 +852,62 @@ Example: | ||||
|  | ||||
|     $ python scripts/qapi-visit.py --output-dir="qapi-generated" | ||||
|     --prefix="example-" example-schema.json | ||||
|     $ cat qapi-generated/example-qapi-visit.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QAPI_VISIT_H | ||||
|     #define EXAMPLE_QAPI_VISIT_H | ||||
|  | ||||
| [Visitors for built-in types omitted...] | ||||
|  | ||||
|     void visit_type_UserDefOne_members(Visitor *v, UserDefOne *obj, Error **errp); | ||||
|     void visit_type_UserDefOne(Visitor *v, const char *name, UserDefOne **obj, Error **errp); | ||||
|     void visit_type_UserDefOneList(Visitor *v, const char *name, UserDefOneList **obj, Error **errp); | ||||
|  | ||||
|     #endif | ||||
|     $ cat qapi-generated/example-qapi-visit.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     static void visit_type_UserDefOne_fields(Visitor *v, UserDefOne **obj, Error **errp) | ||||
|     void visit_type_UserDefOne_members(Visitor *v, UserDefOne *obj, Error **errp) | ||||
|     { | ||||
|         Error *err = NULL; | ||||
|  | ||||
|         visit_type_int(v, &(*obj)->integer, "integer", &err); | ||||
|         visit_type_int(v, "integer", &obj->integer, &err); | ||||
|         if (err) { | ||||
|             goto out; | ||||
|         } | ||||
|         visit_type_str(v, &(*obj)->string, "string", &err); | ||||
|         if (err) { | ||||
|             goto out; | ||||
|         if (visit_optional(v, "string", &obj->has_string)) { | ||||
|             visit_type_str(v, "string", &obj->string, &err); | ||||
|             if (err) { | ||||
|                 goto out; | ||||
|             } | ||||
|         } | ||||
|  | ||||
|     out: | ||||
|         error_propagate(errp, err); | ||||
|     } | ||||
|  | ||||
|     void visit_type_UserDefOne(Visitor *v, UserDefOne **obj, const char *name, Error **errp) | ||||
|     void visit_type_UserDefOne(Visitor *v, const char *name, UserDefOne **obj, Error **errp) | ||||
|     { | ||||
|         Error *err = NULL; | ||||
|  | ||||
|         visit_start_struct(v, (void **)obj, "UserDefOne", name, sizeof(UserDefOne), &err); | ||||
|         if (!err) { | ||||
|             if (*obj) { | ||||
|                 visit_type_UserDefOne_fields(v, obj, errp); | ||||
|             } | ||||
|             visit_end_struct(v, &err); | ||||
|         visit_start_struct(v, name, (void **)obj, sizeof(UserDefOne), &err); | ||||
|         if (err) { | ||||
|             goto out; | ||||
|         } | ||||
|         if (!*obj) { | ||||
|             goto out_obj; | ||||
|         } | ||||
|         visit_type_UserDefOne_members(v, *obj, &err); | ||||
|         error_propagate(errp, err); | ||||
|         err = NULL; | ||||
|     out_obj: | ||||
|         visit_end_struct(v, &err); | ||||
|     out: | ||||
|         error_propagate(errp, err); | ||||
|     } | ||||
|  | ||||
|     void visit_type_UserDefOneList(Visitor *v, UserDefOneList **obj, const char *name, Error **errp) | ||||
|     void visit_type_UserDefOneList(Visitor *v, const char *name, UserDefOneList **obj, Error **errp) | ||||
|     { | ||||
|         Error *err = NULL; | ||||
|         GenericList *i, **prev; | ||||
| @@ -893,35 +918,24 @@ Example: | ||||
|         } | ||||
|  | ||||
|         for (prev = (GenericList **)obj; | ||||
|              !err && (i = visit_next_list(v, prev, &err)) != NULL; | ||||
|              !err && (i = visit_next_list(v, prev, sizeof(**obj))) != NULL; | ||||
|              prev = &i) { | ||||
|             UserDefOneList *native_i = (UserDefOneList *)i; | ||||
|             visit_type_UserDefOne(v, &native_i->value, NULL, &err); | ||||
|             visit_type_UserDefOne(v, NULL, &native_i->value, &err); | ||||
|         } | ||||
|  | ||||
|         error_propagate(errp, err); | ||||
|         err = NULL; | ||||
|         visit_end_list(v, &err); | ||||
|         visit_end_list(v); | ||||
|     out: | ||||
|         error_propagate(errp, err); | ||||
|     } | ||||
|     $ cat qapi-generated/example-qapi-visit.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QAPI_VISIT_H | ||||
|     #define EXAMPLE_QAPI_VISIT_H | ||||
|  | ||||
| [Visitors for built-in types omitted...] | ||||
|  | ||||
|     void visit_type_UserDefOne(Visitor *v, UserDefOne **obj, const char *name, Error **errp); | ||||
|     void visit_type_UserDefOneList(Visitor *v, UserDefOneList **obj, const char *name, Error **errp); | ||||
|  | ||||
|     #endif | ||||
|  | ||||
| === scripts/qapi-commands.py === | ||||
|  | ||||
| Used to generate the marshaling/dispatch functions for the commands defined | ||||
| in the schema. The following files are generated: | ||||
| Used to generate the marshaling/dispatch functions for the commands | ||||
| defined in the schema. The generated code implements | ||||
| qmp_marshal_COMMAND() (mentioned in qmp-commands.hx, and registered | ||||
| automatically), and declares qmp_COMMAND() that the user must | ||||
| implement.  The following files are generated: | ||||
|  | ||||
| $(prefix)qmp-marshal.c: command marshal/dispatch functions for each | ||||
|                         QMP command defined in the schema. Functions | ||||
| @@ -939,6 +953,19 @@ Example: | ||||
|  | ||||
|     $ python scripts/qapi-commands.py --output-dir="qapi-generated" | ||||
|     --prefix="example-" example-schema.json | ||||
|     $ cat qapi-generated/example-qmp-commands.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QMP_COMMANDS_H | ||||
|     #define EXAMPLE_QMP_COMMANDS_H | ||||
|  | ||||
|     #include "example-qapi-types.h" | ||||
|     #include "qapi/qmp/qdict.h" | ||||
|     #include "qapi/error.h" | ||||
|  | ||||
|     UserDefOne *qmp_my_command(UserDefOneList *arg1, Error **errp); | ||||
|  | ||||
|     #endif | ||||
|     $ cat qapi-generated/example-qmp-marshal.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
| @@ -950,7 +977,7 @@ Example: | ||||
|         Visitor *v; | ||||
|  | ||||
|         v = qmp_output_get_visitor(qov); | ||||
|         visit_type_UserDefOne(v, &ret_in, "unused", &err); | ||||
|         visit_type_UserDefOne(v, "unused", &ret_in, &err); | ||||
|         if (err) { | ||||
|             goto out; | ||||
|         } | ||||
| @@ -961,7 +988,7 @@ Example: | ||||
|         qmp_output_visitor_cleanup(qov); | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOne(v, &ret_in, "unused", NULL); | ||||
|         visit_type_UserDefOne(v, "unused", &ret_in, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|  | ||||
| @@ -972,10 +999,10 @@ Example: | ||||
|         QmpInputVisitor *qiv = qmp_input_visitor_new_strict(QOBJECT(args)); | ||||
|         QapiDeallocVisitor *qdv; | ||||
|         Visitor *v; | ||||
|         UserDefOne *arg1 = NULL; | ||||
|         UserDefOneList *arg1 = NULL; | ||||
|  | ||||
|         v = qmp_input_get_visitor(qiv); | ||||
|         visit_type_UserDefOne(v, &arg1, "arg1", &err); | ||||
|         visit_type_UserDefOneList(v, "arg1", &arg1, &err); | ||||
|         if (err) { | ||||
|             goto out; | ||||
|         } | ||||
| @@ -992,7 +1019,7 @@ Example: | ||||
|         qmp_input_visitor_cleanup(qiv); | ||||
|         qdv = qapi_dealloc_visitor_new(); | ||||
|         v = qapi_dealloc_get_visitor(qdv); | ||||
|         visit_type_UserDefOne(v, &arg1, "arg1", NULL); | ||||
|         visit_type_UserDefOneList(v, "arg1", &arg1, NULL); | ||||
|         qapi_dealloc_visitor_cleanup(qdv); | ||||
|     } | ||||
|  | ||||
| @@ -1002,24 +1029,12 @@ Example: | ||||
|     } | ||||
|  | ||||
|     qapi_init(qmp_init_marshal); | ||||
|     $ cat qapi-generated/example-qmp-commands.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QMP_COMMANDS_H | ||||
|     #define EXAMPLE_QMP_COMMANDS_H | ||||
|  | ||||
|     #include "example-qapi-types.h" | ||||
|     #include "qapi/qmp/qdict.h" | ||||
|     #include "qapi/error.h" | ||||
|  | ||||
|     UserDefOne *qmp_my_command(UserDefOne *arg1, Error **errp); | ||||
|  | ||||
|     #endif | ||||
|  | ||||
| === scripts/qapi-event.py === | ||||
|  | ||||
| Used to generate the event-related C code defined by a schema. The | ||||
| following files are created: | ||||
| Used to generate the event-related C code defined by a schema, with | ||||
| implementations for qapi_event_send_FOO(). The following files are | ||||
| created: | ||||
|  | ||||
| $(prefix)qapi-event.h - Function prototypes for each event type, plus an | ||||
|                         enumeration of all event names | ||||
| @@ -1029,6 +1044,27 @@ Example: | ||||
|  | ||||
|     $ python scripts/qapi-event.py --output-dir="qapi-generated" | ||||
|     --prefix="example-" example-schema.json | ||||
|     $ cat qapi-generated/example-qapi-event.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QAPI_EVENT_H | ||||
|     #define EXAMPLE_QAPI_EVENT_H | ||||
|  | ||||
|     #include "qapi/error.h" | ||||
|     #include "qapi/qmp/qdict.h" | ||||
|     #include "example-qapi-types.h" | ||||
|  | ||||
|  | ||||
|     void qapi_event_send_my_event(Error **errp); | ||||
|  | ||||
|     typedef enum example_QAPIEvent { | ||||
|         EXAMPLE_QAPI_EVENT_MY_EVENT = 0, | ||||
|         EXAMPLE_QAPI_EVENT__MAX = 1, | ||||
|     } example_QAPIEvent; | ||||
|  | ||||
|     extern const char *const example_QAPIEvent_lookup[]; | ||||
|  | ||||
|     #endif | ||||
|     $ cat qapi-generated/example-qapi-event.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
| @@ -1054,27 +1090,6 @@ Example: | ||||
|         [EXAMPLE_QAPI_EVENT_MY_EVENT] = "MY_EVENT", | ||||
|         [EXAMPLE_QAPI_EVENT__MAX] = NULL, | ||||
|     }; | ||||
|     $ cat qapi-generated/example-qapi-event.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     #ifndef EXAMPLE_QAPI_EVENT_H | ||||
|     #define EXAMPLE_QAPI_EVENT_H | ||||
|  | ||||
|     #include "qapi/error.h" | ||||
|     #include "qapi/qmp/qdict.h" | ||||
|     #include "example-qapi-types.h" | ||||
|  | ||||
|  | ||||
|     void qapi_event_send_my_event(Error **errp); | ||||
|  | ||||
|     typedef enum example_QAPIEvent { | ||||
|         EXAMPLE_QAPI_EVENT_MY_EVENT = 0, | ||||
|         EXAMPLE_QAPI_EVENT__MAX = 1, | ||||
|     } example_QAPIEvent; | ||||
|  | ||||
|     extern const char *const example_QAPIEvent_lookup[]; | ||||
|  | ||||
|     #endif | ||||
|  | ||||
| === scripts/qapi-introspect.py === | ||||
|  | ||||
| @@ -1089,17 +1104,6 @@ Example: | ||||
|  | ||||
|     $ python scripts/qapi-introspect.py --output-dir="qapi-generated" | ||||
|     --prefix="example-" example-schema.json | ||||
|     $ cat qapi-generated/example-qmp-introspect.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     const char example_qmp_schema_json[] = "[" | ||||
|         "{\"arg-type\": \"0\", \"meta-type\": \"event\", \"name\": \"MY_EVENT\"}, " | ||||
|         "{\"arg-type\": \"1\", \"meta-type\": \"command\", \"name\": \"my-command\", \"ret-type\": \"2\"}, " | ||||
|         "{\"members\": [], \"meta-type\": \"object\", \"name\": \"0\"}, " | ||||
|         "{\"members\": [{\"name\": \"arg1\", \"type\": \"2\"}], \"meta-type\": \"object\", \"name\": \"1\"}, " | ||||
|         "{\"members\": [{\"name\": \"integer\", \"type\": \"int\"}, {\"name\": \"string\", \"type\": \"str\"}], \"meta-type\": \"object\", \"name\": \"2\"}, " | ||||
|         "{\"json-type\": \"int\", \"meta-type\": \"builtin\", \"name\": \"int\"}, " | ||||
|         "{\"json-type\": \"string\", \"meta-type\": \"builtin\", \"name\": \"str\"}]"; | ||||
|     $ cat qapi-generated/example-qmp-introspect.h | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
| @@ -1109,3 +1113,15 @@ Example: | ||||
|     extern const char example_qmp_schema_json[]; | ||||
|  | ||||
|     #endif | ||||
|     $ cat qapi-generated/example-qmp-introspect.c | ||||
| [Uninteresting stuff omitted...] | ||||
|  | ||||
|     const char example_qmp_schema_json[] = "[" | ||||
|         "{\"arg-type\": \"0\", \"meta-type\": \"event\", \"name\": \"MY_EVENT\"}, " | ||||
|         "{\"arg-type\": \"1\", \"meta-type\": \"command\", \"name\": \"my-command\", \"ret-type\": \"2\"}, " | ||||
|         "{\"members\": [], \"meta-type\": \"object\", \"name\": \"0\"}, " | ||||
|         "{\"members\": [{\"name\": \"arg1\", \"type\": \"[2]\"}], \"meta-type\": \"object\", \"name\": \"1\"}, " | ||||
|         "{\"members\": [{\"name\": \"integer\", \"type\": \"int\"}, {\"default\": null, \"name\": \"string\", \"type\": \"str\"}], \"meta-type\": \"object\", \"name\": \"2\"}, " | ||||
|         "{\"element-type\": \"2\", \"meta-type\": \"array\", \"name\": \"[2]\"}, " | ||||
|         "{\"json-type\": \"int\", \"meta-type\": \"builtin\", \"name\": \"int\"}, " | ||||
|         "{\"json-type\": \"string\", \"meta-type\": \"builtin\", \"name\": \"str\"}]"; | ||||
|   | ||||
| @@ -3,7 +3,7 @@ | ||||
| 0. About This Document | ||||
| ====================== | ||||
|  | ||||
| Copyright (C) 2009-2015 Red Hat, Inc. | ||||
| Copyright (C) 2009-2016 Red Hat, Inc. | ||||
|  | ||||
| This work is licensed under the terms of the GNU GPL, version 2 or | ||||
| later. See the COPYING file in the top-level directory. | ||||
| @@ -277,7 +277,7 @@ However, Clients must not assume any particular: | ||||
| - Amount of errors generated by a command, that is, new errors can be added | ||||
|   to any existing command in newer versions of the Server | ||||
|  | ||||
| Any command or field name beginning with "x-" is deemed experimental, | ||||
| Any command or member name beginning with "x-" is deemed experimental, | ||||
| and may be withdrawn or changed in an incompatible manner in a future | ||||
| release. | ||||
|  | ||||
|   | ||||
| @@ -172,9 +172,6 @@ source tree.  It may not be as powerful as platform-specific or third-party | ||||
| trace backends but it is portable.  This is the recommended trace backend | ||||
| unless you have specific needs for more advanced backends. | ||||
|  | ||||
| The "simple" backend currently does not capture string arguments, it simply | ||||
| records the char* pointer value instead of the string that is pointed to. | ||||
|  | ||||
| === Ftrace === | ||||
|  | ||||
| The "ftrace" backend writes trace data to ftrace marker. This effectively | ||||
| @@ -347,3 +344,44 @@ This will immediately call: | ||||
| and will generate the TCG code to call: | ||||
|  | ||||
|     void trace_foo(uint8_t a1, uint32_t a2); | ||||
|  | ||||
| === "vcpu" === | ||||
|  | ||||
| Identifies events that trace vCPU-specific information. It implicitly adds a | ||||
| "CPUState*" argument, and extends the tracing print format to show the vCPU | ||||
| information. If used together with the "tcg" property, it adds a second | ||||
| "TCGv_env" argument that must point to the per-target global TCG register that | ||||
| points to the vCPU when guest code is executed (usually the "cpu_env" variable). | ||||
|  | ||||
| The following example events: | ||||
|  | ||||
|     foo(uint32_t a) "a=%x" | ||||
|     vcpu bar(uint32_t a) "a=%x" | ||||
|     tcg vcpu baz(uint32_t a) "a=%x", "a=%x" | ||||
|  | ||||
| Can be used as: | ||||
|  | ||||
|     #include "trace-tcg.h" | ||||
|      | ||||
|     CPUArchState *env; | ||||
|     TCGv_ptr cpu_env; | ||||
|      | ||||
|     void some_disassembly_func(...) | ||||
|     { | ||||
|         /* trace emitted at this point */ | ||||
|         trace_foo(0xd1); | ||||
|         /* trace emitted at this point */ | ||||
|         trace_bar(ENV_GET_CPU(env), 0xd2); | ||||
|         /* trace emitted at this point (env) and when guest code is executed (cpu_env) */ | ||||
|         trace_baz_tcg(ENV_GET_CPU(env), cpu_env, 0xd3); | ||||
|     } | ||||
|  | ||||
| If the translating vCPU has address 0xc1 and code is later executed by vCPU | ||||
| 0xc2, this would be an example output: | ||||
|  | ||||
|     // at guest code translation | ||||
|     foo a=0xd1 | ||||
|     bar cpu=0xc1 a=0xd2 | ||||
|     baz_trans cpu=0xc1 a=0xd3 | ||||
|     // at guest code execution | ||||
|     baz_exec cpu=0xc2 a=0xd3 | ||||
|   | ||||
| @@ -26,7 +26,6 @@ | ||||
| #include "hw/nvram/fw_cfg.h" | ||||
| #include "qemu/config-file.h" | ||||
| #include "qapi/opts-visitor.h" | ||||
| #include "qapi/dealloc-visitor.h" | ||||
| #include "qapi-visit.h" | ||||
| #include "qapi-event.h" | ||||
|  | ||||
| @@ -297,15 +296,7 @@ void acpi_table_add(const QemuOpts *opts, Error **errp) | ||||
| out: | ||||
|     g_free(blob); | ||||
|     g_strfreev(pathnames); | ||||
|  | ||||
|     if (hdrs != NULL) { | ||||
|         QapiDeallocVisitor *dv; | ||||
|  | ||||
|         dv = qapi_dealloc_visitor_new(); | ||||
|         visit_type_AcpiTableOptions(qapi_dealloc_get_visitor(dv), NULL, &hdrs, | ||||
|                                     NULL); | ||||
|         qapi_dealloc_visitor_cleanup(dv); | ||||
|     } | ||||
|     qapi_free_AcpiTableOptions(hdrs); | ||||
|  | ||||
|     error_propagate(errp, err); | ||||
| } | ||||
|   | ||||
| @@ -111,7 +111,7 @@ static void clipper_init(MachineState *machine) | ||||
|     } | ||||
|     size = load_elf(palcode_filename, cpu_alpha_superpage_to_phys, | ||||
|                     NULL, &palcode_entry, &palcode_low, &palcode_high, | ||||
|                     0, EM_ALPHA, 0); | ||||
|                     0, EM_ALPHA, 0, 0); | ||||
|     if (size < 0) { | ||||
|         error_report("could not load palcode '%s'", palcode_filename); | ||||
|         exit(1); | ||||
| @@ -131,7 +131,7 @@ static void clipper_init(MachineState *machine) | ||||
|  | ||||
|         size = load_elf(kernel_filename, cpu_alpha_superpage_to_phys, | ||||
|                         NULL, &kernel_entry, &kernel_low, &kernel_high, | ||||
|                         0, EM_ALPHA, 0); | ||||
|                         0, EM_ALPHA, 0, 0); | ||||
|         if (size < 0) { | ||||
|             error_report("could not load kernel '%s'", kernel_filename); | ||||
|             exit(1); | ||||
|   | ||||
| @@ -211,7 +211,7 @@ DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq, | ||||
|  | ||||
|     if (kernel_filename) { | ||||
|         image_size = load_elf(kernel_filename, NULL, NULL, &entry, &lowaddr, | ||||
|                               NULL, big_endian, EM_ARM, 1); | ||||
|                               NULL, big_endian, EM_ARM, 1, 0); | ||||
|         if (image_size < 0) { | ||||
|             image_size = load_image_targphys(kernel_filename, 0, mem_size); | ||||
|             lowaddr = 0; | ||||
|   | ||||
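The extra trailing 0 added at these call sites fills a new final parameter of load_elf() that selects byte-swapping of the loaded data (0 meaning none); the updated prototype is presumably along these lines (a sketch, not quoted from the patch):

    int load_elf(const char *filename,
                 uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque,
                 uint64_t *pentry, uint64_t *lowaddr, uint64_t *highaddr,
                 int big_endian, int elf_machine, int clear_lsb, int data_swab);

The hw/arm/boot.c hunk below passes data_swab = 2 for BE32 images, so the loader byte-reverses the data in 4-byte chunks to match the CPU's per-word view described in the code comment.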
| @@ -518,9 +518,34 @@ static void do_cpu_reset(void *opaque) | ||||
|     cpu_reset(cs); | ||||
|     if (info) { | ||||
|         if (!info->is_linux) { | ||||
|             int i; | ||||
|             /* Jump to the entry point.  */ | ||||
|             uint64_t entry = info->entry; | ||||
|  | ||||
|             switch (info->endianness) { | ||||
|             case ARM_ENDIANNESS_LE: | ||||
|                 env->cp15.sctlr_el[1] &= ~SCTLR_E0E; | ||||
|                 for (i = 1; i < 4; ++i) { | ||||
|                     env->cp15.sctlr_el[i] &= ~SCTLR_EE; | ||||
|                 } | ||||
|                 env->uncached_cpsr &= ~CPSR_E; | ||||
|                 break; | ||||
|             case ARM_ENDIANNESS_BE8: | ||||
|                 env->cp15.sctlr_el[1] |= SCTLR_E0E; | ||||
|                 for (i = 1; i < 4; ++i) { | ||||
|                     env->cp15.sctlr_el[i] |= SCTLR_EE; | ||||
|                 } | ||||
|                 env->uncached_cpsr |= CPSR_E; | ||||
|                 break; | ||||
|             case ARM_ENDIANNESS_BE32: | ||||
|                 env->cp15.sctlr_el[1] |= SCTLR_B; | ||||
|                 break; | ||||
|             case ARM_ENDIANNESS_UNKNOWN: | ||||
|                 break; /* Board's decision */ | ||||
|             default: | ||||
|                 g_assert_not_reached(); | ||||
|             } | ||||
|  | ||||
|             if (!env->aarch64) { | ||||
|                 env->thumb = info->entry & 1; | ||||
|                 entry &= 0xfffffffe; | ||||
| @@ -638,6 +663,62 @@ static int do_arm_linux_init(Object *obj, void *opaque) | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry, | ||||
|                              uint64_t *lowaddr, uint64_t *highaddr, | ||||
|                              int elf_machine) | ||||
| { | ||||
|     bool elf_is64; | ||||
|     union { | ||||
|         Elf32_Ehdr h32; | ||||
|         Elf64_Ehdr h64; | ||||
|     } elf_header; | ||||
|     int data_swab = 0; | ||||
|     bool big_endian; | ||||
|     uint64_t ret = -1; | ||||
|     Error *err = NULL; | ||||
|  | ||||
|  | ||||
|     load_elf_hdr(info->kernel_filename, &elf_header, &elf_is64, &err); | ||||
|     if (err) { | ||||
|         return ret; | ||||
|     } | ||||
|  | ||||
|     if (elf_is64) { | ||||
|         big_endian = elf_header.h64.e_ident[EI_DATA] == ELFDATA2MSB; | ||||
|         info->endianness = big_endian ? ARM_ENDIANNESS_BE8 | ||||
|                                       : ARM_ENDIANNESS_LE; | ||||
|     } else { | ||||
|         big_endian = elf_header.h32.e_ident[EI_DATA] == ELFDATA2MSB; | ||||
|         if (big_endian) { | ||||
|             if (bswap32(elf_header.h32.e_flags) & EF_ARM_BE8) { | ||||
|                 info->endianness = ARM_ENDIANNESS_BE8; | ||||
|             } else { | ||||
|                 info->endianness = ARM_ENDIANNESS_BE32; | ||||
|                 /* In BE32, the CPU has a different view of the per-byte | ||||
|                  * address map than the rest of the system. BE32 ELF files | ||||
|                  * are organised such that they can be programmed through | ||||
|                  * the CPU's per-word byte-reversed view of the world. QEMU | ||||
|                  * however loads ELF files independently of the CPU. So | ||||
|                  * tell the ELF loader to byte reverse the data for us. | ||||
|                  */ | ||||
|                 data_swab = 2; | ||||
|             } | ||||
|         } else { | ||||
|             info->endianness = ARM_ENDIANNESS_LE; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     ret = load_elf(info->kernel_filename, NULL, NULL, | ||||
|                    pentry, lowaddr, highaddr, big_endian, elf_machine, | ||||
|                    1, data_swab); | ||||
|     if (ret <= 0) { | ||||
|         /* The header loaded but the image didn't */ | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static void arm_load_kernel_notify(Notifier *notifier, void *data) | ||||
| { | ||||
|     CPUState *cs; | ||||
| @@ -647,7 +728,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data) | ||||
|     uint64_t elf_entry, elf_low_addr, elf_high_addr; | ||||
|     int elf_machine; | ||||
|     hwaddr entry, kernel_load_offset; | ||||
|     int big_endian; | ||||
|     static const ARMInsnFixup *primary_loader; | ||||
|     ArmLoadKernelNotifier *n = DO_UPCAST(ArmLoadKernelNotifier, | ||||
|                                          notifier, notifier); | ||||
| @@ -733,12 +813,6 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data) | ||||
|     if (info->nb_cpus == 0) | ||||
|         info->nb_cpus = 1; | ||||
|  | ||||
| #ifdef TARGET_WORDS_BIGENDIAN | ||||
|     big_endian = 1; | ||||
| #else | ||||
|     big_endian = 0; | ||||
| #endif | ||||
|  | ||||
|     /* We want to put the initrd far enough into RAM that when the | ||||
|      * kernel is uncompressed it will not clobber the initrd. However | ||||
|      * on boards without much RAM we must ensure that we still leave | ||||
| @@ -753,9 +827,8 @@ static void arm_load_kernel_notify(Notifier *notifier, void *data) | ||||
|         MIN(info->ram_size / 2, 128 * 1024 * 1024); | ||||
|  | ||||
|     /* Assume that raw images are linux kernels, and ELF images are not.  */ | ||||
|     kernel_size = load_elf(info->kernel_filename, NULL, NULL, &elf_entry, | ||||
|                            &elf_low_addr, &elf_high_addr, big_endian, | ||||
|                            elf_machine, 1); | ||||
|     kernel_size = arm_load_elf(info, &elf_entry, &elf_low_addr, | ||||
|                                &elf_high_addr, elf_machine); | ||||
|     if (kernel_size > 0 && have_dtb(info)) { | ||||
|         /* If there is still some room left at the base of RAM, try and put | ||||
|          * the DTB there like we do for images loaded with -bios or -pflash. | ||||
|   | ||||
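For readers unfamiliar with the new data_swab argument to load_elf(): as the loader implements it (not shown in this excerpt), a value of n asks for every 2^n-byte unit of segment data to be byte-reversed, so the data_swab = 2 chosen for BE32 images above amounts to reversing each aligned 32-bit word before it reaches guest memory. A small standalone illustration (plain C, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse the bytes of every aligned 4-byte word, i.e. what
     * data_swab = 2 requests from the ELF loader. */
    static void swab_words(uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i + 4 <= len; i += 4) {
            uint8_t t;
            t = buf[i];     buf[i]     = buf[i + 3]; buf[i + 3] = t;
            t = buf[i + 1]; buf[i + 1] = buf[i + 2]; buf[i + 2] = t;
        }
    }

    int main(void)
    {
        uint8_t seg[8] = { 0x11, 0x22, 0x33, 0x44, 0xaa, 0xbb, 0xcc, 0xdd };
        swab_words(seg, sizeof(seg));
        for (size_t i = 0; i < sizeof(seg); i++) {
            printf("%02x ", seg[i]);
        }
        printf("\n");   /* prints: 44 33 22 11 dd cc bb aa */
        return 0;
    }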
							
								
								
									
hw/arm/virt.c (168 lines changed)
							| @@ -73,6 +73,7 @@ typedef struct VirtBoardInfo { | ||||
|     uint32_t clock_phandle; | ||||
|     uint32_t gic_phandle; | ||||
|     uint32_t v2m_phandle; | ||||
|     bool using_psci; | ||||
| } VirtBoardInfo; | ||||
|  | ||||
| typedef struct { | ||||
| @@ -95,6 +96,23 @@ typedef struct { | ||||
| #define VIRT_MACHINE_CLASS(klass) \ | ||||
|     OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE) | ||||
|  | ||||
| /* RAM limit in GB. Since VIRT_MEM starts at the 1GB mark, this means | ||||
|  * RAM can go up to the 256GB mark, leaving 256GB of the physical | ||||
|  * address space unallocated and free for future use between 256G and 512G. | ||||
|  * If we need to provide more RAM to VMs in the future then we need to: | ||||
|  *  * allocate a second bank of RAM starting at 2TB and working up | ||||
|  *  * fix the DT and ACPI table generation code in QEMU to correctly | ||||
|  *    report two split lumps of RAM to the guest | ||||
|  *  * fix KVM in the host kernel to allow guests with >40 bit address spaces | ||||
|  * (We don't want to fill all the way up to 512GB with RAM because | ||||
|  * we might want it for non-RAM purposes later. Conversely it seems | ||||
|  * reasonable to assume that anybody configuring a VM with a quarter | ||||
|  * of a terabyte of RAM will be doing it on a host with more than a | ||||
|  * terabyte of physical address space.) | ||||
|  */ | ||||
| #define RAMLIMIT_GB 255 | ||||
| #define RAMLIMIT_BYTES (RAMLIMIT_GB * 1024ULL * 1024 * 1024) | ||||
|  | ||||
| /* Addresses and sizes of our components. | ||||
|  * 0..128MB is space for a flash device so we can run bootrom code such as UEFI. | ||||
|  * 128MB..256MB is used for miscellaneous device I/O. | ||||
| @@ -127,10 +145,11 @@ static const MemMapEntry a15memmap[] = { | ||||
|     [VIRT_MMIO] =               { 0x0a000000, 0x00000200 }, | ||||
|     /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */ | ||||
|     [VIRT_PLATFORM_BUS] =       { 0x0c000000, 0x02000000 }, | ||||
|     [VIRT_SECURE_MEM] =         { 0x0e000000, 0x01000000 }, | ||||
|     [VIRT_PCIE_MMIO] =          { 0x10000000, 0x2eff0000 }, | ||||
|     [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 }, | ||||
|     [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 }, | ||||
|     [VIRT_MEM] =                { 0x40000000, 30ULL * 1024 * 1024 * 1024 }, | ||||
|     [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES }, | ||||
|     /* Second PCIe window, 512GB wide at the 512GB boundary */ | ||||
|     [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL }, | ||||
| }; | ||||
| @@ -230,6 +249,10 @@ static void fdt_add_psci_node(const VirtBoardInfo *vbi) | ||||
|     void *fdt = vbi->fdt; | ||||
|     ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0)); | ||||
|  | ||||
|     if (!vbi->using_psci) { | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     qemu_fdt_add_subnode(fdt, "/psci"); | ||||
|     if (armcpu->psci_version == 2) { | ||||
|         const char comp[] = "arm,psci-0.2\0arm,psci"; | ||||
| @@ -341,7 +364,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi) | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", | ||||
|                                     armcpu->dtb_compatible); | ||||
|  | ||||
|         if (vbi->smp_cpus > 1) { | ||||
|         if (vbi->using_psci && vbi->smp_cpus > 1) { | ||||
|             qemu_fdt_setprop_string(vbi->fdt, nodename, | ||||
|                                         "enable-method", "psci"); | ||||
|         } | ||||
| @@ -678,13 +701,15 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic) | ||||
| } | ||||
|  | ||||
| static void create_one_flash(const char *name, hwaddr flashbase, | ||||
|                              hwaddr flashsize) | ||||
|                              hwaddr flashsize, const char *file, | ||||
|                              MemoryRegion *sysmem) | ||||
| { | ||||
|     /* Create and map a single flash device. We use the same | ||||
|      * parameters as the flash devices on the Versatile Express board. | ||||
|      */ | ||||
|     DriveInfo *dinfo = drive_get_next(IF_PFLASH); | ||||
|     DeviceState *dev = qdev_create(NULL, "cfi.pflash01"); | ||||
|     SysBusDevice *sbd = SYS_BUS_DEVICE(dev); | ||||
|     const uint64_t sectorlength = 256 * 1024; | ||||
|  | ||||
|     if (dinfo) { | ||||
| @@ -704,19 +729,10 @@ static void create_one_flash(const char *name, hwaddr flashbase, | ||||
|     qdev_prop_set_string(dev, "name", name); | ||||
|     qdev_init_nofail(dev); | ||||
|  | ||||
|     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, flashbase); | ||||
| } | ||||
|     memory_region_add_subregion(sysmem, flashbase, | ||||
|                                 sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); | ||||
|  | ||||
| static void create_flash(const VirtBoardInfo *vbi) | ||||
| { | ||||
|     /* Create two flash devices to fill the VIRT_FLASH space in the memmap. | ||||
|      * Any file passed via -bios goes in the first of these. | ||||
|      */ | ||||
|     hwaddr flashsize = vbi->memmap[VIRT_FLASH].size / 2; | ||||
|     hwaddr flashbase = vbi->memmap[VIRT_FLASH].base; | ||||
|     char *nodename; | ||||
|  | ||||
|     if (bios_name) { | ||||
|     if (file) { | ||||
|         char *fn; | ||||
|         int image_size; | ||||
|  | ||||
| @@ -726,30 +742,73 @@ static void create_flash(const VirtBoardInfo *vbi) | ||||
|                          "but you cannot use both options at once"); | ||||
|             exit(1); | ||||
|         } | ||||
|         fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | ||||
|         fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, file); | ||||
|         if (!fn) { | ||||
|             error_report("Could not find ROM image '%s'", bios_name); | ||||
|             error_report("Could not find ROM image '%s'", file); | ||||
|             exit(1); | ||||
|         } | ||||
|         image_size = load_image_targphys(fn, flashbase, flashsize); | ||||
|         image_size = load_image_mr(fn, sysbus_mmio_get_region(sbd, 0)); | ||||
|         g_free(fn); | ||||
|         if (image_size < 0) { | ||||
|             error_report("Could not load ROM image '%s'", bios_name); | ||||
|             error_report("Could not load ROM image '%s'", file); | ||||
|             exit(1); | ||||
|         } | ||||
|     } | ||||
| } | ||||
|  | ||||
|     create_one_flash("virt.flash0", flashbase, flashsize); | ||||
|     create_one_flash("virt.flash1", flashbase + flashsize, flashsize); | ||||
| static void create_flash(const VirtBoardInfo *vbi, | ||||
|                          MemoryRegion *sysmem, | ||||
|                          MemoryRegion *secure_sysmem) | ||||
| { | ||||
|     /* Create two flash devices to fill the VIRT_FLASH space in the memmap. | ||||
|      * Any file passed via -bios goes in the first of these. | ||||
|      * sysmem is the system memory space. secure_sysmem is the secure view | ||||
|      * of the system, and the first flash device should be made visible only | ||||
|      * there. The second flash device is visible to both secure and nonsecure. | ||||
|      * If sysmem == secure_sysmem this means there is no separate Secure | ||||
|      * address space and both flash devices are generally visible. | ||||
|      */ | ||||
|     hwaddr flashsize = vbi->memmap[VIRT_FLASH].size / 2; | ||||
|     hwaddr flashbase = vbi->memmap[VIRT_FLASH].base; | ||||
|     char *nodename; | ||||
|  | ||||
|     nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); | ||||
|     qemu_fdt_add_subnode(vbi->fdt, nodename); | ||||
|     qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash"); | ||||
|     qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", | ||||
|                                  2, flashbase, 2, flashsize, | ||||
|                                  2, flashbase + flashsize, 2, flashsize); | ||||
|     qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4); | ||||
|     g_free(nodename); | ||||
|     create_one_flash("virt.flash0", flashbase, flashsize, | ||||
|                      bios_name, secure_sysmem); | ||||
|     create_one_flash("virt.flash1", flashbase + flashsize, flashsize, | ||||
|                      NULL, sysmem); | ||||
|  | ||||
|     if (sysmem == secure_sysmem) { | ||||
|         /* Report both flash devices as a single node in the DT */ | ||||
|         nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); | ||||
|         qemu_fdt_add_subnode(vbi->fdt, nodename); | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash"); | ||||
|         qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", | ||||
|                                      2, flashbase, 2, flashsize, | ||||
|                                      2, flashbase + flashsize, 2, flashsize); | ||||
|         qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4); | ||||
|         g_free(nodename); | ||||
|     } else { | ||||
|         /* Report the devices as separate nodes so we can mark one as | ||||
|          * only visible to the secure world. | ||||
|          */ | ||||
|         nodename = g_strdup_printf("/secflash@%" PRIx64, flashbase); | ||||
|         qemu_fdt_add_subnode(vbi->fdt, nodename); | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash"); | ||||
|         qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", | ||||
|                                      2, flashbase, 2, flashsize); | ||||
|         qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4); | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "status", "disabled"); | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "secure-status", "okay"); | ||||
|         g_free(nodename); | ||||
|  | ||||
|         nodename = g_strdup_printf("/flash@%" PRIx64, flashbase); | ||||
|         qemu_fdt_add_subnode(vbi->fdt, nodename); | ||||
|         qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash"); | ||||
|         qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", | ||||
|                                      2, flashbase + flashsize, 2, flashsize); | ||||
|         qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4); | ||||
|         g_free(nodename); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void create_fw_cfg(const VirtBoardInfo *vbi, AddressSpace *as) | ||||
| @@ -960,6 +1019,27 @@ static void create_platform_bus(VirtBoardInfo *vbi, qemu_irq *pic) | ||||
|                                 sysbus_mmio_get_region(s, 0)); | ||||
| } | ||||
|  | ||||
| static void create_secure_ram(VirtBoardInfo *vbi, MemoryRegion *secure_sysmem) | ||||
| { | ||||
|     MemoryRegion *secram = g_new(MemoryRegion, 1); | ||||
|     char *nodename; | ||||
|     hwaddr base = vbi->memmap[VIRT_SECURE_MEM].base; | ||||
|     hwaddr size = vbi->memmap[VIRT_SECURE_MEM].size; | ||||
|  | ||||
|     memory_region_init_ram(secram, NULL, "virt.secure-ram", size, &error_fatal); | ||||
|     vmstate_register_ram_global(secram); | ||||
|     memory_region_add_subregion(secure_sysmem, base, secram); | ||||
|  | ||||
|     nodename = g_strdup_printf("/secram@%" PRIx64, base); | ||||
|     qemu_fdt_add_subnode(vbi->fdt, nodename); | ||||
|     qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "memory"); | ||||
|     qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", 2, base, 2, size); | ||||
|     qemu_fdt_setprop_string(vbi->fdt, nodename, "status", "disabled"); | ||||
|     qemu_fdt_setprop_string(vbi->fdt, nodename, "secure-status", "okay"); | ||||
|  | ||||
|     g_free(nodename); | ||||
| } | ||||
|  | ||||
| static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size) | ||||
| { | ||||
|     const VirtBoardInfo *board = (const VirtBoardInfo *)binfo; | ||||
| @@ -1020,6 +1100,7 @@ static void machvirt_init(MachineState *machine) | ||||
|     VirtGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state); | ||||
|     VirtGuestInfo *guest_info = &guest_info_state->info; | ||||
|     char **cpustr; | ||||
|     bool firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0); | ||||
|  | ||||
|     if (!cpu_model) { | ||||
|         cpu_model = "cortex-a15"; | ||||
| @@ -1047,6 +1128,15 @@ static void machvirt_init(MachineState *machine) | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
|     /* If we have an EL3 boot ROM then the assumption is that it will | ||||
|      * implement PSCI itself, so disable QEMU's internal implementation | ||||
|      * so it doesn't get in the way. Instead of starting secondary | ||||
|      * CPUs in PSCI powerdown state we will start them all running and | ||||
|      * let the boot ROM sort them out. | ||||
|      * The usual case is that we do use QEMU's PSCI implementation. | ||||
|      */ | ||||
|     vbi->using_psci = !(vms->secure && firmware_loaded); | ||||
|  | ||||
|     /* The maximum number of CPUs depends on the GIC version, or on how | ||||
|      * many redistributors we can fit into the memory map. | ||||
|      */ | ||||
| @@ -1066,7 +1156,7 @@ static void machvirt_init(MachineState *machine) | ||||
|     vbi->smp_cpus = smp_cpus; | ||||
|  | ||||
|     if (machine->ram_size > vbi->memmap[VIRT_MEM].size) { | ||||
|         error_report("mach-virt: cannot model more than 30GB RAM"); | ||||
|         error_report("mach-virt: cannot model more than %dGB RAM", RAMLIMIT_GB); | ||||
|         exit(1); | ||||
|     } | ||||
|  | ||||
| @@ -1114,12 +1204,15 @@ static void machvirt_init(MachineState *machine) | ||||
|             object_property_set_bool(cpuobj, false, "has_el3", NULL); | ||||
|         } | ||||
|  | ||||
|         object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_HVC, "psci-conduit", | ||||
|                                 NULL); | ||||
|         if (vbi->using_psci) { | ||||
|             object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_HVC, | ||||
|                                     "psci-conduit", NULL); | ||||
|  | ||||
|         /* Secondary CPUs start in PSCI powered-down state */ | ||||
|         if (n > 0) { | ||||
|             object_property_set_bool(cpuobj, true, "start-powered-off", NULL); | ||||
|             /* Secondary CPUs start in PSCI powered-down state */ | ||||
|             if (n > 0) { | ||||
|                 object_property_set_bool(cpuobj, true, | ||||
|                                          "start-powered-off", NULL); | ||||
|             } | ||||
|         } | ||||
|  | ||||
|         if (object_property_find(cpuobj, "reset-cbar", NULL)) { | ||||
| @@ -1145,13 +1238,14 @@ static void machvirt_init(MachineState *machine) | ||||
|                                          machine->ram_size); | ||||
|     memory_region_add_subregion(sysmem, vbi->memmap[VIRT_MEM].base, ram); | ||||
|  | ||||
|     create_flash(vbi); | ||||
|     create_flash(vbi, sysmem, secure_sysmem ? secure_sysmem : sysmem); | ||||
|  | ||||
|     create_gic(vbi, pic, gic_version, vms->secure); | ||||
|  | ||||
|     create_uart(vbi, pic, VIRT_UART, sysmem); | ||||
|  | ||||
|     if (vms->secure) { | ||||
|         create_secure_ram(vbi, secure_sysmem); | ||||
|         create_uart(vbi, pic, VIRT_SECURE_UART, secure_sysmem); | ||||
|     } | ||||
|  | ||||
| @@ -1187,7 +1281,7 @@ static void machvirt_init(MachineState *machine) | ||||
|     vbi->bootinfo.board_id = -1; | ||||
|     vbi->bootinfo.loader_start = vbi->memmap[VIRT_MEM].base; | ||||
|     vbi->bootinfo.get_dtb = machvirt_dtb; | ||||
|     vbi->bootinfo.firmware_loaded = bios_name || drive_get(IF_PFLASH, 0, 0); | ||||
|     vbi->bootinfo.firmware_loaded = firmware_loaded; | ||||
|     arm_load_kernel(ARM_CPU(first_cpu), &vbi->bootinfo); | ||||
|  | ||||
|     /* | ||||
|   | ||||
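A usage sketch for the PSCI change in machvirt_init() above (the firmware file name is a placeholder, and this describes the intended behaviour rather than guaranteeing it): booting the virt board with a secure EL3 firmware image in the first flash bank, e.g.

    qemu-system-aarch64 -machine virt,secure=on -cpu cortex-a57 \
        -bios firmware.bin -nographic

makes both vms->secure and firmware_loaded true, so using_psci ends up false: no /psci node or per-CPU "enable-method" property is written into the device tree, the psci-conduit CPU property is left unset, and all CPUs start running so the boot ROM can park the secondaries itself. Without -bios or -drive if=pflash the board keeps QEMU's built-in PSCI implementation exactly as before.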
| @@ -842,14 +842,16 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, | ||||
| { | ||||
|     ChannelState *s = (ChannelState *)dev; | ||||
|     int qcode, keycode; | ||||
|     InputKeyEvent *key; | ||||
|  | ||||
|     assert(evt->type == INPUT_EVENT_KIND_KEY); | ||||
|     qcode = qemu_input_key_value_to_qcode(evt->u.key->key); | ||||
|     key = evt->u.key; | ||||
|     qcode = qemu_input_key_value_to_qcode(key->key); | ||||
|     trace_escc_sunkbd_event_in(qcode, QKeyCode_lookup[qcode], | ||||
|                                evt->u.key->down); | ||||
|                                key->down); | ||||
|  | ||||
|     if (qcode == Q_KEY_CODE_CAPS_LOCK) { | ||||
|         if (evt->u.key->down) { | ||||
|         if (key->down) { | ||||
|             s->caps_lock_mode ^= 1; | ||||
|             if (s->caps_lock_mode == 2) { | ||||
|                 return; /* Drop second press */ | ||||
| @@ -863,7 +865,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, | ||||
|     } | ||||
|  | ||||
|     if (qcode == Q_KEY_CODE_NUM_LOCK) { | ||||
|         if (evt->u.key->down) { | ||||
|         if (key->down) { | ||||
|             s->num_lock_mode ^= 1; | ||||
|             if (s->num_lock_mode == 2) { | ||||
|                 return; /* Drop second press */ | ||||
| @@ -877,7 +879,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, | ||||
|     } | ||||
|  | ||||
|     keycode = qcode_to_keycode[qcode]; | ||||
|     if (!evt->u.key->down) { | ||||
|     if (!key->down) { | ||||
|         keycode |= 0x80; | ||||
|     } | ||||
|     trace_escc_sunkbd_event_out(keycode); | ||||
|   | ||||
| @@ -147,6 +147,28 @@ int load_image_targphys(const char *filename, | ||||
|     return size; | ||||
| } | ||||
|  | ||||
| int load_image_mr(const char *filename, MemoryRegion *mr) | ||||
| { | ||||
|     int size; | ||||
|  | ||||
|     if (!memory_access_is_direct(mr, false)) { | ||||
|         /* Can only load an image into RAM or ROM */ | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     size = get_image_size(filename); | ||||
|  | ||||
|     if (size > memory_region_size(mr)) { | ||||
|         return -1; | ||||
|     } | ||||
|     if (size > 0) { | ||||
|         if (rom_add_file_mr(filename, mr, -1) < 0) { | ||||
|             return -1; | ||||
|         } | ||||
|     } | ||||
|     return size; | ||||
| } | ||||
|  | ||||
| void pstrcpy_targphys(const char *name, hwaddr dest, int buf_size, | ||||
|                       const char *source) | ||||
| { | ||||
| @@ -332,10 +354,66 @@ const char *load_elf_strerror(int error) | ||||
|     } | ||||
| } | ||||
|  | ||||
| void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp) | ||||
| { | ||||
|     int fd; | ||||
|     uint8_t e_ident_local[EI_NIDENT]; | ||||
|     uint8_t *e_ident; | ||||
|     size_t hdr_size, off; | ||||
|     bool is64l; | ||||
|  | ||||
|     if (!hdr) { | ||||
|         hdr = e_ident_local; | ||||
|     } | ||||
|     e_ident = hdr; | ||||
|  | ||||
|     fd = open(filename, O_RDONLY | O_BINARY); | ||||
|     if (fd < 0) { | ||||
|         error_setg_errno(errp, errno, "Failed to open file: %s", filename); | ||||
|         return; | ||||
|     } | ||||
|     if (read(fd, hdr, EI_NIDENT) != EI_NIDENT) { | ||||
|         error_setg_errno(errp, errno, "Failed to read file: %s", filename); | ||||
|         goto fail; | ||||
|     } | ||||
|     if (e_ident[0] != ELFMAG0 || | ||||
|         e_ident[1] != ELFMAG1 || | ||||
|         e_ident[2] != ELFMAG2 || | ||||
|         e_ident[3] != ELFMAG3) { | ||||
|         error_setg(errp, "Bad ELF magic"); | ||||
|         goto fail; | ||||
|     } | ||||
|  | ||||
|     is64l = e_ident[EI_CLASS] == ELFCLASS64; | ||||
|     hdr_size = is64l ? sizeof(Elf64_Ehdr) : sizeof(Elf32_Ehdr); | ||||
|     if (is64) { | ||||
|         *is64 = is64l; | ||||
|     } | ||||
|  | ||||
|     off = EI_NIDENT; | ||||
|     while (hdr != e_ident_local && off < hdr_size) { | ||||
|         size_t br = read(fd, hdr + off, hdr_size - off); | ||||
|         switch (br) { | ||||
|         case 0: | ||||
|             error_setg(errp, "File too short: %s", filename); | ||||
|             goto fail; | ||||
|         case -1: | ||||
|             error_setg_errno(errp, errno, "Failed to read file: %s", | ||||
|                              filename); | ||||
|             goto fail; | ||||
|         } | ||||
|         off += br; | ||||
|     } | ||||
|  | ||||
| fail: | ||||
|     close(fd); | ||||
| } | ||||
|  | ||||
| /* return < 0 if error, otherwise the number of bytes loaded in memory */ | ||||
| int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t), | ||||
|              void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, | ||||
|              uint64_t *highaddr, int big_endian, int elf_machine, int clear_lsb) | ||||
|              uint64_t *highaddr, int big_endian, int elf_machine, | ||||
|              int clear_lsb, int data_swab) | ||||
| { | ||||
|     int fd, data_order, target_data_order, must_swab, ret = ELF_LOAD_FAILED; | ||||
|     uint8_t e_ident[EI_NIDENT]; | ||||
| @@ -374,10 +452,12 @@ int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t), | ||||
|     lseek(fd, 0, SEEK_SET); | ||||
|     if (e_ident[EI_CLASS] == ELFCLASS64) { | ||||
|         ret = load_elf64(filename, fd, translate_fn, translate_opaque, must_swab, | ||||
|                          pentry, lowaddr, highaddr, elf_machine, clear_lsb); | ||||
|                          pentry, lowaddr, highaddr, elf_machine, clear_lsb, | ||||
|                          data_swab); | ||||
|     } else { | ||||
|         ret = load_elf32(filename, fd, translate_fn, translate_opaque, must_swab, | ||||
|                          pentry, lowaddr, highaddr, elf_machine, clear_lsb); | ||||
|                          pentry, lowaddr, highaddr, elf_machine, clear_lsb, | ||||
|                          data_swab); | ||||
|     } | ||||
|  | ||||
|  fail: | ||||
| @@ -751,7 +831,7 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name) | ||||
|  | ||||
| int rom_add_file(const char *file, const char *fw_dir, | ||||
|                  hwaddr addr, int32_t bootindex, | ||||
|                  bool option_rom) | ||||
|                  bool option_rom, MemoryRegion *mr) | ||||
| { | ||||
|     MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); | ||||
|     Rom *rom; | ||||
| @@ -818,7 +898,12 @@ int rom_add_file(const char *file, const char *fw_dir, | ||||
|  | ||||
|         fw_cfg_add_file(fw_cfg, fw_file_name, data, rom->romsize); | ||||
|     } else { | ||||
|         snprintf(devpath, sizeof(devpath), "/rom@" TARGET_FMT_plx, addr); | ||||
|         if (mr) { | ||||
|             rom->mr = mr; | ||||
|             snprintf(devpath, sizeof(devpath), "/rom@%s", file); | ||||
|         } else { | ||||
|             snprintf(devpath, sizeof(devpath), "/rom@" TARGET_FMT_plx, addr); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     add_boot_device_path(bootindex, NULL, devpath); | ||||
| @@ -892,12 +977,12 @@ int rom_add_elf_program(const char *name, void *data, size_t datasize, | ||||
|  | ||||
| int rom_add_vga(const char *file) | ||||
| { | ||||
|     return rom_add_file(file, "vgaroms", 0, -1, true); | ||||
|     return rom_add_file(file, "vgaroms", 0, -1, true, NULL); | ||||
| } | ||||
|  | ||||
| int rom_add_option(const char *file, int32_t bootindex) | ||||
| { | ||||
|     return rom_add_file(file, "genroms", 0, bootindex, true); | ||||
|     return rom_add_file(file, "genroms", 0, bootindex, true, NULL); | ||||
| } | ||||
|  | ||||
| static void rom_reset(void *unused) | ||||
|   | ||||
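Two new loader entry points appear in this hunk: load_elf_hdr(), which reads just the ELF header so callers can inspect class and endianness before deciding how to load, and load_image_mr(), which programs a raw image directly into a RAM/ROM MemoryRegion (and fails for regions that are not directly accessible or images larger than the region). A rough sketch of how board code might use them; QEMU-internal includes and most error handling are elided, and my_flash_mr() is a made-up accessor used only for illustration:

    /* Peek at an ELF file's data encoding without loading it */
    static bool kernel_is_big_endian(const char *filename)
    {
        union {
            Elf32_Ehdr h32;
            Elf64_Ehdr h64;
        } ehdr;
        bool is64;
        Error *err = NULL;

        load_elf_hdr(filename, &ehdr, &is64, &err);
        if (err) {
            error_report_err(err);
            exit(1);
        }
        return is64 ? ehdr.h64.e_ident[EI_DATA] == ELFDATA2MSB
                    : ehdr.h32.e_ident[EI_DATA] == ELFDATA2MSB;
    }

    /* Program a raw firmware image straight into a flash MemoryRegion */
    static void load_firmware(const char *filename)
    {
        if (load_image_mr(filename, my_flash_mr()) < 0) {
            error_report("could not load ROM image '%s'", filename);
            exit(1);
        }
    }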
| @@ -312,6 +312,21 @@ static bool machine_get_suppress_vmdesc(Object *obj, Error **errp) | ||||
|     return ms->suppress_vmdesc; | ||||
| } | ||||
|  | ||||
| static void machine_set_enforce_config_section(Object *obj, bool value, | ||||
|                                              Error **errp) | ||||
| { | ||||
|     MachineState *ms = MACHINE(obj); | ||||
|  | ||||
|     ms->enforce_config_section = value; | ||||
| } | ||||
|  | ||||
| static bool machine_get_enforce_config_section(Object *obj, Error **errp) | ||||
| { | ||||
|     MachineState *ms = MACHINE(obj); | ||||
|  | ||||
|     return ms->enforce_config_section; | ||||
| } | ||||
|  | ||||
| static int error_on_sysbus_device(SysBusDevice *sbdev, void *opaque) | ||||
| { | ||||
|     error_report("Option '-device %s' cannot be handled by this machine", | ||||
| @@ -467,6 +482,12 @@ static void machine_initfn(Object *obj) | ||||
|     object_property_set_description(obj, "suppress-vmdesc", | ||||
|                                     "Set on to disable self-describing migration", | ||||
|                                     NULL); | ||||
|     object_property_add_bool(obj, "enforce-config-section", | ||||
|                              machine_get_enforce_config_section, | ||||
|                              machine_set_enforce_config_section, NULL); | ||||
|     object_property_set_description(obj, "enforce-config-section", | ||||
|                                     "Set on to enforce configuration section migration", | ||||
|                                     NULL); | ||||
|  | ||||
|     /* Register notifier when init is done for sysbus sanity checks */ | ||||
|     ms->sysbus_notifier.notify = machine_init_notify; | ||||
|   | ||||
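This hunk only adds the property plumbing, so the consumer of ms->enforce_config_section is not visible here; going by the description string, the flag should be settable per machine in the usual way, for example (machine type chosen arbitrarily):

    qemu-system-s390x -machine s390-ccw-virtio,enforce-config-section=on ...

which would force the outgoing migration stream to carry the configuration section even where the machine type's defaults would omit it.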
| @@ -109,7 +109,7 @@ static void a15mp_priv_realize(DeviceState *dev, Error **errp) | ||||
|     /* Memory map (addresses are offsets from PERIPHBASE): | ||||
|      *  0x0000-0x0fff -- reserved | ||||
|      *  0x1000-0x1fff -- GIC Distributor | ||||
|      *  0x2000-0x2fff -- GIC CPU interface | ||||
|      *  0x2000-0x3fff -- GIC CPU interface | ||||
|      *  0x4000-0x4fff -- GIC virtual interface control (not modelled) | ||||
|      *  0x5000-0x5fff -- GIC virtual interface control (not modelled) | ||||
|      *  0x6000-0x7fff -- GIC virtual CPU interface (not modelled) | ||||
|   | ||||
| @@ -73,7 +73,7 @@ void cris_load_image(CRISCPU *cpu, struct cris_load_info *li) | ||||
|     /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis  | ||||
|        devboard SDK.  */ | ||||
|     image_size = load_elf(li->image_filename, translate_kernel_address, NULL, | ||||
|                           &entry, NULL, &high, 0, EM_CRIS, 0); | ||||
|                           &entry, NULL, &high, 0, EM_CRIS, 0, 0); | ||||
|     li->entry = entry; | ||||
|     if (image_size < 0) { | ||||
|         /* Takes a kimage from the axis devboard SDK.  */ | ||||
|   | ||||
| @@ -276,14 +276,14 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s, | ||||
|             + ((int64_t)s->cirrus_blt_height-1) * pitch; | ||||
|         int32_t max = addr | ||||
|             + s->cirrus_blt_width; | ||||
|         if (min < 0 || max >= s->vga.vram_size) { | ||||
|         if (min < 0 || max > s->vga.vram_size) { | ||||
|             return true; | ||||
|         } | ||||
|     } else { | ||||
|         int64_t max = addr | ||||
|             + ((int64_t)s->cirrus_blt_height-1) * pitch | ||||
|             + s->cirrus_blt_width; | ||||
|         if (max >= s->vga.vram_size) { | ||||
|         if (max > s->vga.vram_size) { | ||||
|             return true; | ||||
|         } | ||||
|     } | ||||
|   | ||||
| @@ -1156,7 +1156,9 @@ static void qxl_soft_reset(PCIQXLDevice *d) | ||||
|     trace_qxl_soft_reset(d->id); | ||||
|     qxl_check_state(d); | ||||
|     qxl_clear_guest_bug(d); | ||||
|     qemu_mutex_lock(&d->async_lock); | ||||
|     d->current_async = QXL_UNDEFINED_IO; | ||||
|     qemu_mutex_unlock(&d->async_lock); | ||||
|  | ||||
|     if (d->id == 0) { | ||||
|         qxl_enter_vga_mode(d); | ||||
|   | ||||
| @@ -196,7 +196,8 @@ int load_multiboot(FWCfgState *fw_cfg, | ||||
|         } | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, | ||||
|                                &elf_low, &elf_high, 0, I386_ELF_MACHINE, 0); | ||||
|                                &elf_low, &elf_high, 0, I386_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|         if (kernel_size < 0) { | ||||
|             fprintf(stderr, "Error while loading elf kernel\n"); | ||||
|             exit(1); | ||||
|   | ||||
| @@ -116,37 +116,42 @@ static void hid_pointer_event(DeviceState *dev, QemuConsole *src, | ||||
|     }; | ||||
|     HIDState *hs = (HIDState *)dev; | ||||
|     HIDPointerEvent *e; | ||||
|     InputMoveEvent *move; | ||||
|     InputBtnEvent *btn; | ||||
|  | ||||
|     assert(hs->n < QUEUE_LENGTH); | ||||
|     e = &hs->ptr.queue[(hs->head + hs->n) & QUEUE_MASK]; | ||||
|  | ||||
|     switch (evt->type) { | ||||
|     case INPUT_EVENT_KIND_REL: | ||||
|         if (evt->u.rel->axis == INPUT_AXIS_X) { | ||||
|             e->xdx += evt->u.rel->value; | ||||
|         } else if (evt->u.rel->axis == INPUT_AXIS_Y) { | ||||
|             e->ydy += evt->u.rel->value; | ||||
|         move = evt->u.rel; | ||||
|         if (move->axis == INPUT_AXIS_X) { | ||||
|             e->xdx += move->value; | ||||
|         } else if (move->axis == INPUT_AXIS_Y) { | ||||
|             e->ydy += move->value; | ||||
|         } | ||||
|         break; | ||||
|  | ||||
|     case INPUT_EVENT_KIND_ABS: | ||||
|         if (evt->u.rel->axis == INPUT_AXIS_X) { | ||||
|             e->xdx = evt->u.rel->value; | ||||
|         } else if (evt->u.rel->axis == INPUT_AXIS_Y) { | ||||
|             e->ydy = evt->u.rel->value; | ||||
|         move = evt->u.abs; | ||||
|         if (move->axis == INPUT_AXIS_X) { | ||||
|             e->xdx = move->value; | ||||
|         } else if (move->axis == INPUT_AXIS_Y) { | ||||
|             e->ydy = move->value; | ||||
|         } | ||||
|         break; | ||||
|  | ||||
|     case INPUT_EVENT_KIND_BTN: | ||||
|         if (evt->u.btn->down) { | ||||
|             e->buttons_state |= bmap[evt->u.btn->button]; | ||||
|             if (evt->u.btn->button == INPUT_BUTTON_WHEELUP) { | ||||
|         btn = evt->u.btn; | ||||
|         if (btn->down) { | ||||
|             e->buttons_state |= bmap[btn->button]; | ||||
|             if (btn->button == INPUT_BUTTON_WHEEL_UP) { | ||||
|                 e->dz--; | ||||
|             } else if (evt->u.btn->button == INPUT_BUTTON_WHEELDOWN) { | ||||
|             } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) { | ||||
|                 e->dz++; | ||||
|             } | ||||
|         } else { | ||||
|             e->buttons_state &= ~bmap[evt->u.btn->button]; | ||||
|             e->buttons_state &= ~bmap[btn->button]; | ||||
|         } | ||||
|         break; | ||||
|  | ||||
| @@ -223,9 +228,10 @@ static void hid_keyboard_event(DeviceState *dev, QemuConsole *src, | ||||
|     HIDState *hs = (HIDState *)dev; | ||||
|     int scancodes[3], i, count; | ||||
|     int slot; | ||||
|     InputKeyEvent *key = evt->u.key; | ||||
|  | ||||
|     count = qemu_input_key_value_to_scancode(evt->u.key->key, | ||||
|                                              evt->u.key->down, | ||||
|     count = qemu_input_key_value_to_scancode(key->key, | ||||
|                                              key->down, | ||||
|                                              scancodes); | ||||
|     if (hs->n + count > QUEUE_LENGTH) { | ||||
|         fprintf(stderr, "usb-kbd: warning: key event queue full\n"); | ||||
|   | ||||
| @@ -182,10 +182,11 @@ static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, | ||||
| { | ||||
|     PS2KbdState *s = (PS2KbdState *)dev; | ||||
|     int scancodes[3], i, count; | ||||
|     InputKeyEvent *key = evt->u.key; | ||||
|  | ||||
|     qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); | ||||
|     count = qemu_input_key_value_to_scancode(evt->u.key->key, | ||||
|                                              evt->u.key->down, | ||||
|     count = qemu_input_key_value_to_scancode(key->key, | ||||
|                                              key->down, | ||||
|                                              scancodes); | ||||
|     for (i = 0; i < count; i++) { | ||||
|         ps2_put_keycode(s, scancodes[i]); | ||||
| @@ -389,6 +390,8 @@ static void ps2_mouse_event(DeviceState *dev, QemuConsole *src, | ||||
|         [INPUT_BUTTON_RIGHT]  = MOUSE_EVENT_RBUTTON, | ||||
|     }; | ||||
|     PS2MouseState *s = (PS2MouseState *)dev; | ||||
|     InputMoveEvent *move; | ||||
|     InputBtnEvent *btn; | ||||
|  | ||||
|     /* check if deltas are recorded when disabled */ | ||||
|     if (!(s->mouse_status & MOUSE_STATUS_ENABLED)) | ||||
| @@ -396,23 +399,25 @@ static void ps2_mouse_event(DeviceState *dev, QemuConsole *src, | ||||
|  | ||||
|     switch (evt->type) { | ||||
|     case INPUT_EVENT_KIND_REL: | ||||
|         if (evt->u.rel->axis == INPUT_AXIS_X) { | ||||
|             s->mouse_dx += evt->u.rel->value; | ||||
|         } else if (evt->u.rel->axis == INPUT_AXIS_Y) { | ||||
|             s->mouse_dy -= evt->u.rel->value; | ||||
|         move = evt->u.rel; | ||||
|         if (move->axis == INPUT_AXIS_X) { | ||||
|             s->mouse_dx += move->value; | ||||
|         } else if (move->axis == INPUT_AXIS_Y) { | ||||
|             s->mouse_dy -= move->value; | ||||
|         } | ||||
|         break; | ||||
|  | ||||
|     case INPUT_EVENT_KIND_BTN: | ||||
|         if (evt->u.btn->down) { | ||||
|             s->mouse_buttons |= bmap[evt->u.btn->button]; | ||||
|             if (evt->u.btn->button == INPUT_BUTTON_WHEELUP) { | ||||
|         btn = evt->u.btn; | ||||
|         if (btn->down) { | ||||
|             s->mouse_buttons |= bmap[btn->button]; | ||||
|             if (btn->button == INPUT_BUTTON_WHEEL_UP) { | ||||
|                 s->mouse_dz--; | ||||
|             } else if (evt->u.btn->button == INPUT_BUTTON_WHEELDOWN) { | ||||
|             } else if (btn->button == INPUT_BUTTON_WHEEL_DOWN) { | ||||
|                 s->mouse_dz++; | ||||
|             } | ||||
|         } else { | ||||
|             s->mouse_buttons &= ~bmap[evt->u.btn->button]; | ||||
|             s->mouse_buttons &= ~bmap[btn->button]; | ||||
|         } | ||||
|         break; | ||||
|  | ||||
|   | ||||
| @@ -143,8 +143,8 @@ static const unsigned int keymap_button[INPUT_BUTTON__MAX] = { | ||||
|     [INPUT_BUTTON_LEFT]              = BTN_LEFT, | ||||
|     [INPUT_BUTTON_RIGHT]             = BTN_RIGHT, | ||||
|     [INPUT_BUTTON_MIDDLE]            = BTN_MIDDLE, | ||||
|     [INPUT_BUTTON_WHEELUP]           = BTN_GEAR_UP, | ||||
|     [INPUT_BUTTON_WHEELDOWN]         = BTN_GEAR_DOWN, | ||||
|     [INPUT_BUTTON_WHEEL_UP]          = BTN_GEAR_UP, | ||||
|     [INPUT_BUTTON_WHEEL_DOWN]        = BTN_GEAR_DOWN, | ||||
| }; | ||||
|  | ||||
| static const unsigned int axismap_rel[INPUT_AXIS__MAX] = { | ||||
| @@ -191,46 +191,53 @@ static void virtio_input_handle_event(DeviceState *dev, QemuConsole *src, | ||||
|     VirtIOInput *vinput = VIRTIO_INPUT(dev); | ||||
|     virtio_input_event event; | ||||
|     int qcode; | ||||
|     InputKeyEvent *key; | ||||
|     InputMoveEvent *move; | ||||
|     InputBtnEvent *btn; | ||||
|  | ||||
|     switch (evt->type) { | ||||
|     case INPUT_EVENT_KIND_KEY: | ||||
|         qcode = qemu_input_key_value_to_qcode(evt->u.key->key); | ||||
|         key = evt->u.key; | ||||
|         qcode = qemu_input_key_value_to_qcode(key->key); | ||||
|         if (qcode && keymap_qcode[qcode]) { | ||||
|             event.type  = cpu_to_le16(EV_KEY); | ||||
|             event.code  = cpu_to_le16(keymap_qcode[qcode]); | ||||
|             event.value = cpu_to_le32(evt->u.key->down ? 1 : 0); | ||||
|             event.value = cpu_to_le32(key->down ? 1 : 0); | ||||
|             virtio_input_send(vinput, &event); | ||||
|         } else { | ||||
|             if (evt->u.key->down) { | ||||
|             if (key->down) { | ||||
|                 fprintf(stderr, "%s: unmapped key: %d [%s]\n", __func__, | ||||
|                         qcode, QKeyCode_lookup[qcode]); | ||||
|             } | ||||
|         } | ||||
|         break; | ||||
|     case INPUT_EVENT_KIND_BTN: | ||||
|         if (keymap_button[evt->u.btn->button]) { | ||||
|         btn = evt->u.btn; | ||||
|         if (keymap_button[btn->button]) { | ||||
|             event.type  = cpu_to_le16(EV_KEY); | ||||
|             event.code  = cpu_to_le16(keymap_button[evt->u.btn->button]); | ||||
|             event.value = cpu_to_le32(evt->u.btn->down ? 1 : 0); | ||||
|             event.code  = cpu_to_le16(keymap_button[btn->button]); | ||||
|             event.value = cpu_to_le32(btn->down ? 1 : 0); | ||||
|             virtio_input_send(vinput, &event); | ||||
|         } else { | ||||
|             if (evt->u.btn->down) { | ||||
|             if (btn->down) { | ||||
|                 fprintf(stderr, "%s: unmapped button: %d [%s]\n", __func__, | ||||
|                         evt->u.btn->button, | ||||
|                         InputButton_lookup[evt->u.btn->button]); | ||||
|                         btn->button, | ||||
|                         InputButton_lookup[btn->button]); | ||||
|             } | ||||
|         } | ||||
|         break; | ||||
|     case INPUT_EVENT_KIND_REL: | ||||
|         move = evt->u.rel; | ||||
|         event.type  = cpu_to_le16(EV_REL); | ||||
|         event.code  = cpu_to_le16(axismap_rel[evt->u.rel->axis]); | ||||
|         event.value = cpu_to_le32(evt->u.rel->value); | ||||
|         event.code  = cpu_to_le16(axismap_rel[move->axis]); | ||||
|         event.value = cpu_to_le32(move->value); | ||||
|         virtio_input_send(vinput, &event); | ||||
|         break; | ||||
|     case INPUT_EVENT_KIND_ABS: | ||||
|         move = evt->u.abs; | ||||
|         event.type  = cpu_to_le16(EV_ABS); | ||||
|         event.code  = cpu_to_le16(axismap_abs[evt->u.abs->axis]); | ||||
|         event.value = cpu_to_le32(evt->u.abs->value); | ||||
|         event.code  = cpu_to_le16(axismap_abs[move->axis]); | ||||
|         event.value = cpu_to_le32(move->value); | ||||
|         virtio_input_send(vinput, &event); | ||||
|         break; | ||||
|     default: | ||||
|   | ||||
| @@ -500,6 +500,41 @@ static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs) | ||||
|     } | ||||
| } | ||||
|  | ||||
| /* Return true if we should split priority drop and interrupt deactivation, | ||||
|  * ie whether the relevant EOIMode bit is set. | ||||
|  */ | ||||
| static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs) | ||||
| { | ||||
|     if (s->revision != 2) { | ||||
|         /* Before GICv2 prio-drop and deactivate are not separable */ | ||||
|         return false; | ||||
|     } | ||||
|     if (s->security_extn && !attrs.secure) { | ||||
|         return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS; | ||||
|     } | ||||
|     return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE; | ||||
| } | ||||
|  | ||||
| static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) | ||||
| { | ||||
|     int cm = 1 << cpu; | ||||
|     int group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm); | ||||
|  | ||||
|     if (!gic_eoi_split(s, cpu, attrs)) { | ||||
|         /* This is UNPREDICTABLE; we choose to ignore it */ | ||||
|         qemu_log_mask(LOG_GUEST_ERROR, | ||||
|                       "gic_deactivate_irq: GICC_DIR write when EOIMode clear"); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     if (s->security_extn && !attrs.secure && !group) { | ||||
|         DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     GIC_CLEAR_ACTIVE(irq, cm); | ||||
| } | ||||
|  | ||||
| void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) | ||||
| { | ||||
|     int cm = 1 << cpu; | ||||
| @@ -544,7 +579,11 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs) | ||||
|      */ | ||||
|  | ||||
|     gic_drop_prio(s, cpu, group); | ||||
|     GIC_CLEAR_ACTIVE(irq, cm); | ||||
|  | ||||
|     /* In GICv2 the guest can choose to split priority-drop and deactivate */ | ||||
|     if (!gic_eoi_split(s, cpu, attrs)) { | ||||
|         GIC_CLEAR_ACTIVE(irq, cm); | ||||
|     } | ||||
|     gic_update(s); | ||||
| } | ||||
|  | ||||
| @@ -1210,6 +1249,10 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset, | ||||
|         s->nsapr[regno][cpu] = value; | ||||
|         break; | ||||
|     } | ||||
|     case 0x1000: | ||||
|         /* GICC_DIR */ | ||||
|         gic_deactivate_irq(s, cpu, value & 0x3ff, attrs); | ||||
|         break; | ||||
|     default: | ||||
|         qemu_log_mask(LOG_GUEST_ERROR, | ||||
|                       "gic_cpu_write: Bad offset %x\n", (int)offset); | ||||
|   | ||||
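To make the new GICC_DIR handling concrete, here is a rough guest-side sketch of the split completion flow it models (bare-metal style, not QEMU code and not part of this series). The offsets follow the GICv2 layout the patch assumes: GICC_EOIR at 0x10 and GICC_DIR at 0x1000, which is also why the CPU interface region is grown to 0x2000 bytes in the following hunk. Setting the EOIMode bit in GICC_CTLR is assumed to have been done during interrupt controller setup.

    #include <stdint.h>

    #define GICC_EOIR 0x0010u  /* priority drop (only, once EOIMode is set) */
    #define GICC_DIR  0x1000u  /* explicit deactivation */

    static volatile uint8_t *gicc_base;   /* mapped GIC CPU interface */

    static inline void gicc_write(uint32_t off, uint32_t val)
    {
        *(volatile uint32_t *)(gicc_base + off) = val;
    }

    void handle_irq(uint32_t irqnr)
    {
        /* With EOIMode set, this write only drops the running priority,
         * so other interrupts may be taken while this one stays active. */
        gicc_write(GICC_EOIR, irqnr);

        /* ... long-running handling, or handing the interrupt to a VM ... */

        /* Explicitly deactivate once the work is really done. */
        gicc_write(GICC_DIR, irqnr);
    }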
| @@ -121,7 +121,7 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler, | ||||
|          * neither it can use KVM. | ||||
|          */ | ||||
|         memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL, | ||||
|                               s, "gic_cpu", s->revision == 2 ? 0x1000 : 0x100); | ||||
|                               s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100); | ||||
|         sysbus_init_mmio(sbd, &s->cpuiomem[0]); | ||||
|     } | ||||
| } | ||||
|   | ||||
| @@ -712,7 +712,7 @@ static int ics_find_free_block(ICSState *ics, int num, int alignnum) | ||||
|     return -1; | ||||
| } | ||||
|  | ||||
| int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) | ||||
| int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp) | ||||
| { | ||||
|     ICSState *ics = &icp->ics[src]; | ||||
|     int irq; | ||||
| @@ -720,14 +720,14 @@ int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) | ||||
|     if (irq_hint) { | ||||
|         assert(src == xics_find_source(icp, irq_hint)); | ||||
|         if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) { | ||||
|             trace_xics_alloc_failed_hint(src, irq_hint); | ||||
|             error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint); | ||||
|             return -1; | ||||
|         } | ||||
|         irq = irq_hint; | ||||
|     } else { | ||||
|         irq = ics_find_free_block(ics, 1, 1); | ||||
|         if (irq < 0) { | ||||
|             trace_xics_alloc_failed_no_left(src); | ||||
|             error_setg(errp, "can't allocate IRQ: no IRQ left"); | ||||
|             return -1; | ||||
|         } | ||||
|         irq += ics->offset; | ||||
| @@ -743,7 +743,8 @@ int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi) | ||||
|  * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block. | ||||
|  * If align==true, aligns the first IRQ number to num. | ||||
|  */ | ||||
| int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) | ||||
| int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align, | ||||
|                      Error **errp) | ||||
| { | ||||
|     int i, first = -1; | ||||
|     ICSState *ics = &icp->ics[src]; | ||||
| @@ -763,6 +764,10 @@ int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) | ||||
|     } else { | ||||
|         first = ics_find_free_block(ics, num, 1); | ||||
|     } | ||||
|     if (first < 0) { | ||||
|         error_setg(errp, "can't find a free %d-IRQ block", num); | ||||
|         return -1; | ||||
|     } | ||||
|  | ||||
|     if (first >= 0) { | ||||
|         for (i = first; i < first + num; ++i) { | ||||
|   | ||||
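The Error ** conversion above changes the calling convention for IRQ allocation from tracing a failure to reporting it to the caller. A minimal sketch of what a caller looks like under the new signature (the device function and its surroundings are made up; only the xics_alloc() call reflects this series):

    static void my_device_claim_irq(XICSState *icp, Error **errp)
    {
        Error *local_err = NULL;
        int irq;

        irq = xics_alloc(icp, 0, 0, false, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* ... wire up the freshly allocated irq ... */
    }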
| @@ -143,7 +143,7 @@ static void lm32_evr_init(MachineState *machine) | ||||
|         int kernel_size; | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, | ||||
|                                1, EM_LATTICEMICO32, 0); | ||||
|                                1, EM_LATTICEMICO32, 0, 0); | ||||
|         reset_info->bootstrap_pc = entry; | ||||
|  | ||||
|         if (kernel_size < 0) { | ||||
| @@ -245,7 +245,7 @@ static void lm32_uclinux_init(MachineState *machine) | ||||
|         int kernel_size; | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, | ||||
|                                1, EM_LATTICEMICO32, 0); | ||||
|                                1, EM_LATTICEMICO32, 0, 0); | ||||
|         reset_info->bootstrap_pc = entry; | ||||
|  | ||||
|         if (kernel_size < 0) { | ||||
|   | ||||
| @@ -177,7 +177,7 @@ milkymist_init(MachineState *machine) | ||||
|  | ||||
|         /* Boots a kernel elf binary.  */ | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, | ||||
|                                1, EM_LATTICEMICO32, 0); | ||||
|                                1, EM_LATTICEMICO32, 0, 0); | ||||
|         reset_info->bootstrap_pc = entry; | ||||
|  | ||||
|         if (kernel_size < 0) { | ||||
|   | ||||
| @@ -73,7 +73,7 @@ static void an5206_init(MachineState *machine) | ||||
|     } | ||||
|  | ||||
|     kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, | ||||
|                            NULL, NULL, 1, EM_68K, 0); | ||||
|                            NULL, NULL, 1, EM_68K, 0, 0); | ||||
|     entry = elf_entry; | ||||
|     if (kernel_size < 0) { | ||||
|         kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, | ||||
|   | ||||
| @@ -50,7 +50,7 @@ static void dummy_m68k_init(MachineState *machine) | ||||
|     /* Load kernel.  */ | ||||
|     if (kernel_filename) { | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, | ||||
|                                NULL, NULL, 1, EM_68K, 0); | ||||
|                                NULL, NULL, 1, EM_68K, 0, 0); | ||||
|         entry = elf_entry; | ||||
|         if (kernel_size < 0) { | ||||
|             kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, | ||||
|   | ||||
| @@ -276,7 +276,7 @@ static void mcf5208evb_init(MachineState *machine) | ||||
|     } | ||||
|  | ||||
|     kernel_size = load_elf(kernel_filename, NULL, NULL, &elf_entry, | ||||
|                            NULL, NULL, 1, EM_68K, 0); | ||||
|                            NULL, NULL, 1, EM_68K, 0, 0); | ||||
|     entry = elf_entry; | ||||
|     if (kernel_size < 0) { | ||||
|         kernel_size = load_uimage(kernel_filename, &entry, NULL, NULL, | ||||
|   | ||||
| @@ -142,12 +142,12 @@ void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, | ||||
|         /* Boots a kernel elf binary.  */ | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, | ||||
|                                &entry, &low, &high, | ||||
|                                big_endian, EM_MICROBLAZE, 0); | ||||
|                                big_endian, EM_MICROBLAZE, 0, 0); | ||||
|         base32 = entry; | ||||
|         if (base32 == 0xc0000000) { | ||||
|             kernel_size = load_elf(kernel_filename, translate_kernel_address, | ||||
|                                    NULL, &entry, NULL, NULL, | ||||
|                                    big_endian, EM_MICROBLAZE, 0); | ||||
|                                    big_endian, EM_MICROBLAZE, 0, 0); | ||||
|         } | ||||
|         /* Always boot into physical ram.  */ | ||||
|         boot_info.bootstrap_pc = (uint32_t)entry; | ||||
|   | ||||
| @@ -117,7 +117,7 @@ static int64_t load_kernel (CPUMIPSState *env) | ||||
|  | ||||
|     if (load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, NULL, | ||||
|                  (uint64_t *)&kernel_entry, (uint64_t *)&kernel_low, | ||||
|                  (uint64_t *)&kernel_high, 0, EM_MIPS, 1) < 0) { | ||||
|                  (uint64_t *)&kernel_high, 0, EM_MIPS, 1, 0) < 0) { | ||||
|         fprintf(stderr, "qemu: could not load kernel '%s'\n", | ||||
|                 loaderparams.kernel_filename); | ||||
|         exit(1); | ||||
|   | ||||
| @@ -796,7 +796,7 @@ static int64_t load_kernel (void) | ||||
|  | ||||
|     if (load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, NULL, | ||||
|                  (uint64_t *)&kernel_entry, NULL, (uint64_t *)&kernel_high, | ||||
|                  big_endian, EM_MIPS, 1) < 0) { | ||||
|                  big_endian, EM_MIPS, 1, 0) < 0) { | ||||
|         fprintf(stderr, "qemu: could not load kernel '%s'\n", | ||||
|                 loaderparams.kernel_filename); | ||||
|         exit(1); | ||||
|   | ||||
| @@ -70,7 +70,7 @@ static int64_t load_kernel(void) | ||||
|     kernel_size = load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, | ||||
|                            NULL, (uint64_t *)&entry, NULL, | ||||
|                            (uint64_t *)&kernel_high, big_endian, | ||||
|                            EM_MIPS, 1); | ||||
|                            EM_MIPS, 1, 0); | ||||
|     if (kernel_size >= 0) { | ||||
|         if ((entry & ~0x7fffffffULL) == 0x80000000) | ||||
|             entry = (int32_t)entry; | ||||
|   | ||||
| @@ -88,7 +88,7 @@ static int64_t load_kernel(void) | ||||
|     kernel_size = load_elf(loaderparams.kernel_filename, cpu_mips_kseg0_to_phys, | ||||
|                            NULL, (uint64_t *)&entry, NULL, | ||||
|                            (uint64_t *)&kernel_high, big_endian, | ||||
|                            EM_MIPS, 1); | ||||
|                            EM_MIPS, 1, 0); | ||||
|     if (kernel_size >= 0) { | ||||
|         if ((entry & ~0x7fffffffULL) == 0x80000000) | ||||
|             entry = (int32_t)entry; | ||||
|   | ||||
| @@ -98,7 +98,7 @@ static void bcm2835_mbox_update(BCM2835MboxState *s) | ||||
|      */ | ||||
|     for (n = 0; n < MBOX_CHAN_COUNT; n++) { | ||||
|         while (s->available[n] && !(s->mbox[0].status & ARM_MS_FULL)) { | ||||
|             value = ldl_phys(&s->mbox_as, n << MBOX_AS_CHAN_SHIFT); | ||||
|             value = ldl_le_phys(&s->mbox_as, n << MBOX_AS_CHAN_SHIFT); | ||||
|             assert(value != MBOX_INVALID_DATA); /* Pending interrupt but no data */ | ||||
|             mbox_push(&s->mbox[0], value); | ||||
|         } | ||||
| @@ -207,12 +207,12 @@ static void bcm2835_mbox_write(void *opaque, hwaddr offset, | ||||
|             ch = value & 0xf; | ||||
|             if (ch < MBOX_CHAN_COUNT) { | ||||
|                 childaddr = ch << MBOX_AS_CHAN_SHIFT; | ||||
|                 if (ldl_phys(&s->mbox_as, childaddr + MBOX_AS_PENDING)) { | ||||
|                 if (ldl_le_phys(&s->mbox_as, childaddr + MBOX_AS_PENDING)) { | ||||
|                     /* Child busy, push delayed. Push it in the arm->vc mbox */ | ||||
|                     mbox_push(&s->mbox[1], value); | ||||
|                 } else { | ||||
|                     /* Push it directly to the child device */ | ||||
|                     stl_phys(&s->mbox_as, childaddr, value); | ||||
|                     stl_le_phys(&s->mbox_as, childaddr, value); | ||||
|                 } | ||||
|             } else { | ||||
|                 /* Invalid channel number */ | ||||
|   | ||||
| @@ -22,20 +22,20 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|  | ||||
|     s->addr = value; | ||||
|  | ||||
|     tot_len = ldl_phys(&s->dma_as, value); | ||||
|     tot_len = ldl_le_phys(&s->dma_as, value); | ||||
|  | ||||
|     /* @(addr + 4) : Buffer response code */ | ||||
|     value = s->addr + 8; | ||||
|     while (value + 8 <= s->addr + tot_len) { | ||||
|         tag = ldl_phys(&s->dma_as, value); | ||||
|         bufsize = ldl_phys(&s->dma_as, value + 4); | ||||
|         tag = ldl_le_phys(&s->dma_as, value); | ||||
|         bufsize = ldl_le_phys(&s->dma_as, value + 4); | ||||
|         /* @(value + 8) : Request/response indicator */ | ||||
|         resplen = 0; | ||||
|         switch (tag) { | ||||
|         case 0x00000000: /* End tag */ | ||||
|             break; | ||||
|         case 0x00000001: /* Get firmware revision */ | ||||
|             stl_phys(&s->dma_as, value + 12, 346337); | ||||
|             stl_le_phys(&s->dma_as, value + 12, 346337); | ||||
|             resplen = 4; | ||||
|             break; | ||||
|         case 0x00010001: /* Get board model */ | ||||
| @@ -44,7 +44,7 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|             resplen = 4; | ||||
|             break; | ||||
|         case 0x00010002: /* Get board revision */ | ||||
|             stl_phys(&s->dma_as, value + 12, s->board_rev); | ||||
|             stl_le_phys(&s->dma_as, value + 12, s->board_rev); | ||||
|             resplen = 4; | ||||
|             break; | ||||
|         case 0x00010003: /* Get board MAC address */ | ||||
| @@ -58,24 +58,24 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|             break; | ||||
|         case 0x00010005: /* Get ARM memory */ | ||||
|             /* base */ | ||||
|             stl_phys(&s->dma_as, value + 12, 0); | ||||
|             stl_le_phys(&s->dma_as, value + 12, 0); | ||||
|             /* size */ | ||||
|             stl_phys(&s->dma_as, value + 16, s->ram_size); | ||||
|             stl_le_phys(&s->dma_as, value + 16, s->ram_size); | ||||
|             resplen = 8; | ||||
|             break; | ||||
|         case 0x00028001: /* Set power state */ | ||||
|             /* Assume that whatever device they asked for exists, | ||||
|              * and we'll just claim we set it to the desired state | ||||
|              */ | ||||
|             tmp = ldl_phys(&s->dma_as, value + 16); | ||||
|             stl_phys(&s->dma_as, value + 16, (tmp & 1)); | ||||
|             tmp = ldl_le_phys(&s->dma_as, value + 16); | ||||
|             stl_le_phys(&s->dma_as, value + 16, (tmp & 1)); | ||||
|             resplen = 8; | ||||
|             break; | ||||
|  | ||||
|         /* Clocks */ | ||||
|  | ||||
|         case 0x00030001: /* Get clock state */ | ||||
|             stl_phys(&s->dma_as, value + 16, 0x1); | ||||
|             stl_le_phys(&s->dma_as, value + 16, 0x1); | ||||
|             resplen = 8; | ||||
|             break; | ||||
|  | ||||
| @@ -88,15 +88,15 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|         case 0x00030002: /* Get clock rate */ | ||||
|         case 0x00030004: /* Get max clock rate */ | ||||
|         case 0x00030007: /* Get min clock rate */ | ||||
|             switch (ldl_phys(&s->dma_as, value + 12)) { | ||||
|             switch (ldl_le_phys(&s->dma_as, value + 12)) { | ||||
|             case 1: /* EMMC */ | ||||
|                 stl_phys(&s->dma_as, value + 16, 50000000); | ||||
|                 stl_le_phys(&s->dma_as, value + 16, 50000000); | ||||
|                 break; | ||||
|             case 2: /* UART */ | ||||
|                 stl_phys(&s->dma_as, value + 16, 3000000); | ||||
|                 stl_le_phys(&s->dma_as, value + 16, 3000000); | ||||
|                 break; | ||||
|             default: | ||||
|                 stl_phys(&s->dma_as, value + 16, 700000000); | ||||
|                 stl_le_phys(&s->dma_as, value + 16, 700000000); | ||||
|                 break; | ||||
|             } | ||||
|             resplen = 8; | ||||
| @@ -113,19 +113,19 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|         /* Temperature */ | ||||
|  | ||||
|         case 0x00030006: /* Get temperature */ | ||||
|             stl_phys(&s->dma_as, value + 16, 25000); | ||||
|             stl_le_phys(&s->dma_as, value + 16, 25000); | ||||
|             resplen = 8; | ||||
|             break; | ||||
|  | ||||
|         case 0x0003000A: /* Get max temperature */ | ||||
|             stl_phys(&s->dma_as, value + 16, 99000); | ||||
|             stl_le_phys(&s->dma_as, value + 16, 99000); | ||||
|             resplen = 8; | ||||
|             break; | ||||
|  | ||||
|  | ||||
|         case 0x00060001: /* Get DMA channels */ | ||||
|             /* channels 2-5 */ | ||||
|             stl_phys(&s->dma_as, value + 12, 0x003C); | ||||
|             stl_le_phys(&s->dma_as, value + 12, 0x003C); | ||||
|             resplen = 4; | ||||
|             break; | ||||
|  | ||||
| @@ -143,12 +143,12 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) | ||||
|             break; | ||||
|         } | ||||
|  | ||||
|         stl_phys(&s->dma_as, value + 8, (1 << 31) | resplen); | ||||
|         stl_le_phys(&s->dma_as, value + 8, (1 << 31) | resplen); | ||||
|         value += bufsize + 12; | ||||
|     } | ||||
|  | ||||
|     /* Buffer response code */ | ||||
|     stl_phys(&s->dma_as, s->addr + 4, (1 << 31)); | ||||
|     stl_le_phys(&s->dma_as, s->addr + 4, (1 << 31)); | ||||
| } | ||||
|  | ||||
| static uint64_t bcm2835_property_read(void *opaque, hwaddr offset, | ||||
|   | ||||
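The bcm2835 hunks above replace the target-endian ldl_phys()/stl_phys() helpers with the explicitly little-endian ldl_le_phys()/stl_le_phys() variants, since the VideoCore mailbox and property buffers are always little-endian regardless of the target's configured endianness. A minimal sketch of completing one response tag in the layout used above (assuming QEMU's usual memory-access declarations are in scope; the wrapper itself is hypothetical):

```c
/* Tag layout, as in the hunk above: +0 tag id, +4 buffer size,
 * +8 request/response code, +12 payload.  Hypothetical helper. */
static void complete_u32_tag(AddressSpace *as, hwaddr tag_addr, uint32_t value)
{
    stl_le_phys(as, tag_addr + 12, value);          /* 4-byte response payload */
    stl_le_phys(as, tag_addr + 8, (1u << 31) | 4);  /* bit 31 flags "response", length 4 */
}
```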
| @@ -557,11 +557,13 @@ void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, | ||||
|  | ||||
|     DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan); | ||||
|  | ||||
|     assert(rw); | ||||
|     assert(flush); | ||||
|  | ||||
|     ch->irq = irq; | ||||
|     ch->rw = rw; | ||||
|     ch->flush = flush; | ||||
|     ch->io.opaque = opaque; | ||||
|     ch->io.channel = ch; | ||||
| } | ||||
|  | ||||
| static void | ||||
| @@ -775,6 +777,20 @@ static void dbdma_reset(void *opaque) | ||||
|         memset(s->channels[i].regs, 0, DBDMA_SIZE); | ||||
| } | ||||
|  | ||||
| static void dbdma_unassigned_rw(DBDMA_io *io) | ||||
| { | ||||
|     DBDMA_channel *ch = io->channel; | ||||
|     qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", | ||||
|                   __func__, ch->channel); | ||||
| } | ||||
|  | ||||
| static void dbdma_unassigned_flush(DBDMA_io *io) | ||||
| { | ||||
|     DBDMA_channel *ch = io->channel; | ||||
|     qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", | ||||
|                   __func__, ch->channel); | ||||
| } | ||||
|  | ||||
| void* DBDMA_init (MemoryRegion **dbdma_mem) | ||||
| { | ||||
|     DBDMAState *s; | ||||
| @@ -784,8 +800,13 @@ void* DBDMA_init (MemoryRegion **dbdma_mem) | ||||
|  | ||||
|     for (i = 0; i < DBDMA_CHANNELS; i++) { | ||||
|         DBDMA_io *io = &s->channels[i].io; | ||||
|         DBDMA_channel *ch = &s->channels[i]; | ||||
|         qemu_iovec_init(&io->iov, 1); | ||||
|         s->channels[i].channel = i; | ||||
|  | ||||
|         ch->rw = dbdma_unassigned_rw; | ||||
|         ch->flush = dbdma_unassigned_flush; | ||||
|         ch->channel = i; | ||||
|         ch->io.channel = ch; | ||||
|     } | ||||
|  | ||||
|     memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000); | ||||
|   | ||||
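The DBDMA hunks install default rw/flush handlers on every channel at init time and assert that real handlers are passed to DBDMA_register_channel(), so a guest poking an unregistered channel now produces a logged guest error instead of a NULL-pointer dereference. A self-contained sketch of that pattern (types and names here are illustrative, not the QEMU ones):

```c
#include <assert.h>
#include <stdio.h>

typedef struct Channel Channel;
typedef void (*channel_cb)(Channel *ch);

struct Channel {
    int id;
    channel_cb rw;
    channel_cb flush;
};

/* Default handler: never NULL, so an unregistered channel only logs. */
static void unassigned_cb(Channel *ch)
{
    fprintf(stderr, "use of unassigned channel %d\n", ch->id);
}

static void channel_init(Channel *ch, int id)
{
    ch->id = id;
    ch->rw = unassigned_cb;     /* replaced by register_channel() later */
    ch->flush = unassigned_cb;
}

static void register_channel(Channel *ch, channel_cb rw, channel_cb flush)
{
    assert(rw && flush);        /* callers must supply real handlers */
    ch->rw = rw;
    ch->flush = flush;
}
```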
| @@ -54,7 +54,8 @@ static void load_kernel(MoxieCPU *cpu, LoaderParams *loader_params) | ||||
|     ram_addr_t initrd_offset; | ||||
|  | ||||
|     kernel_size = load_elf(loader_params->kernel_filename,  NULL, NULL, | ||||
|                            &entry, &kernel_low, &kernel_high, 1, EM_MOXIE, 0); | ||||
|                            &entry, &kernel_low, &kernel_high, 1, EM_MOXIE, | ||||
|                            0, 0); | ||||
|  | ||||
|     if (kernel_size <= 0) { | ||||
|         fprintf(stderr, "qemu: could not load kernel '%s'\n", | ||||
|   | ||||
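This hunk and the later load_elf() hunks below are the same mechanical change: the ELF loader gained one extra trailing parameter (a data byte-swap order for targets that need the loaded image swabbed), and callers that need no swapping now pass an additional 0. Assuming parameter names, the extended prototype reads roughly:

```c
/* Sketch of the extended prototype; parameter names are approximate. */
int load_elf(const char *filename,
             uint64_t (*translate_fn)(void *, uint64_t), void *translate_opaque,
             uint64_t *pentry, uint64_t *lowaddr, uint64_t *highaddr,
             int big_endian, int elf_machine,
             int clear_lsb, int data_swab);   /* data_swab is the new argument */
```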
| @@ -778,17 +778,19 @@ FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase, | ||||
|     DeviceState *dev; | ||||
|     FWCfgState *s; | ||||
|     uint32_t version = FW_CFG_VERSION; | ||||
|     bool dma_enabled = dma_iobase && dma_as; | ||||
|     bool dma_requested = dma_iobase && dma_as; | ||||
|  | ||||
|     dev = qdev_create(NULL, TYPE_FW_CFG_IO); | ||||
|     qdev_prop_set_uint32(dev, "iobase", iobase); | ||||
|     qdev_prop_set_uint32(dev, "dma_iobase", dma_iobase); | ||||
|     qdev_prop_set_bit(dev, "dma_enabled", dma_enabled); | ||||
|     if (!dma_requested) { | ||||
|         qdev_prop_set_bit(dev, "dma_enabled", false); | ||||
|     } | ||||
|  | ||||
|     fw_cfg_init1(dev); | ||||
|     s = FW_CFG(dev); | ||||
|  | ||||
|     if (dma_enabled) { | ||||
|     if (s->dma_enabled) { | ||||
|         /* 64 bits for the address field */ | ||||
|         s->dma_as = dma_as; | ||||
|         s->dma_addr = 0; | ||||
| @@ -814,11 +816,13 @@ FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr, | ||||
|     SysBusDevice *sbd; | ||||
|     FWCfgState *s; | ||||
|     uint32_t version = FW_CFG_VERSION; | ||||
|     bool dma_enabled = dma_addr && dma_as; | ||||
|     bool dma_requested = dma_addr && dma_as; | ||||
|  | ||||
|     dev = qdev_create(NULL, TYPE_FW_CFG_MEM); | ||||
|     qdev_prop_set_uint32(dev, "data_width", data_width); | ||||
|     qdev_prop_set_bit(dev, "dma_enabled", dma_enabled); | ||||
|     if (!dma_requested) { | ||||
|         qdev_prop_set_bit(dev, "dma_enabled", false); | ||||
|     } | ||||
|  | ||||
|     fw_cfg_init1(dev); | ||||
|  | ||||
| @@ -828,7 +832,7 @@ FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr, | ||||
|  | ||||
|     s = FW_CFG(dev); | ||||
|  | ||||
|     if (dma_enabled) { | ||||
|     if (s->dma_enabled) { | ||||
|         s->dma_as = dma_as; | ||||
|         s->dma_addr = 0; | ||||
|         sysbus_mmio_map(sbd, 2, dma_addr); | ||||
| @@ -873,7 +877,7 @@ static Property fw_cfg_io_properties[] = { | ||||
|     DEFINE_PROP_UINT32("iobase", FWCfgIoState, iobase, -1), | ||||
|     DEFINE_PROP_UINT32("dma_iobase", FWCfgIoState, dma_iobase, -1), | ||||
|     DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled, | ||||
|                      false), | ||||
|                      true), | ||||
|     DEFINE_PROP_END_OF_LIST(), | ||||
| }; | ||||
|  | ||||
| @@ -913,7 +917,7 @@ static const TypeInfo fw_cfg_io_info = { | ||||
| static Property fw_cfg_mem_properties[] = { | ||||
|     DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1), | ||||
|     DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled, | ||||
|                      false), | ||||
|                      true), | ||||
|     DEFINE_PROP_END_OF_LIST(), | ||||
| }; | ||||
|  | ||||
|   | ||||
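With these fw_cfg hunks the "dma_enabled" property defaults to true, and the init wrappers only force it to false when the caller did not request DMA; whether the guest-visible DMA interface actually comes up is then read back from s->dma_enabled, so a property override (for example from an older machine type) takes effect. A plausible compat-property entry for that, not shown in this diff and purely illustrative:

```c
/* Hypothetical entries an older machine type could register to turn the
 * new default back off (GlobalProperty as declared in hw/qdev-core.h). */
static GlobalProperty fw_cfg_compat_props[] = {
    { .driver = "fw_cfg_io",  .property = "dma_enabled", .value = "off" },
    { .driver = "fw_cfg_mem", .property = "dma_enabled", .value = "off" },
};
```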
| @@ -69,7 +69,8 @@ static void cpu_openrisc_load_kernel(ram_addr_t ram_size, | ||||
|  | ||||
|     if (kernel_filename && !qtest_enabled()) { | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, | ||||
|                                &elf_entry, NULL, NULL, 1, EM_OPENRISC, 1); | ||||
|                                &elf_entry, NULL, NULL, 1, EM_OPENRISC, | ||||
|                                1, 0); | ||||
|         entry = elf_entry; | ||||
|         if (kernel_size < 0) { | ||||
|             kernel_size = load_uimage(kernel_filename, | ||||
|   | ||||
| @@ -313,7 +313,7 @@ static void raven_realize(PCIDevice *d, Error **errp) | ||||
|         if (filename) { | ||||
|             if (s->elf_machine != EM_NONE) { | ||||
|                 bios_size = load_elf(filename, NULL, NULL, NULL, | ||||
|                                      NULL, NULL, 1, s->elf_machine, 0); | ||||
|                                      NULL, NULL, 1, s->elf_machine, 0, 0); | ||||
|             } | ||||
|             if (bios_size < 0) { | ||||
|                 bios_size = get_image_size(filename); | ||||
|   | ||||
| @@ -1017,7 +1017,7 @@ void ppce500_init(MachineState *machine, PPCE500Params *params) | ||||
|     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | ||||
|  | ||||
|     bios_size = load_elf(filename, NULL, NULL, &bios_entry, &loadaddr, NULL, | ||||
|                          1, PPC_ELF_MACHINE, 0); | ||||
|                          1, PPC_ELF_MACHINE, 0, 0); | ||||
|     if (bios_size < 0) { | ||||
|         /* | ||||
|          * Hrm. No ELF image? Try a uImage, maybe someone is giving us an | ||||
|   | ||||
| @@ -221,7 +221,7 @@ static void ppc_core99_init(MachineState *machine) | ||||
|     /* Load OpenBIOS (ELF) */ | ||||
|     if (filename) { | ||||
|         bios_size = load_elf(filename, NULL, NULL, NULL, | ||||
|                              NULL, NULL, 1, PPC_ELF_MACHINE, 0); | ||||
|                              NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); | ||||
|  | ||||
|         g_free(filename); | ||||
|     } else { | ||||
| @@ -244,7 +244,8 @@ static void ppc_core99_init(MachineState *machine) | ||||
|         kernel_base = KERNEL_LOAD_ADDR; | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0); | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|         if (kernel_size < 0) | ||||
|             kernel_size = load_aout(kernel_filename, kernel_base, | ||||
|                                     ram_size - kernel_base, bswap_needed, | ||||
|   | ||||
| @@ -149,7 +149,7 @@ static void ppc_heathrow_init(MachineState *machine) | ||||
|     /* Load OpenBIOS (ELF) */ | ||||
|     if (filename) { | ||||
|         bios_size = load_elf(filename, 0, NULL, NULL, NULL, NULL, | ||||
|                              1, PPC_ELF_MACHINE, 0); | ||||
|                              1, PPC_ELF_MACHINE, 0, 0); | ||||
|         g_free(filename); | ||||
|     } else { | ||||
|         bios_size = -1; | ||||
| @@ -170,7 +170,8 @@ static void ppc_heathrow_init(MachineState *machine) | ||||
| #endif | ||||
|         kernel_base = KERNEL_LOAD_ADDR; | ||||
|         kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0); | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|         if (kernel_size < 0) | ||||
|             kernel_size = load_aout(kernel_filename, kernel_base, | ||||
|                                     ram_size - kernel_base, bswap_needed, | ||||
|   | ||||
| @@ -256,7 +256,8 @@ static void bamboo_init(MachineState *machine) | ||||
|                               NULL, NULL); | ||||
|         if (success < 0) { | ||||
|             success = load_elf(kernel_filename, NULL, NULL, &elf_entry, | ||||
|                                &elf_lowaddr, NULL, 1, PPC_ELF_MACHINE, 0); | ||||
|                                &elf_lowaddr, NULL, 1, PPC_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|             entry = elf_entry; | ||||
|             loadaddr = elf_lowaddr; | ||||
|         } | ||||
|   | ||||
| @@ -1942,11 +1942,13 @@ static void ppc_spapr_init(MachineState *machine) | ||||
|         uint64_t lowaddr = 0; | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0); | ||||
|                                NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|         if (kernel_size == ELF_LOAD_WRONG_ENDIAN) { | ||||
|             kernel_size = load_elf(kernel_filename, | ||||
|                                    translate_kernel_address, NULL, | ||||
|                                    NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE, 0); | ||||
|                                    NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE, | ||||
|                                    0, 0); | ||||
|             kernel_le = kernel_size > 0; | ||||
|         } | ||||
|         if (kernel_size < 0) { | ||||
| @@ -2427,6 +2429,7 @@ static void spapr_machine_2_3_instance_options(MachineState *machine) | ||||
|     spapr_machine_2_4_instance_options(machine); | ||||
|     savevm_skip_section_footers(); | ||||
|     global_state_set_optional(); | ||||
|     savevm_skip_configuration(); | ||||
| } | ||||
|  | ||||
| static void spapr_machine_2_3_class_options(MachineClass *mc) | ||||
| @@ -2452,6 +2455,7 @@ DEFINE_SPAPR_MACHINE(2_3, "2.3", false); | ||||
| static void spapr_machine_2_2_instance_options(MachineState *machine) | ||||
| { | ||||
|     spapr_machine_2_3_instance_options(machine); | ||||
|     machine->suppress_vmdesc = true; | ||||
| } | ||||
|  | ||||
| static void spapr_machine_2_2_class_options(MachineClass *mc) | ||||
|   | ||||
| @@ -588,7 +588,8 @@ out_no_events: | ||||
| void spapr_events_init(sPAPRMachineState *spapr) | ||||
| { | ||||
|     QTAILQ_INIT(&spapr->pending_events); | ||||
|     spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false); | ||||
|     spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false, | ||||
|                                             &error_fatal); | ||||
|     spapr->epow_notifier.notify = spapr_powerdown_req; | ||||
|     qemu_register_powerdown_notifier(&spapr->epow_notifier); | ||||
|     spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception", | ||||
|   | ||||
| @@ -275,11 +275,12 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, | ||||
|     unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */ | ||||
|     unsigned int seq_num = rtas_ld(args, 5); | ||||
|     unsigned int ret_intr_type; | ||||
|     unsigned int irq, max_irqs = 0, num = 0; | ||||
|     unsigned int irq, max_irqs = 0; | ||||
|     sPAPRPHBState *phb = NULL; | ||||
|     PCIDevice *pdev = NULL; | ||||
|     spapr_pci_msi *msi; | ||||
|     int *config_addr_key; | ||||
|     Error *err = NULL; | ||||
|  | ||||
|     switch (func) { | ||||
|     case RTAS_CHANGE_MSI_FN: | ||||
| @@ -305,9 +306,10 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); | ||||
|  | ||||
|     /* Releasing MSIs */ | ||||
|     if (!req_num) { | ||||
|         msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr); | ||||
|         if (!msi) { | ||||
|             trace_spapr_pci_msi("Releasing wrong config", config_addr); | ||||
|             rtas_st(rets, 0, RTAS_OUT_HW_ERROR); | ||||
| @@ -316,10 +318,10 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, | ||||
|  | ||||
|         xics_free(spapr->icp, msi->first_irq, msi->num); | ||||
|         if (msi_present(pdev)) { | ||||
|             spapr_msi_setmsg(pdev, 0, false, 0, num); | ||||
|             spapr_msi_setmsg(pdev, 0, false, 0, 0); | ||||
|         } | ||||
|         if (msix_present(pdev)) { | ||||
|             spapr_msi_setmsg(pdev, 0, true, 0, num); | ||||
|             spapr_msi_setmsg(pdev, 0, true, 0, 0); | ||||
|         } | ||||
|         g_hash_table_remove(phb->msi, &config_addr); | ||||
|  | ||||
| @@ -353,13 +355,20 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr, | ||||
|  | ||||
|     /* Allocate MSIs */ | ||||
|     irq = xics_alloc_block(spapr->icp, 0, req_num, false, | ||||
|                            ret_intr_type == RTAS_TYPE_MSI); | ||||
|     if (!irq) { | ||||
|         error_report("Cannot allocate MSIs for device %x", config_addr); | ||||
|                            ret_intr_type == RTAS_TYPE_MSI, &err); | ||||
|     if (err) { | ||||
|         error_reportf_err(err, "Can't allocate MSIs for device %x: ", | ||||
|                           config_addr); | ||||
|         rtas_st(rets, 0, RTAS_OUT_HW_ERROR); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|     /* Release previous MSIs */ | ||||
|     if (msi) { | ||||
|         xics_free(spapr->icp, msi->first_irq, msi->num); | ||||
|         g_hash_table_remove(phb->msi, &config_addr); | ||||
|     } | ||||
|  | ||||
|     /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */ | ||||
|     spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX, | ||||
|                      irq, req_num); | ||||
| @@ -1360,10 +1369,12 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp) | ||||
|     /* Initialize the LSI table */ | ||||
|     for (i = 0; i < PCI_NUM_PINS; i++) { | ||||
|         uint32_t irq; | ||||
|         Error *local_err = NULL; | ||||
|  | ||||
|         irq = xics_alloc_block(spapr->icp, 0, 1, true, false); | ||||
|         if (!irq) { | ||||
|             error_setg(errp, "spapr_allocate_lsi failed"); | ||||
|         irq = xics_alloc_block(spapr->icp, 0, 1, true, false, &local_err); | ||||
|         if (local_err) { | ||||
|             error_propagate(errp, local_err); | ||||
|             error_prepend(errp, "can't allocate LSIs: "); | ||||
|             return; | ||||
|         } | ||||
|  | ||||
|   | ||||
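The sPAPR hunks convert xics_alloc()/xics_alloc_block() callers from checking for a zero return value to the Error ** convention: the callee reports failure through a local Error, which the caller prefixes and hands up (or passes &error_fatal when there is no sensible recovery). A minimal sketch of that shape, with all names hypothetical:

```c
#include "qapi/error.h"

static void mydev_realize(DeviceState *dev, Error **errp)
{
    Error *local_err = NULL;

    my_resource_alloc(dev, &local_err);       /* hypothetical callee using Error ** */
    if (local_err) {
        error_prepend(&local_err, "can't allocate resource: ");
        error_propagate(errp, local_err);     /* hand the error to our caller */
        return;
    }
}
```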
| @@ -170,6 +170,7 @@ static void spapr_rng_class_init(ObjectClass *oc, void *data) | ||||
|     dc->realize = spapr_rng_realize; | ||||
|     set_bit(DEVICE_CATEGORY_MISC, dc->categories); | ||||
|     dc->props = spapr_rng_properties; | ||||
|     dc->hotpluggable = false; | ||||
| } | ||||
|  | ||||
| static const TypeInfo spapr_rng_info = { | ||||
|   | ||||
| @@ -431,6 +431,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) | ||||
|     VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev; | ||||
|     VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev); | ||||
|     char *id; | ||||
|     Error *local_err = NULL; | ||||
|  | ||||
|     if (dev->reg != -1) { | ||||
|         /* | ||||
| @@ -463,9 +464,9 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp) | ||||
|         dev->qdev.id = id; | ||||
|     } | ||||
|  | ||||
|     dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false); | ||||
|     if (!dev->irq) { | ||||
|         error_setg(errp, "can't allocate IRQ"); | ||||
|     dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false, &local_err); | ||||
|     if (local_err) { | ||||
|         error_propagate(errp, local_err); | ||||
|         return; | ||||
|     } | ||||
|  | ||||
|   | ||||
| @@ -258,7 +258,8 @@ static void virtex_init(MachineState *machine) | ||||
|  | ||||
|         /* Boots a kernel elf binary.  */ | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, | ||||
|                                &entry, &low, &high, 1, PPC_ELF_MACHINE, 0); | ||||
|                                &entry, &low, &high, 1, PPC_ELF_MACHINE, | ||||
|                                0, 0); | ||||
|         boot_info.bootstrap_pc = entry & 0x00ffffff; | ||||
|  | ||||
|         if (kernel_size < 0) { | ||||
|   | ||||
							
								
								
									
hw/s390x/css.c
							| @@ -60,9 +60,81 @@ typedef struct ChannelSubSys { | ||||
|     CssImage *css[MAX_CSSID + 1]; | ||||
|     uint8_t default_cssid; | ||||
|     QTAILQ_HEAD(, IoAdapter) io_adapters; | ||||
|     QTAILQ_HEAD(, IndAddr) indicator_addresses; | ||||
| } ChannelSubSys; | ||||
|  | ||||
| static ChannelSubSys *channel_subsys; | ||||
| static ChannelSubSys channel_subsys = { | ||||
|     .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws), | ||||
|     .sei_pending = false, | ||||
|     .do_crw_mchk = true, | ||||
|     .crws_lost = false, | ||||
|     .chnmon_active = false, | ||||
|     .io_adapters = QTAILQ_HEAD_INITIALIZER(channel_subsys.io_adapters), | ||||
|     .indicator_addresses = | ||||
|         QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses), | ||||
| }; | ||||
|  | ||||
| IndAddr *get_indicator(hwaddr ind_addr, int len) | ||||
| { | ||||
|     IndAddr *indicator; | ||||
|  | ||||
|     QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) { | ||||
|         if (indicator->addr == ind_addr) { | ||||
|             indicator->refcnt++; | ||||
|             return indicator; | ||||
|         } | ||||
|     } | ||||
|     indicator = g_new0(IndAddr, 1); | ||||
|     indicator->addr = ind_addr; | ||||
|     indicator->len = len; | ||||
|     indicator->refcnt = 1; | ||||
|     QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses, | ||||
|                        indicator, sibling); | ||||
|     return indicator; | ||||
| } | ||||
|  | ||||
| static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr, | ||||
|                                bool do_map) | ||||
| { | ||||
|     S390FLICState *fs = s390_get_flic(); | ||||
|     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); | ||||
|  | ||||
|     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map); | ||||
| } | ||||
|  | ||||
| void release_indicator(AdapterInfo *adapter, IndAddr *indicator) | ||||
| { | ||||
|     assert(indicator->refcnt > 0); | ||||
|     indicator->refcnt--; | ||||
|     if (indicator->refcnt > 0) { | ||||
|         return; | ||||
|     } | ||||
|     QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling); | ||||
|     if (indicator->map) { | ||||
|         s390_io_adapter_map(adapter, indicator->map, false); | ||||
|     } | ||||
|     g_free(indicator); | ||||
| } | ||||
|  | ||||
| int map_indicator(AdapterInfo *adapter, IndAddr *indicator) | ||||
| { | ||||
|     int ret; | ||||
|  | ||||
|     if (indicator->map) { | ||||
|         return 0; /* already mapped is not an error */ | ||||
|     } | ||||
|     indicator->map = indicator->addr; | ||||
|     ret = s390_io_adapter_map(adapter, indicator->map, true); | ||||
|     if ((ret != 0) && (ret != -ENOSYS)) { | ||||
|         goto out_err; | ||||
|     } | ||||
|     return 0; | ||||
|  | ||||
| out_err: | ||||
|     indicator->map = 0; | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| int css_create_css_image(uint8_t cssid, bool default_image) | ||||
| { | ||||
| @@ -70,12 +142,12 @@ int css_create_css_image(uint8_t cssid, bool default_image) | ||||
|     if (cssid > MAX_CSSID) { | ||||
|         return -EINVAL; | ||||
|     } | ||||
|     if (channel_subsys->css[cssid]) { | ||||
|     if (channel_subsys.css[cssid]) { | ||||
|         return -EBUSY; | ||||
|     } | ||||
|     channel_subsys->css[cssid] = g_malloc0(sizeof(CssImage)); | ||||
|     channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage)); | ||||
|     if (default_image) { | ||||
|         channel_subsys->default_cssid = cssid; | ||||
|         channel_subsys.default_cssid = cssid; | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
| @@ -90,7 +162,7 @@ int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap, | ||||
|     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); | ||||
|  | ||||
|     *id = 0; | ||||
|     QTAILQ_FOREACH(adapter, &channel_subsys->io_adapters, sibling) { | ||||
|     QTAILQ_FOREACH(adapter, &channel_subsys.io_adapters, sibling) { | ||||
|         if ((adapter->type == type) && (adapter->isc == isc)) { | ||||
|             *id = adapter->id; | ||||
|             found = true; | ||||
| @@ -110,7 +182,7 @@ int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap, | ||||
|         adapter->id = *id; | ||||
|         adapter->isc = isc; | ||||
|         adapter->type = type; | ||||
|         QTAILQ_INSERT_TAIL(&channel_subsys->io_adapters, adapter, sibling); | ||||
|         QTAILQ_INSERT_TAIL(&channel_subsys.io_adapters, adapter, sibling); | ||||
|     } else { | ||||
|         g_free(adapter); | ||||
|         fprintf(stderr, "Unexpected error %d when registering adapter %d\n", | ||||
| @@ -122,7 +194,7 @@ out: | ||||
|  | ||||
| uint16_t css_build_subchannel_id(SubchDev *sch) | ||||
| { | ||||
|     if (channel_subsys->max_cssid > 0) { | ||||
|     if (channel_subsys.max_cssid > 0) { | ||||
|         return (sch->cssid << 8) | (1 << 3) | (sch->ssid << 1) | 1; | ||||
|     } | ||||
|     return (sch->ssid << 1) | 1; | ||||
| @@ -270,7 +342,8 @@ static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1) | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr) | ||||
| static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr, | ||||
|                              bool suspend_allowed) | ||||
| { | ||||
|     int ret; | ||||
|     bool check_len; | ||||
| @@ -298,7 +371,7 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr) | ||||
|     } | ||||
|  | ||||
|     if (ccw.flags & CCW_FLAG_SUSPEND) { | ||||
|         return -EINPROGRESS; | ||||
|         return suspend_allowed ? -EINPROGRESS : -EINVAL; | ||||
|     } | ||||
|  | ||||
|     check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); | ||||
| @@ -396,6 +469,7 @@ static void sch_handle_start_func(SubchDev *sch, ORB *orb) | ||||
|     SCSW *s = &sch->curr_status.scsw; | ||||
|     int path; | ||||
|     int ret; | ||||
|     bool suspend_allowed; | ||||
|  | ||||
|     /* Path management: In our simple css, we always choose the only path. */ | ||||
|     path = 0x80; | ||||
| @@ -415,12 +489,15 @@ static void sch_handle_start_func(SubchDev *sch, ORB *orb) | ||||
|         } | ||||
|         sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT); | ||||
|         sch->ccw_no_data_cnt = 0; | ||||
|         suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND); | ||||
|     } else { | ||||
|         s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); | ||||
|         /* The channel program had been suspended before. */ | ||||
|         suspend_allowed = true; | ||||
|     } | ||||
|     sch->last_cmd_valid = false; | ||||
|     do { | ||||
|         ret = css_interpret_ccw(sch, sch->channel_prog); | ||||
|         ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed); | ||||
|         switch (ret) { | ||||
|         case -EAGAIN: | ||||
|             /* ccw chain, continue processing */ | ||||
| @@ -778,12 +855,12 @@ static void css_update_chnmon(SubchDev *sch) | ||||
|  | ||||
|         offset = sch->curr_status.pmcw.mbi << 5; | ||||
|         count = address_space_lduw(&address_space_memory, | ||||
|                                    channel_subsys->chnmon_area + offset, | ||||
|                                    channel_subsys.chnmon_area + offset, | ||||
|                                    MEMTXATTRS_UNSPECIFIED, | ||||
|                                    NULL); | ||||
|         count++; | ||||
|         address_space_stw(&address_space_memory, | ||||
|                           channel_subsys->chnmon_area + offset, count, | ||||
|                           channel_subsys.chnmon_area + offset, count, | ||||
|                           MEMTXATTRS_UNSPECIFIED, NULL); | ||||
|     } | ||||
| } | ||||
| @@ -812,7 +889,7 @@ int css_do_ssch(SubchDev *sch, ORB *orb) | ||||
|     } | ||||
|  | ||||
|     /* If monitoring is active, update counter. */ | ||||
|     if (channel_subsys->chnmon_active) { | ||||
|     if (channel_subsys.chnmon_active) { | ||||
|         css_update_chnmon(sch); | ||||
|     } | ||||
|     sch->channel_prog = orb->cpa; | ||||
| @@ -971,16 +1048,16 @@ int css_do_stcrw(CRW *crw) | ||||
|     CrwContainer *crw_cont; | ||||
|     int ret; | ||||
|  | ||||
|     crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws); | ||||
|     crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws); | ||||
|     if (crw_cont) { | ||||
|         QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling); | ||||
|         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling); | ||||
|         copy_crw_to_guest(crw, &crw_cont->crw); | ||||
|         g_free(crw_cont); | ||||
|         ret = 0; | ||||
|     } else { | ||||
|         /* List was empty, turn crw machine checks on again. */ | ||||
|         memset(crw, 0, sizeof(*crw)); | ||||
|         channel_subsys->do_crw_mchk = true; | ||||
|         channel_subsys.do_crw_mchk = true; | ||||
|         ret = 1; | ||||
|     } | ||||
|  | ||||
| @@ -999,12 +1076,12 @@ void css_undo_stcrw(CRW *crw) | ||||
|  | ||||
|     crw_cont = g_try_malloc0(sizeof(CrwContainer)); | ||||
|     if (!crw_cont) { | ||||
|         channel_subsys->crws_lost = true; | ||||
|         channel_subsys.crws_lost = true; | ||||
|         return; | ||||
|     } | ||||
|     copy_crw_from_guest(&crw_cont->crw, crw); | ||||
|  | ||||
|     QTAILQ_INSERT_HEAD(&channel_subsys->pending_crws, crw_cont, sibling); | ||||
|     QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling); | ||||
| } | ||||
|  | ||||
| int css_do_tpi(IOIntCode *int_code, int lowcore) | ||||
| @@ -1022,9 +1099,9 @@ int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid, | ||||
|     CssImage *css; | ||||
|  | ||||
|     if (!m && !cssid) { | ||||
|         css = channel_subsys->css[channel_subsys->default_cssid]; | ||||
|         css = channel_subsys.css[channel_subsys.default_cssid]; | ||||
|     } else { | ||||
|         css = channel_subsys->css[cssid]; | ||||
|         css = channel_subsys.css[cssid]; | ||||
|     } | ||||
|     if (!css) { | ||||
|         return 0; | ||||
| @@ -1059,15 +1136,15 @@ void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo) | ||||
| { | ||||
|     /* dct is currently ignored (not really meaningful for our devices) */ | ||||
|     /* TODO: Don't ignore mbk. */ | ||||
|     if (update && !channel_subsys->chnmon_active) { | ||||
|     if (update && !channel_subsys.chnmon_active) { | ||||
|         /* Enable measuring. */ | ||||
|         channel_subsys->chnmon_area = mbo; | ||||
|         channel_subsys->chnmon_active = true; | ||||
|         channel_subsys.chnmon_area = mbo; | ||||
|         channel_subsys.chnmon_active = true; | ||||
|     } | ||||
|     if (!update && channel_subsys->chnmon_active) { | ||||
|     if (!update && channel_subsys.chnmon_active) { | ||||
|         /* Disable measuring. */ | ||||
|         channel_subsys->chnmon_area = 0; | ||||
|         channel_subsys->chnmon_active = false; | ||||
|         channel_subsys.chnmon_area = 0; | ||||
|         channel_subsys.chnmon_active = false; | ||||
|     } | ||||
| } | ||||
|  | ||||
| @@ -1095,7 +1172,7 @@ int css_do_rsch(SubchDev *sch) | ||||
|     } | ||||
|  | ||||
|     /* If monitoring is active, update counter. */ | ||||
|     if (channel_subsys->chnmon_active) { | ||||
|     if (channel_subsys.chnmon_active) { | ||||
|         css_update_chnmon(sch); | ||||
|     } | ||||
|  | ||||
| @@ -1111,23 +1188,23 @@ int css_do_rchp(uint8_t cssid, uint8_t chpid) | ||||
| { | ||||
|     uint8_t real_cssid; | ||||
|  | ||||
|     if (cssid > channel_subsys->max_cssid) { | ||||
|     if (cssid > channel_subsys.max_cssid) { | ||||
|         return -EINVAL; | ||||
|     } | ||||
|     if (channel_subsys->max_cssid == 0) { | ||||
|         real_cssid = channel_subsys->default_cssid; | ||||
|     if (channel_subsys.max_cssid == 0) { | ||||
|         real_cssid = channel_subsys.default_cssid; | ||||
|     } else { | ||||
|         real_cssid = cssid; | ||||
|     } | ||||
|     if (!channel_subsys->css[real_cssid]) { | ||||
|     if (!channel_subsys.css[real_cssid]) { | ||||
|         return -EINVAL; | ||||
|     } | ||||
|  | ||||
|     if (!channel_subsys->css[real_cssid]->chpids[chpid].in_use) { | ||||
|     if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) { | ||||
|         return -ENODEV; | ||||
|     } | ||||
|  | ||||
|     if (!channel_subsys->css[real_cssid]->chpids[chpid].is_virtual) { | ||||
|     if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) { | ||||
|         fprintf(stderr, | ||||
|                 "rchp unsupported for non-virtual chpid %x.%02x!\n", | ||||
|                 real_cssid, chpid); | ||||
| @@ -1136,8 +1213,8 @@ int css_do_rchp(uint8_t cssid, uint8_t chpid) | ||||
|  | ||||
|     /* We don't really use a channel path, so we're done here. */ | ||||
|     css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, | ||||
|                   channel_subsys->max_cssid > 0 ? 1 : 0, chpid); | ||||
|     if (channel_subsys->max_cssid > 0) { | ||||
|                   channel_subsys.max_cssid > 0 ? 1 : 0, chpid); | ||||
|     if (channel_subsys.max_cssid > 0) { | ||||
|         css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8); | ||||
|     } | ||||
|     return 0; | ||||
| @@ -1148,13 +1225,13 @@ bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid) | ||||
|     SubchSet *set; | ||||
|     uint8_t real_cssid; | ||||
|  | ||||
|     real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid; | ||||
|     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; | ||||
|     if (real_cssid > MAX_CSSID || ssid > MAX_SSID || | ||||
|         !channel_subsys->css[real_cssid] || | ||||
|         !channel_subsys->css[real_cssid]->sch_set[ssid]) { | ||||
|         !channel_subsys.css[real_cssid] || | ||||
|         !channel_subsys.css[real_cssid]->sch_set[ssid]) { | ||||
|         return true; | ||||
|     } | ||||
|     set = channel_subsys->css[real_cssid]->sch_set[ssid]; | ||||
|     set = channel_subsys.css[real_cssid]->sch_set[ssid]; | ||||
|     return schid > find_last_bit(set->schids_used, | ||||
|                                  (MAX_SCHID + 1) / sizeof(unsigned long)); | ||||
| } | ||||
| @@ -1167,7 +1244,7 @@ static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type) | ||||
|     if (cssid > MAX_CSSID) { | ||||
|         return -EINVAL; | ||||
|     } | ||||
|     css = channel_subsys->css[cssid]; | ||||
|     css = channel_subsys.css[cssid]; | ||||
|     if (!css) { | ||||
|         return -EINVAL; | ||||
|     } | ||||
| @@ -1188,7 +1265,7 @@ void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type) | ||||
|     PMCW *p = &sch->curr_status.pmcw; | ||||
|     SCSW *s = &sch->curr_status.scsw; | ||||
|     int i; | ||||
|     CssImage *css = channel_subsys->css[sch->cssid]; | ||||
|     CssImage *css = channel_subsys.css[sch->cssid]; | ||||
|  | ||||
|     assert(css != NULL); | ||||
|     memset(p, 0, sizeof(PMCW)); | ||||
| @@ -1214,27 +1291,27 @@ SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid) | ||||
| { | ||||
|     uint8_t real_cssid; | ||||
|  | ||||
|     real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid; | ||||
|     real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; | ||||
|  | ||||
|     if (!channel_subsys->css[real_cssid]) { | ||||
|     if (!channel_subsys.css[real_cssid]) { | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     if (!channel_subsys->css[real_cssid]->sch_set[ssid]) { | ||||
|     if (!channel_subsys.css[real_cssid]->sch_set[ssid]) { | ||||
|         return NULL; | ||||
|     } | ||||
|  | ||||
|     return channel_subsys->css[real_cssid]->sch_set[ssid]->sch[schid]; | ||||
|     return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid]; | ||||
| } | ||||
|  | ||||
| bool css_subch_visible(SubchDev *sch) | ||||
| { | ||||
|     if (sch->ssid > channel_subsys->max_ssid) { | ||||
|     if (sch->ssid > channel_subsys.max_ssid) { | ||||
|         return false; | ||||
|     } | ||||
|  | ||||
|     if (sch->cssid != channel_subsys->default_cssid) { | ||||
|         return (channel_subsys->max_cssid > 0); | ||||
|     if (sch->cssid != channel_subsys.default_cssid) { | ||||
|         return (channel_subsys.max_cssid > 0); | ||||
|     } | ||||
|  | ||||
|     return true; | ||||
| @@ -1242,20 +1319,20 @@ bool css_subch_visible(SubchDev *sch) | ||||
|  | ||||
| bool css_present(uint8_t cssid) | ||||
| { | ||||
|     return (channel_subsys->css[cssid] != NULL); | ||||
|     return (channel_subsys.css[cssid] != NULL); | ||||
| } | ||||
|  | ||||
| bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno) | ||||
| { | ||||
|     if (!channel_subsys->css[cssid]) { | ||||
|     if (!channel_subsys.css[cssid]) { | ||||
|         return false; | ||||
|     } | ||||
|     if (!channel_subsys->css[cssid]->sch_set[ssid]) { | ||||
|     if (!channel_subsys.css[cssid]->sch_set[ssid]) { | ||||
|         return false; | ||||
|     } | ||||
|  | ||||
|     return !!test_bit(devno, | ||||
|                       channel_subsys->css[cssid]->sch_set[ssid]->devnos_used); | ||||
|                       channel_subsys.css[cssid]->sch_set[ssid]->devnos_used); | ||||
| } | ||||
|  | ||||
| void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid, | ||||
| @@ -1266,13 +1343,13 @@ void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid, | ||||
|  | ||||
|     trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid, | ||||
|                            devno); | ||||
|     if (!channel_subsys->css[cssid]) { | ||||
|     if (!channel_subsys.css[cssid]) { | ||||
|         fprintf(stderr, | ||||
|                 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n", | ||||
|                 __func__, cssid, ssid, schid); | ||||
|         return; | ||||
|     } | ||||
|     css = channel_subsys->css[cssid]; | ||||
|     css = channel_subsys.css[cssid]; | ||||
|  | ||||
|     if (!css->sch_set[ssid]) { | ||||
|         css->sch_set[ssid] = g_malloc0(sizeof(SubchSet)); | ||||
| @@ -1297,7 +1374,7 @@ void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid) | ||||
|     /* TODO: Maybe use a static crw pool? */ | ||||
|     crw_cont = g_try_malloc0(sizeof(CrwContainer)); | ||||
|     if (!crw_cont) { | ||||
|         channel_subsys->crws_lost = true; | ||||
|         channel_subsys.crws_lost = true; | ||||
|         return; | ||||
|     } | ||||
|     crw_cont->crw.flags = (rsc << 8) | erc; | ||||
| @@ -1305,15 +1382,15 @@ void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid) | ||||
|         crw_cont->crw.flags |= CRW_FLAGS_MASK_C; | ||||
|     } | ||||
|     crw_cont->crw.rsid = rsid; | ||||
|     if (channel_subsys->crws_lost) { | ||||
|     if (channel_subsys.crws_lost) { | ||||
|         crw_cont->crw.flags |= CRW_FLAGS_MASK_R; | ||||
|         channel_subsys->crws_lost = false; | ||||
|         channel_subsys.crws_lost = false; | ||||
|     } | ||||
|  | ||||
|     QTAILQ_INSERT_TAIL(&channel_subsys->pending_crws, crw_cont, sibling); | ||||
|     QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling); | ||||
|  | ||||
|     if (channel_subsys->do_crw_mchk) { | ||||
|         channel_subsys->do_crw_mchk = false; | ||||
|     if (channel_subsys.do_crw_mchk) { | ||||
|         channel_subsys.do_crw_mchk = false; | ||||
|         /* Inject crw pending machine check. */ | ||||
|         s390_crw_mchk(); | ||||
|     } | ||||
| @@ -1328,9 +1405,9 @@ void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, | ||||
|     if (add && !hotplugged) { | ||||
|         return; | ||||
|     } | ||||
|     if (channel_subsys->max_cssid == 0) { | ||||
|     if (channel_subsys.max_cssid == 0) { | ||||
|         /* Default cssid shows up as 0. */ | ||||
|         guest_cssid = (cssid == channel_subsys->default_cssid) ? 0 : cssid; | ||||
|         guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid; | ||||
|     } else { | ||||
|         /* Show real cssid to the guest. */ | ||||
|         guest_cssid = cssid; | ||||
| @@ -1339,14 +1416,14 @@ void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, | ||||
|      * Only notify for higher subchannel sets/channel subsystems if the | ||||
|      * guest has enabled it. | ||||
|      */ | ||||
|     if ((ssid > channel_subsys->max_ssid) || | ||||
|         (guest_cssid > channel_subsys->max_cssid) || | ||||
|         ((channel_subsys->max_cssid == 0) && | ||||
|          (cssid != channel_subsys->default_cssid))) { | ||||
|     if ((ssid > channel_subsys.max_ssid) || | ||||
|         (guest_cssid > channel_subsys.max_cssid) || | ||||
|         ((channel_subsys.max_cssid == 0) && | ||||
|          (cssid != channel_subsys.default_cssid))) { | ||||
|         return; | ||||
|     } | ||||
|     chain_crw = (channel_subsys->max_ssid > 0) || | ||||
|             (channel_subsys->max_cssid > 0); | ||||
|     chain_crw = (channel_subsys.max_ssid > 0) || | ||||
|             (channel_subsys.max_cssid > 0); | ||||
|     css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid); | ||||
|     if (chain_crw) { | ||||
|         css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, | ||||
| @@ -1361,28 +1438,28 @@ void css_generate_chp_crws(uint8_t cssid, uint8_t chpid) | ||||
|  | ||||
| void css_generate_css_crws(uint8_t cssid) | ||||
| { | ||||
|     if (!channel_subsys->sei_pending) { | ||||
|     if (!channel_subsys.sei_pending) { | ||||
|         css_queue_crw(CRW_RSC_CSS, 0, 0, cssid); | ||||
|     } | ||||
|     channel_subsys->sei_pending = true; | ||||
|     channel_subsys.sei_pending = true; | ||||
| } | ||||
|  | ||||
| void css_clear_sei_pending(void) | ||||
| { | ||||
|     channel_subsys->sei_pending = false; | ||||
|     channel_subsys.sei_pending = false; | ||||
| } | ||||
|  | ||||
| int css_enable_mcsse(void) | ||||
| { | ||||
|     trace_css_enable_facility("mcsse"); | ||||
|     channel_subsys->max_cssid = MAX_CSSID; | ||||
|     channel_subsys.max_cssid = MAX_CSSID; | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| int css_enable_mss(void) | ||||
| { | ||||
|     trace_css_enable_facility("mss"); | ||||
|     channel_subsys->max_ssid = MAX_SSID; | ||||
|     channel_subsys.max_ssid = MAX_SSID; | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| @@ -1505,28 +1582,15 @@ int subch_device_load(SubchDev *s, QEMUFile *f) | ||||
|      */ | ||||
|     if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) { | ||||
|         if (s->ssid) { | ||||
|             channel_subsys->max_ssid = MAX_SSID; | ||||
|             channel_subsys.max_ssid = MAX_SSID; | ||||
|         } | ||||
|         if (s->cssid != channel_subsys->default_cssid) { | ||||
|             channel_subsys->max_cssid = MAX_CSSID; | ||||
|         if (s->cssid != channel_subsys.default_cssid) { | ||||
|             channel_subsys.max_cssid = MAX_CSSID; | ||||
|         } | ||||
|     } | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
|  | ||||
| static void css_init(void) | ||||
| { | ||||
|     channel_subsys = g_malloc0(sizeof(*channel_subsys)); | ||||
|     QTAILQ_INIT(&channel_subsys->pending_crws); | ||||
|     channel_subsys->sei_pending = false; | ||||
|     channel_subsys->do_crw_mchk = true; | ||||
|     channel_subsys->crws_lost = false; | ||||
|     channel_subsys->chnmon_active = false; | ||||
|     QTAILQ_INIT(&channel_subsys->io_adapters); | ||||
| } | ||||
| machine_init(css_init); | ||||
|  | ||||
| void css_reset_sch(SubchDev *sch) | ||||
| { | ||||
|     PMCW *p = &sch->curr_status.pmcw; | ||||
| @@ -1564,19 +1628,19 @@ void css_reset(void) | ||||
|     CrwContainer *crw_cont; | ||||
|  | ||||
|     /* Clean up monitoring. */ | ||||
|     channel_subsys->chnmon_active = false; | ||||
|     channel_subsys->chnmon_area = 0; | ||||
|     channel_subsys.chnmon_active = false; | ||||
|     channel_subsys.chnmon_area = 0; | ||||
|  | ||||
|     /* Clear pending CRWs. */ | ||||
|     while ((crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws))) { | ||||
|         QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling); | ||||
|     while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) { | ||||
|         QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling); | ||||
|         g_free(crw_cont); | ||||
|     } | ||||
|     channel_subsys->sei_pending = false; | ||||
|     channel_subsys->do_crw_mchk = true; | ||||
|     channel_subsys->crws_lost = false; | ||||
|     channel_subsys.sei_pending = false; | ||||
|     channel_subsys.do_crw_mchk = true; | ||||
|     channel_subsys.crws_lost = false; | ||||
|  | ||||
|     /* Reset maximum ids. */ | ||||
|     channel_subsys->max_cssid = 0; | ||||
|     channel_subsys->max_ssid = 0; | ||||
|     channel_subsys.max_cssid = 0; | ||||
|     channel_subsys.max_ssid = 0; | ||||
| } | ||||
|   | ||||
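The css.c changes fold the former per-device indicator bookkeeping into the channel subsystem: channel_subsys becomes a statically initialized singleton, and get_indicator()/map_indicator()/release_indicator() (shown above) now live here so that both virtio-ccw and zPCI can share the refcounted indicator addresses. A short usage sketch built only from the helpers above (the wrapper itself is illustrative):

```c
/* Acquire a refcounted indicator for a guest address and map it through
 * the FLIC io-adapter interface; error handling elided. */
static IndAddr *acquire_and_map_indicator(AdapterInfo *adapter,
                                          hwaddr guest_addr, int len)
{
    IndAddr *ind = get_indicator(guest_addr, len);  /* takes a reference */

    map_indicator(adapter, ind);                    /* no-op if already mapped */
    return ind;
}

/* On deregistration, release_indicator(adapter, ind) drops the reference
 * and unmaps the address once the last user is gone. */
```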
| @@ -12,6 +12,8 @@ | ||||
| #ifndef CSS_H | ||||
| #define CSS_H | ||||
|  | ||||
| #include "hw/s390x/adapter.h" | ||||
| #include "hw/s390x/s390_flic.h" | ||||
| #include "ioinst.h" | ||||
|  | ||||
| /* Channel subsystem constants. */ | ||||
| @@ -86,6 +88,18 @@ struct SubchDev { | ||||
|     void *driver_data; | ||||
| }; | ||||
|  | ||||
| typedef struct IndAddr { | ||||
|     hwaddr addr; | ||||
|     uint64_t map; | ||||
|     unsigned long refcnt; | ||||
|     int len; | ||||
|     QTAILQ_ENTRY(IndAddr) sibling; | ||||
| } IndAddr; | ||||
|  | ||||
| IndAddr *get_indicator(hwaddr ind_addr, int len); | ||||
| void release_indicator(AdapterInfo *adapter, IndAddr *indicator); | ||||
| int map_indicator(AdapterInfo *adapter, IndAddr *indicator); | ||||
|  | ||||
| typedef SubchDev *(*css_subch_cb_func)(uint8_t m, uint8_t cssid, uint8_t ssid, | ||||
|                                        uint16_t schid); | ||||
| void subch_device_save(SubchDev *s, QEMUFile *f); | ||||
|   | ||||
| @@ -101,7 +101,7 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) | ||||
|  | ||||
|         bios_size = load_elf(bios_filename, bios_translate_addr, &fwbase, | ||||
|                              &ipl->bios_start_addr, NULL, NULL, 1, | ||||
|                              EM_S390, 0); | ||||
|                              EM_S390, 0, 0); | ||||
|         if (bios_size > 0) { | ||||
|             /* Adjust ELF start address to final location */ | ||||
|             ipl->bios_start_addr += fwbase; | ||||
| @@ -124,7 +124,7 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) | ||||
|  | ||||
|     if (ipl->kernel) { | ||||
|         kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL, | ||||
|                                NULL, 1, EM_S390, 0); | ||||
|                                NULL, 1, EM_S390, 0, 0); | ||||
|         if (kernel_size < 0) { | ||||
|             kernel_size = load_image_targphys(ipl->kernel, 0, ram_size); | ||||
|         } | ||||
|   | ||||
| @@ -524,7 +524,7 @@ static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev) | ||||
|         return 0; | ||||
|     } | ||||
|  | ||||
|     ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_CAP_FLAGS, | ||||
|     ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS, | ||||
|              pci_config_size(pbdev->pdev), sizeof(ctrl)); | ||||
|     table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE, | ||||
|              pci_config_size(pbdev->pdev), sizeof(table)); | ||||
|   | ||||
| @@ -233,6 +233,8 @@ typedef struct S390PCIBusDevice { | ||||
|     AddressSpace as; | ||||
|     MemoryRegion mr; | ||||
|     MemoryRegion iommu_mr; | ||||
|     IndAddr *summary_ind; | ||||
|     IndAddr *indicator; | ||||
| } S390PCIBusDevice; | ||||
|  | ||||
| typedef struct S390pciState { | ||||
|   | ||||
| @@ -621,19 +621,19 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr, | ||||
|  | ||||
| static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib) | ||||
| { | ||||
|     int ret; | ||||
|     S390FLICState *fs = s390_get_flic(); | ||||
|     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); | ||||
|     int ret, len; | ||||
|  | ||||
|     ret = css_register_io_adapter(S390_PCIPT_ADAPTER, | ||||
|                                   FIB_DATA_ISC(ldl_p(&fib.data)), true, false, | ||||
|                                   &pbdev->routes.adapter.adapter_id); | ||||
|     assert(ret == 0); | ||||
|  | ||||
|     fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id, | ||||
|         ldq_p(&fib.aisb), true); | ||||
|     fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id, | ||||
|         ldq_p(&fib.aibv), true); | ||||
|     pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t)); | ||||
|     len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long); | ||||
|     pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len); | ||||
|  | ||||
|     map_indicator(&pbdev->routes.adapter, pbdev->summary_ind); | ||||
|     map_indicator(&pbdev->routes.adapter, pbdev->indicator); | ||||
|  | ||||
|     pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb); | ||||
|     pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data)); | ||||
| @@ -649,12 +649,11 @@ static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib) | ||||
|  | ||||
| static int dereg_irqs(S390PCIBusDevice *pbdev) | ||||
| { | ||||
|     S390FLICState *fs = s390_get_flic(); | ||||
|     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); | ||||
|  | ||||
|     fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id, | ||||
|                         pbdev->routes.adapter.ind_addr, false); | ||||
|     release_indicator(&pbdev->routes.adapter, pbdev->summary_ind); | ||||
|     release_indicator(&pbdev->routes.adapter, pbdev->indicator); | ||||
|  | ||||
|     pbdev->summary_ind = NULL; | ||||
|     pbdev->indicator = NULL; | ||||
|     pbdev->routes.adapter.summary_addr = 0; | ||||
|     pbdev->routes.adapter.summary_offset = 0; | ||||
|     pbdev->routes.adapter.ind_addr = 0; | ||||
|   | ||||
| @@ -54,8 +54,6 @@ | ||||
| #endif | ||||
|  | ||||
| #define MAX_BLK_DEVS                    10 | ||||
| #define S390_MACHINE                    "s390-virtio" | ||||
| #define TYPE_S390_MACHINE               MACHINE_TYPE_NAME(S390_MACHINE) | ||||
|  | ||||
| #define S390_TOD_CLOCK_VALUE_MISSING    0x00 | ||||
| #define S390_TOD_CLOCK_VALUE_PRESENT    0x01 | ||||
|   | ||||
| @@ -32,69 +32,6 @@ | ||||
| #include "virtio-ccw.h" | ||||
| #include "trace.h" | ||||
|  | ||||
| static QTAILQ_HEAD(, IndAddr) indicator_addresses = | ||||
|     QTAILQ_HEAD_INITIALIZER(indicator_addresses); | ||||
|  | ||||
| static IndAddr *get_indicator(hwaddr ind_addr, int len) | ||||
| { | ||||
|     IndAddr *indicator; | ||||
|  | ||||
|     QTAILQ_FOREACH(indicator, &indicator_addresses, sibling) { | ||||
|         if (indicator->addr == ind_addr) { | ||||
|             indicator->refcnt++; | ||||
|             return indicator; | ||||
|         } | ||||
|     } | ||||
|     indicator = g_new0(IndAddr, 1); | ||||
|     indicator->addr = ind_addr; | ||||
|     indicator->len = len; | ||||
|     indicator->refcnt = 1; | ||||
|     QTAILQ_INSERT_TAIL(&indicator_addresses, indicator, sibling); | ||||
|     return indicator; | ||||
| } | ||||
|  | ||||
| static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr, | ||||
|                                bool do_map) | ||||
| { | ||||
|     S390FLICState *fs = s390_get_flic(); | ||||
|     S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); | ||||
|  | ||||
|     return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map); | ||||
| } | ||||
|  | ||||
| static void release_indicator(AdapterInfo *adapter, IndAddr *indicator) | ||||
| { | ||||
|     assert(indicator->refcnt > 0); | ||||
|     indicator->refcnt--; | ||||
|     if (indicator->refcnt > 0) { | ||||
|         return; | ||||
|     } | ||||
|     QTAILQ_REMOVE(&indicator_addresses, indicator, sibling); | ||||
|     if (indicator->map) { | ||||
|         s390_io_adapter_map(adapter, indicator->map, false); | ||||
|     } | ||||
|     g_free(indicator); | ||||
| } | ||||
|  | ||||
| static int map_indicator(AdapterInfo *adapter, IndAddr *indicator) | ||||
| { | ||||
|     int ret; | ||||
|  | ||||
|     if (indicator->map) { | ||||
|         return 0; /* already mapped is not an error */ | ||||
|     } | ||||
|     indicator->map = indicator->addr; | ||||
|     ret = s390_io_adapter_map(adapter, indicator->map, true); | ||||
|     if ((ret != 0) && (ret != -ENOSYS)) { | ||||
|         goto out_err; | ||||
|     } | ||||
|     return 0; | ||||
|  | ||||
| out_err: | ||||
|     indicator->map = 0; | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, | ||||
|                                VirtioCcwDevice *dev); | ||||
|  | ||||
|   | ||||
| @@ -23,7 +23,8 @@ | ||||
| #include <hw/virtio/virtio-balloon.h> | ||||
| #include <hw/virtio/virtio-rng.h> | ||||
| #include <hw/virtio/virtio-bus.h> | ||||
| #include <hw/s390x/s390_flic.h> | ||||
|  | ||||
| #include "css.h" | ||||
|  | ||||
| #define VIRTUAL_CSSID 0xfe | ||||
|  | ||||
| @@ -75,14 +76,6 @@ typedef struct VirtIOCCWDeviceClass { | ||||
| #define VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT 1 | ||||
| #define VIRTIO_CCW_FLAG_USE_IOEVENTFD   (1 << VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT) | ||||
|  | ||||
| typedef struct IndAddr { | ||||
|     hwaddr addr; | ||||
|     uint64_t map; | ||||
|     unsigned long refcnt; | ||||
|     int len; | ||||
|     QTAILQ_ENTRY(IndAddr) sibling; | ||||
| } IndAddr; | ||||
|  | ||||
| struct VirtioCcwDevice { | ||||
|     DeviceState parent_obj; | ||||
|     SubchDev *sch; | ||||
|   | ||||
| @@ -449,7 +449,7 @@ static void sd_reset(DeviceState *dev) | ||||
|  | ||||
| static bool sd_get_inserted(SDState *sd) | ||||
| { | ||||
|     return blk_is_inserted(sd->blk); | ||||
|     return sd->blk && blk_is_inserted(sd->blk); | ||||
| } | ||||
|  | ||||
| static bool sd_get_readonly(SDState *sd) | ||||
|   | ||||
| @@ -207,6 +207,21 @@ static void sdhci_reset(SDHCIState *s) | ||||
|     s->pending_insert_state = false; | ||||
| } | ||||
|  | ||||
| static void sdhci_poweron_reset(DeviceState *dev) | ||||
| { | ||||
|     /* QOM (i.e. power-on) reset. This is identical to reset | ||||
|      * commanded via device register apart from handling of the | ||||
|      * 'pending insert on powerup' quirk. | ||||
|      */ | ||||
|     SDHCIState *s = (SDHCIState *)dev; | ||||
|  | ||||
|     sdhci_reset(s); | ||||
|  | ||||
|     if (s->pending_insert_quirk) { | ||||
|         s->pending_insert_state = true; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void sdhci_data_transfer(void *opaque); | ||||
|  | ||||
| static void sdhci_send_command(SDHCIState *s) | ||||
| @@ -1290,6 +1305,7 @@ static void sdhci_pci_class_init(ObjectClass *klass, void *data) | ||||
|     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); | ||||
|     dc->vmsd = &sdhci_vmstate; | ||||
|     dc->props = sdhci_pci_properties; | ||||
|     dc->reset = sdhci_poweron_reset; | ||||
| } | ||||
|  | ||||
| static const TypeInfo sdhci_pci_info = { | ||||
| @@ -1332,10 +1348,6 @@ static void sdhci_sysbus_realize(DeviceState *dev, Error ** errp) | ||||
|     memory_region_init_io(&s->iomem, OBJECT(s), &sdhci_mmio_ops, s, "sdhci", | ||||
|             SDHC_REGISTERS_MAP_SIZE); | ||||
|     sysbus_init_mmio(sbd, &s->iomem); | ||||
|  | ||||
|     if (s->pending_insert_quirk) { | ||||
|         s->pending_insert_state = true; | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) | ||||
| @@ -1345,6 +1357,7 @@ static void sdhci_sysbus_class_init(ObjectClass *klass, void *data) | ||||
|     dc->vmsd = &sdhci_vmstate; | ||||
|     dc->props = sdhci_sysbus_properties; | ||||
|     dc->realize = sdhci_sysbus_realize; | ||||
|     dc->reset = sdhci_poweron_reset; | ||||
| } | ||||
|  | ||||
| static const TypeInfo sdhci_sysbus_info = { | ||||
|   | ||||
| @@ -194,7 +194,7 @@ static void leon3_generic_hw_init(MachineState *machine) | ||||
|         uint64_t entry; | ||||
|  | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, NULL, NULL, | ||||
|                                1 /* big endian */, EM_SPARC, 0); | ||||
|                                1 /* big endian */, EM_SPARC, 0, 0); | ||||
|         if (kernel_size < 0) { | ||||
|             fprintf(stderr, "qemu: could not load kernel '%s'\n", | ||||
|                     kernel_filename); | ||||
|   | ||||
| @@ -279,7 +279,7 @@ static unsigned long sun4m_load_kernel(const char *kernel_filename, | ||||
|         bswap_needed = 0; | ||||
| #endif | ||||
|         kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, | ||||
|                                NULL, NULL, NULL, 1, EM_SPARC, 0); | ||||
|                                NULL, NULL, NULL, 1, EM_SPARC, 0, 0); | ||||
|         if (kernel_size < 0) | ||||
|             kernel_size = load_aout(kernel_filename, KERNEL_LOAD_ADDR, | ||||
|                                     RAM_size - KERNEL_LOAD_ADDR, bswap_needed, | ||||
| @@ -723,7 +723,7 @@ static void prom_init(hwaddr addr, const char *bios_name) | ||||
|     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | ||||
|     if (filename) { | ||||
|         ret = load_elf(filename, translate_prom_address, &addr, NULL, | ||||
|                        NULL, NULL, 1, EM_SPARC, 0); | ||||
|                        NULL, NULL, 1, EM_SPARC, 0, 0); | ||||
|         if (ret < 0 || ret > PROM_SIZE_MAX) { | ||||
|             ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); | ||||
|         } | ||||
|   | ||||
| @@ -187,7 +187,7 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename, | ||||
|         bswap_needed = 0; | ||||
| #endif | ||||
|         kernel_size = load_elf(kernel_filename, NULL, NULL, kernel_entry, | ||||
|                                kernel_addr, &kernel_top, 1, EM_SPARCV9, 0); | ||||
|                                kernel_addr, &kernel_top, 1, EM_SPARCV9, 0, 0); | ||||
|         if (kernel_size < 0) { | ||||
|             *kernel_addr = KERNEL_LOAD_ADDR; | ||||
|             *kernel_entry = KERNEL_LOAD_ADDR; | ||||
| @@ -633,7 +633,7 @@ static void prom_init(hwaddr addr, const char *bios_name) | ||||
|     filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); | ||||
|     if (filename) { | ||||
|         ret = load_elf(filename, translate_prom_address, &addr, | ||||
|                        NULL, NULL, NULL, 1, EM_SPARCV9, 0); | ||||
|                        NULL, NULL, NULL, 1, EM_SPARCV9, 0, 0); | ||||
|         if (ret < 0 || ret > PROM_SIZE_MAX) { | ||||
|             ret = load_image_targphys(filename, addr, PROM_SIZE_MAX); | ||||
|         } | ||||
|   | ||||
| @@ -45,7 +45,7 @@ static void tricore_load_kernel(CPUTriCoreState *env) | ||||
|     kernel_size = load_elf(tricoretb_binfo.kernel_filename, NULL, | ||||
|                            NULL, (uint64_t *)&entry, NULL, | ||||
|                            NULL, 0, | ||||
|                            EM_TRICORE, 1); | ||||
|                            EM_TRICORE, 1, 0); | ||||
|     if (kernel_size <= 0) { | ||||
|         error_report("qemu: no kernel file '%s'", | ||||
|                 tricoretb_binfo.kernel_filename); | ||||
|   | ||||
| @@ -447,7 +447,7 @@ static USBPacket *usbredir_find_packet_by_id(USBRedirDevice *dev, | ||||
|     return p; | ||||
| } | ||||
|  | ||||
| static void bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len, | ||||
| static int bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len, | ||||
|     uint8_t status, uint8_t ep, void *free_on_destroy) | ||||
| { | ||||
|     struct buf_packet *bufp; | ||||
| @@ -464,7 +464,7 @@ static void bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len, | ||||
|         if (dev->endpoint[EP2I(ep)].bufpq_size > | ||||
|                 dev->endpoint[EP2I(ep)].bufpq_target_size) { | ||||
|             free(data); | ||||
|             return; | ||||
|             return -1; | ||||
|         } | ||||
|         dev->endpoint[EP2I(ep)].bufpq_dropping_packets = 0; | ||||
|     } | ||||
| @@ -477,6 +477,7 @@ static void bufp_alloc(USBRedirDevice *dev, uint8_t *data, uint16_t len, | ||||
|     bufp->free_on_destroy = free_on_destroy; | ||||
|     QTAILQ_INSERT_TAIL(&dev->endpoint[EP2I(ep)].bufpq, bufp, next); | ||||
|     dev->endpoint[EP2I(ep)].bufpq_size++; | ||||
|     return 0; | ||||
| } | ||||
|  | ||||
| static void bufp_free(USBRedirDevice *dev, struct buf_packet *bufp, | ||||
| @@ -2082,13 +2083,17 @@ static void usbredir_buffered_bulk_packet(void *priv, uint64_t id, | ||||
|     status = usb_redir_success; | ||||
|     free_on_destroy = NULL; | ||||
|     for (i = 0; i < data_len; i += len) { | ||||
|         int r; | ||||
|         if (len >= (data_len - i)) { | ||||
|             len = data_len - i; | ||||
|             status = buffered_bulk_packet->status; | ||||
|             free_on_destroy = data; | ||||
|         } | ||||
|         /* bufp_alloc also adds the packet to the ep queue */ | ||||
|         bufp_alloc(dev, data + i, len, status, ep, free_on_destroy); | ||||
|         r = bufp_alloc(dev, data + i, len, status, ep, free_on_destroy); | ||||
|         if (r) { | ||||
|             break; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     if (dev->endpoint[EP2I(ep)].pending_async_packet) { | ||||
|   | ||||
| @@ -69,6 +69,13 @@ static void chr_read(void *opaque, const void *buf, size_t size) | ||||
|         g_free(elem); | ||||
|     } | ||||
|     virtio_notify(vdev, vrng->vq); | ||||
|  | ||||
|     if (!virtio_queue_empty(vrng->vq)) { | ||||
|         /* If we didn't drain the queue, call virtio_rng_process | ||||
|          * to take care of asking for more data as appropriate. | ||||
|          */ | ||||
|         virtio_rng_process(vrng); | ||||
|     } | ||||
| } | ||||
|  | ||||
| static void virtio_rng_process(VirtIORNG *vrng) | ||||
|   | ||||
| @@ -51,15 +51,19 @@ static void diag288_reset(void *opaque) | ||||
| static void diag288_timer_expired(void *dev) | ||||
| { | ||||
|     qemu_log_mask(CPU_LOG_RESET, "Watchdog timer expired.\n"); | ||||
|     watchdog_perform_action(); | ||||
|     /* Reset the watchdog only if the guest was notified about expiry. */ | ||||
|     /* Reset the watchdog only if the guest gets notified about | ||||
|      * expiry. watchdog_perform_action() may temporarily relinquish | ||||
|      * the BQL; reset before triggering the action to avoid races with | ||||
|      * diag288 instructions. */ | ||||
|     switch (get_watchdog_action()) { | ||||
|     case WDT_DEBUG: | ||||
|     case WDT_NONE: | ||||
|     case WDT_PAUSE: | ||||
|          return; | ||||
|         break; | ||||
|     default: | ||||
|         wdt_diag288_reset(dev); | ||||
|     } | ||||
|     wdt_diag288_reset(dev); | ||||
|     watchdog_perform_action(); | ||||
| } | ||||
|  | ||||
| static int wdt_diag288_handle_timer(DIAG288State *diag288, | ||||
|   | ||||
| @@ -94,10 +94,10 @@ static void xtensa_sim_init(MachineState *machine) | ||||
|         uint64_t elf_lowaddr; | ||||
| #ifdef TARGET_WORDS_BIGENDIAN | ||||
|         int success = load_elf(kernel_filename, translate_phys_addr, cpu, | ||||
|                 &elf_entry, &elf_lowaddr, NULL, 1, EM_XTENSA, 0); | ||||
|                 &elf_entry, &elf_lowaddr, NULL, 1, EM_XTENSA, 0, 0); | ||||
| #else | ||||
|         int success = load_elf(kernel_filename, translate_phys_addr, cpu, | ||||
|                 &elf_entry, &elf_lowaddr, NULL, 0, EM_XTENSA, 0); | ||||
|                 &elf_entry, &elf_lowaddr, NULL, 0, EM_XTENSA, 0, 0); | ||||
| #endif | ||||
|         if (success > 0) { | ||||
|             env->pc = elf_entry; | ||||
|   | ||||
| @@ -355,7 +355,7 @@ static void lx_init(const LxBoardDesc *board, MachineState *machine) | ||||
|         uint64_t elf_entry; | ||||
|         uint64_t elf_lowaddr; | ||||
|         int success = load_elf(kernel_filename, translate_phys_addr, cpu, | ||||
|                 &elf_entry, &elf_lowaddr, NULL, be, EM_XTENSA, 0); | ||||
|                 &elf_entry, &elf_lowaddr, NULL, be, EM_XTENSA, 0, 0); | ||||
|         if (success > 0) { | ||||
|             entry_point = elf_entry; | ||||
|         } else { | ||||
|   | ||||
| @@ -16,6 +16,13 @@ | ||||
| #include "qemu/notify.h" | ||||
| #include "cpu.h" | ||||
|  | ||||
| typedef enum { | ||||
|     ARM_ENDIANNESS_UNKNOWN = 0, | ||||
|     ARM_ENDIANNESS_LE, | ||||
|     ARM_ENDIANNESS_BE8, | ||||
|     ARM_ENDIANNESS_BE32, | ||||
| } arm_endianness; | ||||
|  | ||||
| /* armv7m.c */ | ||||
| DeviceState *armv7m_init(MemoryRegion *system_memory, int mem_size, int num_irq, | ||||
|                       const char *kernel_filename, const char *cpu_model); | ||||
| @@ -103,6 +110,8 @@ struct arm_boot_info { | ||||
|      * changing to non-secure state if implementing a non-secure boot | ||||
|      */ | ||||
|     bool secure_board_setup; | ||||
|  | ||||
|     arm_endianness endianness; | ||||
| }; | ||||
|  | ||||
| /** | ||||
|   | ||||
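The arm_endianness enum and the new endianness field give the boot code somewhere to record which view of memory the loaded image expects. As a sketch of how that record could feed the ELF loader (the helper below is invented; the BE32 case assumes the data_swab parameter added to load_elf later in this series, swapping bytes within each 32-bit word):

/* Hypothetical helper mapping the recorded endianness onto the
 * big_endian and data_swab arguments of load_elf(). Not part of the
 * patch; shown only to illustrate the intended use of the field. */
static void arm_elf_load_args(arm_endianness e, int *big_endian,
                              int *data_swab)
{
    switch (e) {
    case ARM_ENDIANNESS_BE8:
        *big_endian = 1;
        *data_swab = 0;   /* byte-invariant BE: data already in file order */
        break;
    case ARM_ENDIANNESS_BE32:
        *big_endian = 1;
        *data_swab = 2;   /* word-invariant BE: swap bytes within words */
        break;
    default:
        *big_endian = 0;
        *data_swab = 0;
        break;
    }
}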
| @@ -61,6 +61,7 @@ enum { | ||||
|     VIRT_PCIE_MMIO_HIGH, | ||||
|     VIRT_GPIO, | ||||
|     VIRT_SECURE_UART, | ||||
|     VIRT_SECURE_MEM, | ||||
| }; | ||||
|  | ||||
| typedef struct MemMapEntry { | ||||
|   | ||||
| @@ -127,6 +127,7 @@ struct MachineState { | ||||
|     char *firmware; | ||||
|     bool iommu; | ||||
|     bool suppress_vmdesc; | ||||
|     bool enforce_config_section; | ||||
|  | ||||
|     ram_addr_t ram_size; | ||||
|     ram_addr_t maxram_size; | ||||
|   | ||||
| @@ -42,6 +42,14 @@ | ||||
|         .driver   = "virtio-pci",\ | ||||
|         .property = "migrate-extra",\ | ||||
|         .value    = "off",\ | ||||
|     },{\ | ||||
|         .driver   = "fw_cfg_mem",\ | ||||
|         .property = "dma_enabled",\ | ||||
|         .value    = "off",\ | ||||
|     },{\ | ||||
|         .driver   = "fw_cfg_io",\ | ||||
|         .property = "dma_enabled",\ | ||||
|         .value    = "off",\ | ||||
|     }, | ||||
|  | ||||
| #define HW_COMPAT_2_3 \ | ||||
|   | ||||
| @@ -263,7 +263,7 @@ static int glue(load_elf, SZ)(const char *name, int fd, | ||||
|                               void *translate_opaque, | ||||
|                               int must_swab, uint64_t *pentry, | ||||
|                               uint64_t *lowaddr, uint64_t *highaddr, | ||||
|                               int elf_machine, int clear_lsb) | ||||
|                               int elf_machine, int clear_lsb, int data_swab) | ||||
| { | ||||
|     struct elfhdr ehdr; | ||||
|     struct elf_phdr *phdr = NULL, *ph; | ||||
| @@ -366,6 +366,26 @@ static int glue(load_elf, SZ)(const char *name, int fd, | ||||
|                 addr = ph->p_paddr; | ||||
|             } | ||||
|  | ||||
|             if (data_swab) { | ||||
|                 int j; | ||||
|                 for (j = 0; j < file_size; j += (1 << data_swab)) { | ||||
|                     uint8_t *dp = data + j; | ||||
|                     switch (data_swab) { | ||||
|                     case (1): | ||||
|                         *(uint16_t *)dp = bswap16(*(uint16_t *)dp); | ||||
|                         break; | ||||
|                     case (2): | ||||
|                         *(uint32_t *)dp = bswap32(*(uint32_t *)dp); | ||||
|                         break; | ||||
|                     case (3): | ||||
|                         *(uint64_t *)dp = bswap64(*(uint64_t *)dp); | ||||
|                         break; | ||||
|                     default: | ||||
|                         g_assert_not_reached(); | ||||
|                     } | ||||
|                 } | ||||
|             } | ||||
|  | ||||
|             /* the entry pointer in the ELF header is a virtual | ||||
|              * address, if the text segments paddr and vaddr differ | ||||
|              * we need to adjust the entry */ | ||||
|   | ||||
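The data_swab handling added above byte-swaps each loaded segment in units of 1 << data_swab bytes (1 = halfwords, 2 = words, 3 = doublewords), which is what lets a caller load an image whose data needs a different byte order than the ELF file itself. A minimal standalone rendering of the same technique, assuming QEMU's bswap helpers and glib's g_assert_not_reached():

#include <glib.h>
#include "qemu/bswap.h"

/* Sketch only: swap bytes within every (1 << swab)-byte unit of buf.
 * load_elf does the equivalent in place on the segment it just read. */
static void swab_buffer(uint8_t *buf, size_t len, int swab)
{
    size_t step = (size_t)1 << swab;
    size_t i;

    for (i = 0; i + step <= len; i += step) {
        switch (swab) {
        case 1:
            *(uint16_t *)(buf + i) = bswap16(*(uint16_t *)(buf + i));
            break;
        case 2:
            *(uint32_t *)(buf + i) = bswap32(*(uint32_t *)(buf + i));
            break;
        case 3:
            *(uint64_t *)(buf + i) = bswap64(*(uint64_t *)(buf + i));
            break;
        default:
            g_assert_not_reached();
        }
    }
}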
| @@ -16,6 +16,18 @@ int load_image(const char *filename, uint8_t *addr); /* deprecated */ | ||||
| ssize_t load_image_size(const char *filename, void *addr, size_t size); | ||||
| int load_image_targphys(const char *filename, hwaddr, | ||||
|                         uint64_t max_sz); | ||||
| /** | ||||
|  * load_image_mr: load an image into a memory region | ||||
|  * @filename: Path to the image file | ||||
|  * @mr: Memory Region to load into | ||||
|  * | ||||
|  * Load the specified file into the memory region. | ||||
|  * The file loaded is registered as a ROM, so its contents will be | ||||
|  * reinstated whenever the system is reset. | ||||
|  * If the file is larger than the memory region's size the call will fail. | ||||
|  * Returns -1 on failure, or the size of the file. | ||||
|  */ | ||||
| int load_image_mr(const char *filename, MemoryRegion *mr); | ||||
|  | ||||
| /* This is the limit on the maximum uncompressed image size that | ||||
|  * load_image_gzipped_buffer() and load_image_gzipped() will read. It prevents | ||||
| @@ -32,10 +44,49 @@ int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); | ||||
| #define ELF_LOAD_WRONG_ARCH   -3 | ||||
| #define ELF_LOAD_WRONG_ENDIAN -4 | ||||
| const char *load_elf_strerror(int error); | ||||
|  | ||||
| /** load_elf: | ||||
|  * @filename: Path of ELF file | ||||
|  * @translate_fn: optional function to translate load addresses | ||||
|  * @translate_opaque: opaque data passed to @translate_fn | ||||
|  * @pentry: Populated with program entry point. Ignored if NULL. | ||||
|  * @lowaddr: Populated with lowest loaded address. Ignored if NULL. | ||||
|  * @highaddr: Populated with highest loaded address. Ignored if NULL. | ||||
|  * @bigendian: Expected ELF endianness. 0 for LE otherwise BE | ||||
|  * @elf_machine: Expected ELF machine type | ||||
|  * @clear_lsb: Set to mask off LSB of addresses (Some architectures use | ||||
|  *             this for non-address data) | ||||
|  * @data_swab: Set to order of byte swapping for data. 0 for no swap, 1 | ||||
|  *             for swapping bytes within halfwords, 2 for bytes within | ||||
|  *             words and 3 for within doublewords. | ||||
|  * | ||||
|  * Load an ELF file's contents to the emulated system's address space. | ||||
|  * Clients may optionally specify a callback to perform address | ||||
|  * translations. @pentry, @lowaddr and @highaddr are optional pointers | ||||
|  * which will be populated with various load information. @bigendian and | ||||
|  * @elf_machine give the expected endianness and machine for the ELF; the | ||||
|  * load will fail if the target ELF does not match. Some architectures | ||||
|  * have some architecture-specific behaviours that come into effect when | ||||
|  * their particular values for @elf_machine are set. | ||||
|  */ | ||||
|  | ||||
| int load_elf(const char *filename, uint64_t (*translate_fn)(void *, uint64_t), | ||||
|              void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, | ||||
|              uint64_t *highaddr, int big_endian, int elf_machine, | ||||
|              int clear_lsb); | ||||
|              int clear_lsb, int data_swab); | ||||
|  | ||||
| /** load_elf_hdr: | ||||
|  * @filename: Path of ELF file | ||||
|  * @hdr: Buffer to populate with header data. Header data will not be | ||||
|  * filled if set to NULL. | ||||
|  * @is64: Set to true if the ELF is 64bit. Ignored if set to NULL | ||||
|  * @errp: Populated with an error in failure cases | ||||
|  * | ||||
|  * Inspect an ELF file's header. Read its full header contents into a | ||||
|  * buffer and/or determine if the ELF is 64bit. | ||||
|  */ | ||||
| void load_elf_hdr(const char *filename, void *hdr, bool *is64, Error **errp); | ||||
|  | ||||
| int load_aout(const char *filename, hwaddr addr, int max_sz, | ||||
|               int bswap_needed, hwaddr target_page_size); | ||||
| int load_uimage(const char *filename, hwaddr *ep, | ||||
| @@ -67,7 +118,7 @@ extern bool rom_file_has_mr; | ||||
|  | ||||
| int rom_add_file(const char *file, const char *fw_dir, | ||||
|                  hwaddr addr, int32_t bootindex, | ||||
|                  bool option_rom); | ||||
|                  bool option_rom, MemoryRegion *mr); | ||||
| MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len, | ||||
|                            size_t max_len, hwaddr addr, | ||||
|                            const char *fw_file_name, | ||||
| @@ -82,9 +133,11 @@ void *rom_ptr(hwaddr addr); | ||||
| void hmp_info_roms(Monitor *mon, const QDict *qdict); | ||||
|  | ||||
| #define rom_add_file_fixed(_f, _a, _i)          \ | ||||
|     rom_add_file(_f, NULL, _a, _i, false) | ||||
|     rom_add_file(_f, NULL, _a, _i, false, NULL) | ||||
| #define rom_add_blob_fixed(_f, _b, _l, _a)      \ | ||||
|     rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL) | ||||
| #define rom_add_file_mr(_f, _mr, _i)            \ | ||||
|     rom_add_file(_f, NULL, 0, _i, false, _mr) | ||||
|  | ||||
| #define PC_ROM_MIN_VGA     0xc0000 | ||||
| #define PC_ROM_MIN_OPTION  0xc8000 | ||||
|   | ||||
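load_image_mr() documented above, together with the MemoryRegion argument grown by rom_add_file(), lets a device load a ROM image directly into a region it owns rather than at a fixed physical address. A hedged sketch of how a device's realize function might use it; the device type, state fields and firmware file name are invented for illustration:

static void mydev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = MYDEV(dev);   /* hypothetical state struct / QOM cast */

    memory_region_init_ram(&s->rom, OBJECT(dev), "mydev.rom",
                           64 * 1024, &error_fatal);

    /* Loads the file into s->rom and registers it as a ROM, so its
     * contents are reinstated on system reset; returns -1 if the file
     * is larger than the region. */
    if (load_image_mr("mydev-firmware.bin", &s->rom) < 0) {
        error_setg(errp, "mydev: could not load mydev-firmware.bin");
    }
}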
| @@ -161,8 +161,9 @@ struct ICSIRQState { | ||||
|  | ||||
| qemu_irq xics_get_qirq(XICSState *icp, int irq); | ||||
| void xics_set_irq_type(XICSState *icp, int irq, bool lsi); | ||||
| int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi); | ||||
| int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align); | ||||
| int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp); | ||||
| int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align, | ||||
|                      Error **errp); | ||||
| void xics_free(XICSState *icp, int irq, int num); | ||||
|  | ||||
| void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu); | ||||
|   | ||||
| @@ -18,6 +18,7 @@ typedef struct BusState BusState; | ||||
| typedef struct CharDriverState CharDriverState; | ||||
| typedef struct CompatProperty CompatProperty; | ||||
| typedef struct CPUAddressSpace CPUAddressSpace; | ||||
| typedef struct CPUState CPUState; | ||||
| typedef struct DeviceListener DeviceListener; | ||||
| typedef struct DeviceState DeviceState; | ||||
| typedef struct DisplayChangeListener DisplayChangeListener; | ||||
|   | ||||
| @@ -62,7 +62,6 @@ typedef uint64_t vaddr; | ||||
| #define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU) | ||||
| #define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU) | ||||
|  | ||||
| typedef struct CPUState CPUState; | ||||
| typedef struct CPUWatchpoint CPUWatchpoint; | ||||
|  | ||||
| typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr, | ||||
|   | ||||
| @@ -24,6 +24,7 @@ | ||||
| #define RNG_BACKEND_CLASS(klass) \ | ||||
|     OBJECT_CLASS_CHECK(RngBackendClass, (klass), TYPE_RNG_BACKEND) | ||||
|  | ||||
| typedef struct RngRequest RngRequest; | ||||
| typedef struct RngBackendClass RngBackendClass; | ||||
| typedef struct RngBackend RngBackend; | ||||
|  | ||||
| @@ -31,13 +32,20 @@ typedef void (EntropyReceiveFunc)(void *opaque, | ||||
|                                   const void *data, | ||||
|                                   size_t size); | ||||
|  | ||||
| struct RngRequest | ||||
| { | ||||
|     EntropyReceiveFunc *receive_entropy; | ||||
|     uint8_t *data; | ||||
|     void *opaque; | ||||
|     size_t offset; | ||||
|     size_t size; | ||||
| }; | ||||
|  | ||||
| struct RngBackendClass | ||||
| { | ||||
|     ObjectClass parent_class; | ||||
|  | ||||
|     void (*request_entropy)(RngBackend *s, size_t size, | ||||
|                             EntropyReceiveFunc *receive_entropy, void *opaque); | ||||
|     void (*cancel_requests)(RngBackend *s); | ||||
|     void (*request_entropy)(RngBackend *s, RngRequest *req); | ||||
|  | ||||
|     void (*opened)(RngBackend *s, Error **errp); | ||||
| }; | ||||
| @@ -48,8 +56,10 @@ struct RngBackend | ||||
|  | ||||
|     /*< protected >*/ | ||||
|     bool opened; | ||||
|     GSList *requests; | ||||
| }; | ||||
|  | ||||
|  | ||||
| /** | ||||
|  * rng_backend_request_entropy: | ||||
|  * @s: the backend to request entropy from | ||||
| @@ -70,12 +80,13 @@ void rng_backend_request_entropy(RngBackend *s, size_t size, | ||||
|                                  void *opaque); | ||||
|  | ||||
| /** | ||||
|  * rng_backend_cancel_requests: | ||||
|  * @s: the backend to cancel all pending requests in | ||||
|  * rng_backend_free_request: | ||||
|  * @s: the backend that created the request | ||||
|  * @req: the request to finalize | ||||
|  * | ||||
|  * Cancels all pending requests submitted by @rng_backend_request_entropy.  This | ||||
|  * should be used by a device during reset or in preparation for live migration | ||||
|  * to stop tracking any request. | ||||
|  * Used by child rng backend classes to finalize requests once they've been | ||||
|  * processed. The request is removed from the list of active requests and | ||||
|  * deleted. | ||||
|  */ | ||||
| void rng_backend_cancel_requests(RngBackend *s); | ||||
| void rng_backend_finalize_request(RngBackend *s, RngRequest *req); | ||||
| #endif | ||||
|   | ||||
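With the request bookkeeping moved into RngBackend (the new requests list) and request_entropy() now taking a ready-made RngRequest, a child backend only has to fill req->data, deliver it through req->receive_entropy() and hand the request back with rng_backend_finalize_request(). A minimal synchronous sketch; the backend name and the constant filler byte are placeholders, and real backends such as rng-random fill the buffer asynchronously and track req->offset:

static void myrng_request_entropy(RngBackend *b, RngRequest *req)
{
    size_t i;

    /* Placeholder entropy source; a real backend reads from a device
     * or character backend, possibly in several chunks. */
    for (i = 0; i < req->size; i++) {
        req->data[i] = 0x42;
    }
    req->receive_entropy(req->opaque, req->data, req->size);

    /* Remove the request from the base class's list and free it. */
    rng_backend_finalize_request(b, req);
}

static void myrng_class_init(ObjectClass *klass, void *data)
{
    RngBackendClass *rbc = RNG_BACKEND_CLASS(klass);

    rbc->request_entropy = myrng_request_entropy;
}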
| @@ -378,6 +378,8 @@ void graphic_hw_gl_block(QemuConsole *con, bool block); | ||||
|  | ||||
| QemuConsole *qemu_console_lookup_by_index(unsigned int index); | ||||
| QemuConsole *qemu_console_lookup_by_device(DeviceState *dev, uint32_t head); | ||||
| QemuConsole *qemu_console_lookup_by_device_name(const char *device_id, | ||||
|                                                 uint32_t head, Error **errp); | ||||
| bool qemu_console_is_visible(QemuConsole *con); | ||||
| bool qemu_console_is_graphic(QemuConsole *con); | ||||
| bool qemu_console_is_fixedsize(QemuConsole *con); | ||||
|   | ||||
| @@ -65,4 +65,6 @@ void qemu_input_check_mode_change(void); | ||||
| void qemu_add_mouse_mode_change_notifier(Notifier *notify); | ||||
| void qemu_remove_mouse_mode_change_notifier(Notifier *notify); | ||||
|  | ||||
| int input_linux_init(void *opaque, QemuOpts *opts, Error **errp); | ||||
|  | ||||
| #endif /* INPUT_H */ | ||||
|   | ||||
| @@ -416,6 +416,8 @@ | ||||
| #define __NR_execveat			(__NR_SYSCALL_BASE+387) | ||||
| #define __NR_userfaultfd		(__NR_SYSCALL_BASE+388) | ||||
| #define __NR_membarrier			(__NR_SYSCALL_BASE+389) | ||||
| #define __NR_mlock2			(__NR_SYSCALL_BASE+390) | ||||
| #define __NR_copy_file_range		(__NR_SYSCALL_BASE+391) | ||||
|  | ||||
| /* | ||||
|  * The following SWIs are ARM private. | ||||
|   | ||||
| @@ -388,18 +388,7 @@ | ||||
| #define __NR_switch_endian	363 | ||||
| #define __NR_userfaultfd	364 | ||||
| #define __NR_membarrier		365 | ||||
| #define __NR_semop		366 | ||||
| #define __NR_semget		367 | ||||
| #define __NR_semctl		368 | ||||
| #define __NR_semtimedop		369 | ||||
| #define __NR_msgsnd		370 | ||||
| #define __NR_msgrcv		371 | ||||
| #define __NR_msgget		372 | ||||
| #define __NR_msgctl		373 | ||||
| #define __NR_shmat		374 | ||||
| #define __NR_shmdt		375 | ||||
| #define __NR_shmget		376 | ||||
| #define __NR_shmctl		377 | ||||
| #define __NR_mlock2		378 | ||||
| #define __NR_copy_file_range	379 | ||||
|  | ||||
| #endif /* _ASM_POWERPC_UNISTD_H_ */ | ||||
|   | ||||
| @@ -153,6 +153,8 @@ struct kvm_guest_debug_arch { | ||||
| #define KVM_SYNC_ARCH0  (1UL << 4) | ||||
| #define KVM_SYNC_PFAULT (1UL << 5) | ||||
| #define KVM_SYNC_VRS    (1UL << 6) | ||||
| #define KVM_SYNC_RICCB  (1UL << 7) | ||||
| #define KVM_SYNC_FPRS   (1UL << 8) | ||||
| /* definition of registers in kvm_run */ | ||||
| struct kvm_sync_regs { | ||||
| 	__u64 prefix;	/* prefix register */ | ||||
| @@ -167,9 +169,14 @@ struct kvm_sync_regs { | ||||
| 	__u64 pft;	/* pfault token [PFAULT] */ | ||||
| 	__u64 pfs;	/* pfault select [PFAULT] */ | ||||
| 	__u64 pfc;	/* pfault compare [PFAULT] */ | ||||
| 	__u64 vrs[32][2];	/* vector registers */ | ||||
| 	union { | ||||
| 		__u64 vrs[32][2];	/* vector registers (KVM_SYNC_VRS) */ | ||||
| 		__u64 fprs[16];		/* fp registers (KVM_SYNC_FPRS) */ | ||||
| 	}; | ||||
| 	__u8  reserved[512];	/* for future vector expansion */ | ||||
| 	__u32 fpc;	/* only valid with vector registers */ | ||||
| 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ | ||||
| 	__u8 padding[52];	/* riccb needs to be 64byte aligned */ | ||||
| 	__u8 riccb[64];		/* runtime instrumentation controls block */ | ||||
| }; | ||||
|  | ||||
| #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) | ||||
|   | ||||
Some files were not shown because too many files have changed in this diff.