From f196fa2c00fccd497260531f2ed3569150b04cc0199ddbbe13bc8b9a6ed5b9f9 Mon Sep 17 00:00:00 2001 From: Charles Arnold Date: Fri, 29 Jan 2010 20:39:04 +0000 Subject: [PATCH 1/6] - bnc#573376 - OS reboot while creating DomU with Windows CD - bnc#573881 - /usr/lib64/xen/bin/qemu-dm is a broken link - Update to changeset 20840 RC1+ for sle11-sp1 beta3. - bnc#569581 - SuSEfirewall2 should handle rules. Disable handle_iptable in vif-bridge script vif-bridge-no-iptables.patch - bnc#569577 - Rename /etc/modprobe.d/xen_pvdrivers, installed by xen-kmp-default, to ../xen_pvdrivers.conf - bnc#536176 - Xen panic when using iommu after updating hypervisor 19380-vtd-feature-check.patch - bnc#530959 - virsh autostart doesn't work Fixing this libvirt bug also required fixing xend's op_pincpu method with upstream c/s 19580 19580-xend-pincpu.patch - bnc#534146 - Xen: Fix SRAT check for discontig memory 20120-x86-srat-check-discontig.patch - bnc#491081 - Xen time goes backwards x3950M2 - disable module build for ec2 correctly to fix build (at the suse_kernel_module_package macro) - Upstream bugfixes from Jan. 19896-32on64-arg-xlat.patch 19960-show-page-walk.patch 19945-pae-xen-l2-entries.patch 19953-x86-fsgs-base.patch 19931-gnttblop-preempt.patch 19885-kexec-gdt-switch.patch 19894-shadow-resync-fastpath-race.patch - hyperv shim patches no longer require being applied conditionally - bnc#520234 - npiv does not work with XEN in SLE11 Update block-npiv - bnc#496033 - Support for creating NPIV ports without starting vm block-npiv-common.sh block-npiv-vport Update block-npiv - bnc#500043 - Fix access to NPIV disk from HVM vm Update xen-qemu-iscsi-fix.patch - Don't build the KMPs for the ec2 kernel. - Upstream fixes from Jan Beulich 19606-hvm-x2apic-cpuid.patch 19734-vtd-gcmd-submit.patch 19752-vtd-srtp-sirtp-flush.patch 19753-vtd-reg-write-lock.patch 19764-hvm-domain-lock-leak.patch 19765-hvm-post-restore-vcpu-state.patch 19767-hvm-port80-inhibit.patch 19768-x86-dom0-stack-dump.patch 19770-x86-amd-s3-resume.patch 19801-x86-p2m-2mb-hap-only.patch 19815-vtd-kill-correct-timer.patch - Patch from Jan Beulich to aid in debugging bnc#509911 gnttblop-preempt.patch - bnc#515220 - qemu-img-xen snapshot Segmentation fault qemu-img-snapshot.patch update - Upstream fixes from Jan Beulich. 19474-32on64-S3.patch 19490-log-dirty.patch 19492-sched-timer-non-idle.patch 19493-hvm-io-intercept-count.patch 19505-x86_64-clear-cr1.patch 19519-domctl-deadlock.patch 19523-32on64-restore-p2m.patch 19555-ept-live-migration.patch 19557-amd-iommu-ioapic-remap.patch 19560-x86-flush-tlb-empty-mask.patch 19571-x86-numa-shift.patch 19578-hvm-load-ldt-first.patch 19592-vmx-exit-reason-perfc-size.patch 19595-hvm-set-callback-irq-level.patch 19597-x86-ioport-quirks-BL2xx.patch 19602-vtd-multi-ioapic-remap.patch 19631-x86-frametable-map.patch 19653-hvm-vcpuid-range-checks.patch - bnc#382112 - Caps lock not being passed to vm correctly. capslock_enable.patch - bnc#506833 - Use pidof in xend and xendomains init scripts - bnc#484778 - XEN: PXE boot of FV domU using non-Realtek NIC fails enable_more_nic_pxe.patch cross-migrate.patch - bnc#390961 - cross-migration of a VM causes it to become unresponsive (remains paused after migration) - Patches taken to fix the xenctx tool. The fixed version of this tool is needed to debug bnc#502735. 
18962-xc_translate_foreign_address.patch 18963-xenctx.patch 19168-hvm-domctl.patch 19169-remove-declare-bitmap.patch 19170-libxc.patch 19171-xenctx.patch 19450-xc_translate_foreign_address.patch - bnc#503782 - Using converted vmdk image does not work ioemu-tapdisk-compat-QEMU_IMG.patch - bnc#474738 - adding CD drive to VM guest makes it unbootable. parse_boot_disk.patch - bnc#495300 - L3: Xen unable to PXE boot Windows based DomU's 18545-hvm-gpxe-rom.patch, 18548-hvm-gpxe-rom.patch - bnc#459836 - Fix rtc_timeoffset when localtime=0 xend-timeoffset.patch - bnc#497440 - xmclone.sh script incorrectly handles networking for SLE11. - bnc#477890 - VM becomes unresponsive after applying snapshot - bnc#494892 - Update xend-domain-lock.patch to flock the lock file. - bnc#439639 - SVVP Test 273 "System - Sleep Stress With IO" fails Turned off s3/s4 sleep states for HVM guests. - bnc#468169 - fix domUloader to umount the mounted device mapper target in dom0 when installing a sles10 guest with disk = /dev/disk/by_path - bnc#488490 - domUloader can't handle block device names with ':' - bnc#486244 - vms fail to start after reboot when using qcow2 - bnc#490835 - VTd errata on Cantiga chipset 19230-vtd-mobile-series4-chipset.patch - bnc#482515 - Missing dependency in xen.spec - Additional upstream bug fix patches from Jan Beulich. 19132-page-list-mfn-links.patch 19134-fold-shadow-page-info.patch 19135-next-shadow-mfn.patch 19136-page-info-rearrange.patch 19156-page-list-simplify.patch 19161-pv-ldt-handling.patch 19162-page-info-no-cpumask.patch 19216-msix-fixmap.patch 19268-page-get-owner.patch 19293-vcpu-migration-delay.patch 19391-vpmu-double-free.patch 19415-vtd-dom0-s3.patch - Imported numerous upstream bug fix patches. 19083-memory-is-conventional-fix.patch 19097-M2P-table-1G-page-mappings.patch 19137-lock-domain-page-list.patch 19140-init-heap-pages-max-order.patch 19167-recover-pat-value-s3-resume.patch 19172-irq-to-vector.patch 19173-pci-passthrough-fix.patch 19176-free-irq-shutdown-fix.patch 19190-pciif-typo-fix.patch 19204-allow-old-images-restore.patch 19232-xend-exception-fix.patch 19239-ioapic-s3-suspend-fix.patch 19240-ioapic-s3-suspend-fix.patch 19242-xenstored-use-after-free-fix.patch 19259-ignore-shutdown-deferrals.patch 19266-19365-event-channel-access-fix.patch 19275-19296-schedular-deadlock-fixes.patch 19276-cpu-selection-allocation-fix.patch 19302-passthrough-pt-irq-time-out.patch 19313-hvmemul-read-msr-fix.patch 19317-vram-tracking-fix.patch 19335-apic-s3-resume-error-fix.patch 19353-amd-migration-fix.patch 19354-amd-migration-fix.patch 19371-in-sync-L1s-writable.patch 19372-2-on-3-shadow-mode-fix.patch 19377-xend-vnclisten.patch 19400-ensure-ltr-execute.patch 19410-virt-to-maddr-fix.patch - bnc#483565 - Fix block-iscsi script. Updated block-iscsi and xen-domUloader.diff - bnc#465814 - Mouse stops responding when wheel is used in Windows VM. mouse-wheel-roll.patch (James Song) - bnc#470704 - save/restore of windows VM throws off the mouse tracking. usb-save-restore.patch (James Song) - bnc#436629 - Use global vnc-listen setting specified in xend configuration file. xend-vnclisten.patch - bnc#482623 - Fix pygrub to append user-supplied 'extra' args to kernel args. 
19234_pygrub.patch - bnc#481161 upgrade - sles10sp2 to sles11 upgrade keeps xen-tools-ioemu OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=28 --- 32on64-extra-mem.patch | 2 +- bdrv_open2_flags_2.patch | 26 +-- blktap.patch | 20 +- cdrom-removable.patch | 52 ++--- disable_emulated_device.diff | 2 +- hv_tools.patch | 25 +-- hv_xen_extension.patch | 40 ++-- kmp_filelist | 2 +- magic_ioport_compat.patch | 8 +- pvdrv-import-shared-info.patch | 24 ++- qemu-console-retry.patch | 25 +++ snapshot-ioemu-delete.patch | 20 +- snapshot-ioemu-save.patch | 38 ++-- snapshot-xend.patch | 16 +- svm-lmsl.patch | 4 +- vif-bridge-no-iptables.patch | 13 ++ xen-4.0.0-testing-src.tar.bz2 | 4 +- xen-config.diff | 4 +- xen-domUloader.diff | 10 +- xen-hvm-default-bridge.diff | 4 +- xen-hvm-default-pae.diff | 2 +- xen-max-free-mem.diff | 4 +- xen-qemu-iscsi-fix.patch | 12 +- xen-warnings.diff | 4 +- xen.changes | 354 ++++++++++++++++++++++++++------- xen.spec | 37 ++-- xen_pvdrivers.conf | 7 + xenapi-console-protocol.patch | 2 +- xend-core-dump-loc.diff | 2 +- xend-domain-lock.patch | 8 +- 30 files changed, 502 insertions(+), 269 deletions(-) create mode 100644 qemu-console-retry.patch create mode 100644 vif-bridge-no-iptables.patch create mode 100644 xen_pvdrivers.conf diff --git a/32on64-extra-mem.patch b/32on64-extra-mem.patch index da8f123..43d6515 100644 --- a/32on64-extra-mem.patch +++ b/32on64-extra-mem.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -2863,7 +2863,7 @@ class XendDomainInfo: +@@ -2883,7 +2883,7 @@ class XendDomainInfo: self.guest_bitsize = self.image.getBitSize() # Make sure there's enough RAM available for the domain diff --git a/bdrv_open2_flags_2.patch b/bdrv_open2_flags_2.patch index 9f09ffd..ae9cf67 100644 --- a/bdrv_open2_flags_2.patch +++ b/bdrv_open2_flags_2.patch @@ -1,7 +1,5 @@ -Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c -+++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c +--- a/tools/ioemu-remote/hw/xen_blktap.c ++++ b/tools/ioemu-remote/hw/xen_blktap.c @@ -225,6 +225,7 @@ static int open_disk(struct td_state *s, BlockDriver* drv; char* devname; @@ -19,11 +17,9 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c fprintf(stderr, "Could not open image file %s\n", path); return -ENOMEM; } -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -133,7 +133,8 @@ static void insert_media(void *opaque) +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -134,7 +134,8 @@ static void insert_media(void *opaque) else format = &bdrv_raw; @@ -33,7 +29,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c #ifdef CONFIG_STUBDOM { char *buf, *backend, *params_path, *params; -@@ -397,9 +398,9 @@ void xenstore_parse_domain_config(int hv +@@ -398,9 +399,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; char *buf = NULL; @@ -45,7 +41,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c unsigned int len, num, hd_index, pci_devid = 
0; BlockDriverState *bs; BlockDriver *format; -@@ -461,7 +462,8 @@ void xenstore_parse_domain_config(int hv +@@ -462,7 +463,8 @@ void xenstore_parse_domain_config(int hv } for (i = 0; i < num; i++) { @@ -55,8 +51,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c /* read the backend path */ xenstore_get_backend_path(&bpath, "vbd", danger_path, hvm_domid, e_danger[i]); if (bpath == NULL) -@@ -560,6 +562,17 @@ void xenstore_parse_domain_config(int hv - } +@@ -548,6 +550,17 @@ void xenstore_parse_domain_config(int hv + format = &bdrv_raw; } + /* read the mode of the device */ @@ -73,7 +69,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c #if 0 /* Phantom VBDs are disabled because the use of paths * from guest-controlled areas in xenstore is unsafe. -@@ -612,7 +625,7 @@ void xenstore_parse_domain_config(int hv +@@ -615,7 +628,7 @@ void xenstore_parse_domain_config(int hv #ifdef CONFIG_STUBDOM if (pasprintf(&danger_buf, "%s/device/vbd/%s", danger_path, e_danger[i]) == -1) continue; @@ -82,7 +78,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c pstrcpy(bs->filename, sizeof(bs->filename), params); } #else -@@ -641,7 +654,7 @@ void xenstore_parse_domain_config(int hv +@@ -644,7 +657,7 @@ void xenstore_parse_domain_config(int hv } } pstrcpy(bs->filename, sizeof(bs->filename), params); diff --git a/blktap.patch b/blktap.patch index 7be792a..d35adf6 100644 --- a/blktap.patch +++ b/blktap.patch @@ -1,11 +1,9 @@ bug #239173 bug #242953 -Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -=================================================================== ---- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py -+++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -3262,7 +3262,7 @@ class XendDomainInfo: +--- a/tools/python/xen/xend/XendDomainInfo.py ++++ b/tools/python/xen/xend/XendDomainInfo.py +@@ -3282,7 +3282,7 @@ class XendDomainInfo: (fn, BOOTLOADER_LOOPBACK_DEVICE)) vbd = { @@ -14,11 +12,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py 'device': BOOTLOADER_LOOPBACK_DEVICE, } -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -396,9 +396,9 @@ void xenstore_parse_domain_config(int hv +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -397,9 +397,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; char *buf = NULL; @@ -30,7 +26,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c unsigned int len, num, hd_index, pci_devid = 0; BlockDriverState *bs; BlockDriver *format; -@@ -438,6 +438,14 @@ void xenstore_parse_domain_config(int hv +@@ -439,6 +439,14 @@ void xenstore_parse_domain_config(int hv e_danger[i]); if (bpath == NULL) continue; @@ -45,7 +41,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c /* read the name of the device */ if (pasprintf(&buf, "%s/dev", bpath) == -1) continue; -@@ -712,6 +720,7 @@ void xenstore_parse_domain_config(int hv +@@ -715,6 +723,7 @@ void xenstore_parse_domain_config(int hv free(danger_type); free(params); free(dev); diff --git a/cdrom-removable.patch b/cdrom-removable.patch index 422d00e..1175f1c 100644 --- a/cdrom-removable.patch +++ b/cdrom-removable.patch @@ -1,7 +1,5 @@ -Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py -=================================================================== --- /dev/null -+++ 
xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py ++++ b/tools/python/xen/xend/server/HalDaemon.py @@ -0,0 +1,243 @@ +#!/usr/bin/env python +# -*- mode: python; -*- @@ -246,10 +244,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/HalDaemon.py + print 'Falling off end' + + -Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py ++++ b/tools/python/xen/xend/server/Hald.py @@ -0,0 +1,125 @@ +#============================================================================ +# This library is free software; you can redistribute it and/or @@ -376,10 +372,8 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/Hald.py + watcher.run() + time.sleep(10) + watcher.shutdown() -Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py -=================================================================== ---- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py -+++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py +--- a/tools/python/xen/xend/server/SrvServer.py ++++ b/tools/python/xen/xend/server/SrvServer.py @@ -56,6 +56,7 @@ from xen.web.SrvDir import SrvDir from SrvRoot import SrvRoot @@ -397,15 +391,22 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py def create(): root = SrvDir() -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -513,6 +513,19 @@ void xenstore_parse_domain_config(int hv - params = newparams; - format = &bdrv_raw; - } -+ /* if cdrom pyhsical put a watch on media-present */ +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -18,6 +18,7 @@ + #include "exec-all.h" + #include "sysemu.h" + ++#include "console.h" + #include "hw.h" + #include "pci.h" + #include "qemu-timer.h" +@@ -548,6 +549,21 @@ void xenstore_parse_domain_config(int hv + #endif + + bs = bdrv_new(dev); ++ ++ /* if cdrom physical put a watch on media-present */ + if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) { + if (drv && !strcmp(drv, "phy")) { + if (pasprintf(&buf, "%s/media-present", bpath) != -1) { @@ -418,14 +419,15 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c + } + } + } - - #if 0 - /* Phantom VBDs are disabled because the use of paths -@@ -938,6 +951,50 @@ void xenstore_record_dm_state(const char ++ + /* check if it is a cdrom */ + if (danger_type && !strcmp(danger_type, "cdrom")) { + bdrv_set_type_hint(bs, BDRV_TYPE_CDROM); +@@ -938,6 +954,50 @@ void xenstore_record_dm_state(const char xenstore_record_dm("state", state); } -+void xenstore_process_media_change_event(char **vec) ++static void xenstore_process_media_change_event(char **vec) +{ + char *media_present = NULL; + unsigned int len; @@ -472,7 +474,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c void xenstore_process_event(void *opaque) { char **vec, *offset, *bpath = NULL, *buf = NULL, *drv = NULL, *image = NULL; -@@ -968,6 +1025,11 @@ void xenstore_process_event(void *opaque +@@ -968,6 +1028,11 @@ void xenstore_process_event(void *opaque xenstore_watch_callbacks[i].cb(vec[XS_WATCH_TOKEN], xenstore_watch_callbacks[i].opaque); diff --git a/disable_emulated_device.diff b/disable_emulated_device.diff index 30ca259..1870b4c 100644 --- a/disable_emulated_device.diff +++ b/disable_emulated_device.diff @@ -2,7 
+2,7 @@ Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci. =================================================================== --- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c +++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c -@@ -363,6 +363,11 @@ static int __devinit platform_pci_init(s +@@ -401,6 +401,11 @@ static int __devinit platform_pci_init(s platform_mmio = mmio_addr; platform_mmiolen = mmio_len; diff --git a/hv_tools.patch b/hv_tools.patch index dd3d8c9..5ed7de0 100644 --- a/hv_tools.patch +++ b/hv_tools.patch @@ -2,28 +2,29 @@ Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c +++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c -@@ -914,15 +914,15 @@ static PyObject *pyxc_hvm_build(XcObject - int i; +@@ -914,16 +914,16 @@ static PyObject *pyxc_hvm_build(XcObject #endif + int i; char *image; - int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1; + int memsize, target=-1, vcpus = 1, acpi = 0, apic = 1, extid = 0; - uint64_t vcpu_avail = 1; + PyObject *vcpu_avail_handle = NULL; + uint8_t vcpu_avail[HVM_MAX_VCPUS/8]; static char *kwd_list[] = { "domid", - "memsize", "image", "target", "vcpus", - "vcpu_avail", "acpi", "apic", NULL }; -- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iilii", kwd_list, +- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiOii", kwd_list, + "memsize", "image", "target", "vcpus", + "vcpu_avail", "extid", "acpi", "apic", NULL }; -+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiliii", kwd_list, ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iis|iiOiii", kwd_list, &dom, &memsize, &image, &target, &vcpus, -- &vcpu_avail, &acpi, &apic) ) -+ &vcpu_avail, &extid, &acpi, &apic) ) +- &vcpu_avail_handle, &acpi, &apic) ) ++ &vcpu_avail_handle, &extid, &acpi, &apic) ) return NULL; - if ( target == -1 ) -@@ -950,6 +950,7 @@ static PyObject *pyxc_hvm_build(XcObject + memset(vcpu_avail, 0, sizeof(vcpu_avail)); +@@ -975,6 +975,7 @@ static PyObject *pyxc_hvm_build(XcObject va_hvm->checksum -= sum; munmap(va_map, XC_PAGE_SIZE); #endif @@ -47,7 +48,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py +++ xen-4.0.0-testing/tools/python/xen/xend/image.py -@@ -841,6 +841,7 @@ class HVMImageHandler(ImageHandler): +@@ -839,6 +839,7 @@ class HVMImageHandler(ImageHandler): self.apic = int(vmConfig['platform'].get('apic', 0)) self.acpi = int(vmConfig['platform'].get('acpi', 0)) @@ -55,7 +56,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py self.guest_os_type = vmConfig['platform'].get('guest_os_type') self.memory_sharing = int(vmConfig['memory_sharing']) xc.dom_set_memshr(self.vm.getDomid(), self.memory_sharing) -@@ -966,6 +967,7 @@ class HVMImageHandler(ImageHandler): +@@ -964,6 +965,7 @@ class HVMImageHandler(ImageHandler): log.debug("target = %d", mem_mb) log.debug("vcpus = %d", self.vm.getVCpuCount()) log.debug("vcpu_avail = %li", self.vm.getVCpuAvail()) @@ -63,7 +64,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py log.debug("acpi = %d", self.acpi) log.debug("apic = %d", self.apic) -@@ -975,6 +977,7 @@ class HVMImageHandler(ImageHandler): +@@ -973,6 +975,7 @@ class HVMImageHandler(ImageHandler): target = mem_mb, vcpus = self.vm.getVCpuCount(), vcpu_avail = 
self.vm.getVCpuAvail(), diff --git a/hv_xen_extension.patch b/hv_xen_extension.patch index 248d69d..aae751a 100644 --- a/hv_xen_extension.patch +++ b/hv_xen_extension.patch @@ -1,8 +1,5 @@ -%patch -Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h ++++ b/xen/include/asm-x86/hvm/hvm_extensions.h @@ -0,0 +1,183 @@ +/**************************************************************************** + | @@ -187,17 +184,13 @@ Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm_extensions.h +int hyperx_initialize(struct domain *d); + +#endif -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/Makefile -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/Makefile ++++ b/xen/arch/x86/hvm/hyperv/Makefile @@ -0,0 +1,2 @@ +obj-y += hv_intercept.o +obj-y += hv_hypercall.o -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h ++++ b/xen/arch/x86/hvm/hyperv/hv_errno.h @@ -0,0 +1,62 @@ +/**************************************************************************** + | @@ -261,10 +254,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_errno.h +#define HV_STATUS_NO_MEMORY_256PAGES 0x0103 +#define HV_STATUS_NO_MEMORY_1024PAGES 0x0104 +#endif -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c ++++ b/xen/arch/x86/hvm/hyperv/hv_hypercall.c @@ -0,0 +1,153 @@ +/**************************************************************************** + | @@ -419,10 +410,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.c + return; + } +} -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h ++++ b/xen/arch/x86/hvm/hyperv/hv_hypercall.h @@ -0,0 +1,46 @@ +/**************************************************************************** + | @@ -470,10 +459,8 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_hypercall.h +#define HV_NOTIFY_LONG_SPIN_WAIT 0x0008 + +#endif /* HV_HYPERCALL_H */ -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c ++++ b/xen/arch/x86/hvm/hyperv/hv_intercept.c @@ -0,0 +1,1009 @@ +/**************************************************************************** + | @@ -647,7 +634,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c + printk("Hyperv dom create failed\n"); + return (1); + } -+ for (i=0; i < MAX_VIRT_CPUS; i++) ++ for (i = 0; i < d->max_vcpus; i++) + { + if (d->vcpu[i] != NULL) + { @@ -723,7 +710,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c +static inline u32 +hv_get_max_vcpus_supported(void) +{ -+ return (MAX_VIRT_CPUS); ++ return HVM_MAX_VCPUS; +} + + @@ -929,7 +916,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c + ASSERT(curp != NULL); +#ifdef HV_STATS + printk("DUMP STATS\n"); -+ for (i=0; i < MAX_VIRT_CPUS; i++) ++ for (i = 0; i < d->max_vcpus; i++) + if (d->vcpu[i] != 
NULL) + hv_print_stats(curp, i); +#endif @@ -1484,11 +1471,9 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_intercept.c + hvm_inject_exception(TRAP_gp_fault, 0, 0); + return (1); +} -Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h -=================================================================== --- /dev/null -+++ xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h -@@ -0,0 +1,285 @@ ++++ b/xen/arch/x86/hvm/hyperv/hv_shim.h +@@ -0,0 +1,286 @@ +/**************************************************************************** + | + | Copyright (c) [2007, 2008] Novell, Inc. @@ -1528,6 +1513,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h +#include +#include +#include ++#include + +#include "hv_hypercall.h" + @@ -1704,7 +1690,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/hyperv/hv_shim.h + * Each VCPU here corresponds to the vcpu in the underlying hypervisor; + * they share the same ID. + */ -+ hv_vcpu_t vcpu_state[MAX_VIRT_CPUS]; ++ hv_vcpu_t vcpu_state[HVM_MAX_VCPUS]; +} hv_partition_t; + + diff --git a/kmp_filelist b/kmp_filelist index 3ad3337..150d88d 100644 --- a/kmp_filelist +++ b/kmp_filelist @@ -1,3 +1,3 @@ %defattr (-,root,root) /lib/modules/%2-%1 -/etc/modprobe.d/xen_pvdrivers +/etc/modprobe.d/xen_pvdrivers.conf diff --git a/magic_ioport_compat.patch b/magic_ioport_compat.patch index 69ca3be..179c6be 100644 --- a/magic_ioport_compat.patch +++ b/magic_ioport_compat.patch @@ -6,12 +6,12 @@ Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci. =================================================================== --- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c +++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c -@@ -289,7 +289,10 @@ static int check_platform_magic(struct d +@@ -320,7 +320,10 @@ static int check_platform_magic(struct d if (magic != XEN_IOPORT_MAGIC_VAL) { - dev_err(dev, "invalid magic %#x", magic); -- return -ENODEV; -+ /* + err = "unrecognised magic value"; +- goto no_dev; ++ /* + * Older backend; just return 0 to be compatible. 
+ */ + return 0; diff --git a/pvdrv-import-shared-info.patch b/pvdrv-import-shared-info.patch index dfded56..b172f2c 100644 --- a/pvdrv-import-shared-info.patch +++ b/pvdrv-import-shared-info.patch @@ -1,5 +1,7 @@ ---- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/evtchn.c 2008-10-14 19:44:11.000000000 +0200 -+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/evtchn.c 2009-11-24 17:38:08.000000000 +0100 +Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/evtchn.c +=================================================================== +--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/evtchn.c ++++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/evtchn.c @@ -40,7 +40,9 @@ #include #endif @@ -10,9 +12,11 @@ #define is_valid_evtchn(x) ((x) != 0) #define evtchn_from_irq(x) (irq_evtchn[irq].evtchn) ---- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c 2009-02-24 20:09:53.000000000 +0100 -+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c 2009-11-24 17:45:02.000000000 +0100 -@@ -70,7 +70,6 @@ static uint64_t callback_via; +Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c +=================================================================== +--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c ++++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c +@@ -77,7 +77,6 @@ static uint64_t callback_via; static int __devinit init_xen_info(void) { struct xen_add_to_physmap xatp; @@ -20,7 +24,7 @@ #ifdef __ia64__ xencomm_initialize(); -@@ -78,6 +77,7 @@ static int __devinit init_xen_info(void) +@@ -85,6 +84,7 @@ static int __devinit init_xen_info(void) setup_xen_features(); @@ -28,7 +32,7 @@ shared_info_frame = alloc_xen_mmio(PAGE_SIZE) >> PAGE_SHIFT; xatp.domid = DOMID_SELF; xatp.idx = 0; -@@ -90,6 +90,11 @@ static int __devinit init_xen_info(void) +@@ -97,6 +97,11 @@ static int __devinit init_xen_info(void) ioremap(shared_info_frame << PAGE_SHIFT, PAGE_SIZE); if (shared_info_area == NULL) panic("can't map shared info\n"); @@ -40,8 +44,10 @@ return 0; } ---- 2009-11-09.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h 2008-10-14 19:44:11.000000000 +0200 -+++ 2009-11-09/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h 2009-11-24 17:40:08.000000000 +0100 +Index: xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h +=================================================================== +--- xen-4.0.0-testing.orig/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h ++++ xen-4.0.0-testing/unmodified_drivers/linux-2.6/platform-pci/platform-pci.h @@ -27,6 +27,11 @@ unsigned long alloc_xen_mmio(unsigned long len); void platform_pci_resume(void); diff --git a/qemu-console-retry.patch b/qemu-console-retry.patch new file mode 100644 index 0000000..9f25ae4 --- /dev/null +++ b/qemu-console-retry.patch @@ -0,0 +1,25 @@ +Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_console.c ++++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_console.c +@@ -182,6 +182,7 @@ static int con_init(struct XenDevice *xe + { + struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); + char *type, *dom; ++ int retries = 0; + + /* setup */ + dom = xs_get_domain_path(xenstore, con->xendev.dom); +@@ -191,7 +192,11 @@ static int con_init(struct XenDevice *xe + 
snprintf(con->console, sizeof(con->console), "%s/device/console/%d", dom, xendev->dev); + free(dom); + +- type = xenstore_read_str(con->console, "type"); ++ while (!(type = xenstore_read_str(con->console, "type")) && retries < 5) { ++ usleep(250000); ++ retries++; ++ } ++ + if (!type || 0 != strcmp(type, "ioemu")) { + xen_be_printf(xendev, 1, "not for me (type=%s)\n", type); + if (type) diff --git a/snapshot-ioemu-delete.patch b/snapshot-ioemu-delete.patch index 34b8f62..3e3aab6 100644 --- a/snapshot-ioemu-delete.patch +++ b/snapshot-ioemu-delete.patch @@ -1,8 +1,6 @@ -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -865,6 +865,18 @@ static void xenstore_process_dm_command_ +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -868,6 +868,18 @@ static void xenstore_process_dm_command_ } snapshot_name = xs_read(xsh, XBT_NULL, path, &len); @@ -21,10 +19,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c } else if (!strncmp(command, "continue", len)) { fprintf(logfile, "dm-command: continue after state save\n"); xen_pause_requested = 0; -Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/savevm.c -+++ xen-4.0.0-testing/tools/ioemu-remote/savevm.c +--- a/tools/ioemu-remote/savevm.c ++++ b/tools/ioemu-remote/savevm.c @@ -1096,6 +1096,35 @@ the_end: return ret; } @@ -61,10 +57,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c #ifndef CONFIG_DM void do_savevm(const char *name) -Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h -+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h +--- a/tools/ioemu-remote/qemu-xen.h ++++ b/tools/ioemu-remote/qemu-xen.h @@ -42,6 +42,7 @@ enum { /* xen-vl-extra.c */ diff --git a/snapshot-ioemu-save.patch b/snapshot-ioemu-save.patch index 8067a2a..5ed6a47 100644 --- a/snapshot-ioemu-save.patch +++ b/snapshot-ioemu-save.patch @@ -1,7 +1,5 @@ -Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/savevm.c -+++ xen-4.0.0-testing/tools/ioemu-remote/savevm.c +--- a/tools/ioemu-remote/savevm.c ++++ b/tools/ioemu-remote/savevm.c @@ -28,6 +28,7 @@ #include "sysemu.h" #include "qemu-timer.h" @@ -97,10 +95,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/savevm.c #ifndef CONFIG_DM void do_savevm(const char *name) -Index: xen-4.0.0-testing/tools/ioemu-remote/i386-dm/helper2.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/i386-dm/helper2.c -+++ xen-4.0.0-testing/tools/ioemu-remote/i386-dm/helper2.c +--- a/tools/ioemu-remote/i386-dm/helper2.c ++++ b/tools/ioemu-remote/i386-dm/helper2.c @@ -109,6 +109,9 @@ int send_vcpu = 0; //the evtchn port for polling the notification, evtchn_port_t *ioreq_local_port; @@ -167,10 +163,8 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/i386-dm/helper2.c /* Wait to be allowed to continue */ while (xen_pause_requested) { -Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h -=================================================================== ---- 
xen-4.0.0-testing.orig/tools/ioemu-remote/qemu-xen.h -+++ xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h +--- a/tools/ioemu-remote/qemu-xen.h ++++ b/tools/ioemu-remote/qemu-xen.h @@ -34,6 +34,15 @@ void qemu_invalidate_map_cache(void) #define mapcache_lock() ((void)0) #define mapcache_unlock() ((void)0) @@ -191,23 +185,21 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/qemu-xen.h void xenstore_process_event(void *opaque); void xenstore_record_dm(const char *subpath, const char *state); void xenstore_record_dm_state(const char *state); -+void xenstore_record_dm_error(char *errmsg); ++void xenstore_record_dm_error(const char *errmsg); void xenstore_check_new_media_present(int timeout); void xenstore_write_vncport(int vnc_display); void xenstore_read_vncpasswd(int domid, char *pwbuf, size_t pwbuflen); -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c @@ -17,6 +17,7 @@ #include "exec-all.h" #include "sysemu.h" +#include "qemu-xen.h" + #include "console.h" #include "hw.h" - #include "pci.h" -@@ -836,6 +837,7 @@ static void xenstore_process_dm_command_ +@@ -839,6 +840,7 @@ static void xenstore_process_dm_command_ { char *path = NULL, *command = NULL, *par = NULL; unsigned int len; @@ -215,7 +207,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c if (pasprintf(&path, "/local/domain/0/device-model/%u/command", domid) == -1) { -@@ -851,7 +853,18 @@ static void xenstore_process_dm_command_ +@@ -854,7 +856,18 @@ static void xenstore_process_dm_command_ if (!strncmp(command, "save", len)) { fprintf(logfile, "dm-command: pause and save state\n"); @@ -235,17 +227,17 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c } else if (!strncmp(command, "continue", len)) { fprintf(logfile, "dm-command: continue after state save\n"); xen_pause_requested = 0; -@@ -984,6 +997,13 @@ void xenstore_record_dm_state(const char +@@ -987,6 +1000,13 @@ void xenstore_record_dm_state(const char xenstore_record_dm("state", state); } -+void xenstore_record_dm_error(char *errmsg) ++void xenstore_record_dm_error(const char *errmsg) +{ + fprintf(logfile, "%s\n", errmsg); + xenstore_record_dm("error", errmsg); + xenstore_record_dm_state("error"); +} + - void xenstore_process_media_change_event(char **vec) + static void xenstore_process_media_change_event(char **vec) { char *media_present = NULL; diff --git a/snapshot-xend.patch b/snapshot-xend.patch index 031ea13..9a00f72 100644 --- a/snapshot-xend.patch +++ b/snapshot-xend.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py +++ xen-4.0.0-testing/tools/python/xen/xend/image.py -@@ -492,7 +492,7 @@ class ImageHandler: +@@ -490,7 +490,7 @@ class ImageHandler: domains.domains_lock.acquire() @@ -11,7 +11,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py if self.device_model is None: return # Signal the device model to for action -@@ -529,10 +529,17 @@ class ImageHandler: +@@ -527,10 +527,17 @@ class ImageHandler: while state != ret: state = xstransact.Read("/local/domain/0/device-model/%i/state" % self.vm.getDomid()) @@ -32,7 +32,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py #resotre orig state 
xstransact.Store("/local/domain/0/device-model/%i" -@@ -557,6 +564,10 @@ class ImageHandler: +@@ -555,6 +562,10 @@ class ImageHandler: except: pass @@ -295,7 +295,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py if dominfo: dominfo.resume() else: -@@ -329,26 +403,7 @@ def restore(xd, fd, dominfo = None, paus +@@ -329,24 +403,7 @@ def restore(xd, fd, dominfo = None, paus dominfo.completeRestore(handler.store_mfn, handler.console_mfn) @@ -314,11 +314,9 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py - - try: - dominfo.waitForDevices() # Wait for backends to set up -- except Exception, exn: -- log.exception(exn) -- -- if lock: -- XendDomain.instance().domains_lock.acquire() +- finally: +- if lock: +- XendDomain.instance().domains_lock.acquire() + wait_devs(dominfo) if not paused: diff --git a/svm-lmsl.patch b/svm-lmsl.patch index 7585d56..d0001b0 100644 --- a/svm-lmsl.patch +++ b/svm-lmsl.patch @@ -45,7 +45,7 @@ Index: xen-4.0.0-testing/xen/arch/x86/hvm/svm/svm.c #define set_segment_register(name, value) \ asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) ) -@@ -840,6 +845,29 @@ static int svm_cpu_up(struct cpuinfo_x86 +@@ -847,6 +852,29 @@ static int svm_cpu_up(struct cpuinfo_x86 /* Initialize core's ASID handling. */ svm_asid_init(c); @@ -79,7 +79,7 @@ Index: xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h =================================================================== --- xen-4.0.0-testing.orig/xen/include/asm-x86/hvm/hvm.h +++ xen-4.0.0-testing/xen/include/asm-x86/hvm/hvm.h -@@ -131,6 +131,12 @@ struct hvm_function_table { +@@ -132,6 +132,12 @@ struct hvm_function_table { extern struct hvm_function_table hvm_funcs; extern int hvm_enabled; diff --git a/vif-bridge-no-iptables.patch b/vif-bridge-no-iptables.patch new file mode 100644 index 0000000..d19c5ef --- /dev/null +++ b/vif-bridge-no-iptables.patch @@ -0,0 +1,13 @@ +Index: xen-4.0.0-testing/tools/hotplug/Linux/vif-bridge +=================================================================== +--- xen-4.0.0-testing.orig/tools/hotplug/Linux/vif-bridge ++++ xen-4.0.0-testing/tools/hotplug/Linux/vif-bridge +@@ -91,7 +91,7 @@ case "$command" in + ;; + esac + +-handle_iptable ++#handle_iptable + + log debug "Successful vif-bridge $command for $vif, bridge $bridge." 
+ if [ "$command" == "online" ] diff --git a/xen-4.0.0-testing-src.tar.bz2 b/xen-4.0.0-testing-src.tar.bz2 index 42c0e76..29b6814 100644 --- a/xen-4.0.0-testing-src.tar.bz2 +++ b/xen-4.0.0-testing-src.tar.bz2 @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e09d164603872500120d70e947b329525b920f45ce924c3661057cdff3fb97bd -size 23209042 +oid sha256:4947d275a04f0a6ce9b6c027c84281f03611ef2fb6d81f8d0175d2f7c72b7619 +size 23218651 diff --git a/xen-config.diff b/xen-config.diff index 3c187e5..0a9fa5d 100644 --- a/xen-config.diff +++ b/xen-config.diff @@ -9,8 +9,8 @@ Index: xen-4.0.0-testing/Config.mk -CONFIG_QEMU ?= $(QEMU_REMOTE) +CONFIG_QEMU ?= ioemu-remote - QEMU_TAG := xen-4.0.0-rc1 - #QEMU_TAG ?= 2621a102cd74cd6691bed30f638581639fcb141d + QEMU_TAG := xen-4.0.0-rc2 + #QEMU_TAG ?= a0066d08514ecfec34c717c7184250e95519f39c @@ -164,9 +164,9 @@ CONFIG_OCAML_XENSTORED ?= n # Optional components XENSTAT_XENTOP ?= y diff --git a/xen-domUloader.diff b/xen-domUloader.diff index c5fb14c..cd914bb 100644 --- a/xen-domUloader.diff +++ b/xen-domUloader.diff @@ -123,7 +123,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py import xen.util.xsm.xsm as security from xen.util import xsconstants from xen.util import mkdir -@@ -2320,6 +2320,10 @@ class XendDomainInfo: +@@ -2337,6 +2337,10 @@ class XendDomainInfo: deviceClass, config = self.info['devices'].get(dev_uuid) self._waitForDevice(deviceClass, config['devid']) @@ -134,7 +134,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py def _waitForDevice_destroy(self, deviceClass, devid, backpath): return self.getDeviceController(deviceClass).waitForDevice_destroy( devid, backpath) -@@ -3206,7 +3210,7 @@ class XendDomainInfo: +@@ -3226,7 +3230,7 @@ class XendDomainInfo: devtype = devinfo[0] disk = devinfo[1]['uname'] @@ -143,7 +143,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py # If this is a drbd volume, check if we need to activate it if disk.find(":") != -1: -@@ -3217,8 +3221,17 @@ class XendDomainInfo: +@@ -3237,8 +3241,17 @@ class XendDomainInfo: if state == 'Secondary': os.system('/sbin/drbdadm primary ' + diskname) @@ -163,7 +163,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py if mounted: # This is a file, not a device. pygrub can cope with a # file if it's raw, but if it's QCOW or other such formats -@@ -3234,7 +3247,8 @@ class XendDomainInfo: +@@ -3254,7 +3267,8 @@ class XendDomainInfo: from xen.xend import XendDomain dom0 = XendDomain.instance().privilegedDomain() @@ -173,7 +173,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py fn = BOOTLOADER_LOOPBACK_DEVICE try: -@@ -3244,8 +3258,10 @@ class XendDomainInfo: +@@ -3264,8 +3278,10 @@ class XendDomainInfo: if mounted: log.info("Unmounting %s from %s." 
% (fn, BOOTLOADER_LOOPBACK_DEVICE)) diff --git a/xen-hvm-default-bridge.diff b/xen-hvm-default-bridge.diff index bcb0660..e659238 100644 --- a/xen-hvm-default-bridge.diff +++ b/xen-hvm-default-bridge.diff @@ -17,7 +17,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/net.c =================================================================== --- xen-4.0.0-testing.orig/tools/ioemu-remote/net.c +++ xen-4.0.0-testing/tools/ioemu-remote/net.c -@@ -1759,9 +1759,10 @@ int net_client_init(const char *device, +@@ -1759,9 +1759,10 @@ int net_client_init(const char *device, } if (get_param_value(script_arg, sizeof(script_arg), "scriptarg", p) == 0 && get_param_value(script_arg, sizeof(script_arg), "bridge", p) == 0) { /* deprecated; for xend compatibility */ @@ -34,7 +34,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py +++ xen-4.0.0-testing/tools/python/xen/xend/image.py -@@ -912,11 +912,13 @@ class HVMImageHandler(ImageHandler): +@@ -910,11 +910,13 @@ class HVMImageHandler(ImageHandler): mac = devinfo.get('mac') if mac is None: raise VmError("MAC address not specified or generated.") diff --git a/xen-hvm-default-pae.diff b/xen-hvm-default-pae.diff index 804e407..5512009 100644 --- a/xen-hvm-default-pae.diff +++ b/xen-hvm-default-pae.diff @@ -4,7 +4,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/image.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/image.py +++ xen-4.0.0-testing/tools/python/xen/xend/image.py -@@ -1030,7 +1030,7 @@ class X86_HVM_ImageHandler(HVMImageHandl +@@ -1028,7 +1028,7 @@ class X86_HVM_ImageHandler(HVMImageHandl def configure(self, vmConfig): HVMImageHandler.configure(self, vmConfig) diff --git a/xen-max-free-mem.diff b/xen-max-free-mem.diff index b5ca22a..9ff41a4 100644 --- a/xen-max-free-mem.diff +++ b/xen-max-free-mem.diff @@ -56,7 +56,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py + 'max_hvm_memory', 'node_to_cpu', 'node_to_memory', - 'node_to_dma32_mem' + 'node_to_dma32_mem', Index: xen-4.0.0-testing/tools/python/xen/xend/balloon.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/balloon.py @@ -98,7 +98,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -1452,6 +1452,27 @@ class XendDomainInfo: +@@ -1469,6 +1469,27 @@ class XendDomainInfo: pci_conf = self.info['devices'][dev_uuid][1] return map(pci_dict_to_bdf_str, pci_conf['devs']) diff --git a/xen-qemu-iscsi-fix.patch b/xen-qemu-iscsi-fix.patch index 256c02a..d8e41ff 100644 --- a/xen-qemu-iscsi-fix.patch +++ b/xen-qemu-iscsi-fix.patch @@ -1,8 +1,6 @@ -Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -=================================================================== ---- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c -+++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c -@@ -398,7 +398,7 @@ void xenstore_parse_domain_config(int hv +--- a/tools/ioemu-remote/xenstore.c ++++ b/tools/ioemu-remote/xenstore.c +@@ -399,7 +399,7 @@ void xenstore_parse_domain_config(int hv char *buf = NULL; char *fpath = NULL, *bpath = NULL, *btype = NULL, *dev = NULL, *params = NULL, *drv = NULL; @@ -11,7 +9,7 @@ Index: 
xen-4.0.0-testing/tools/ioemu-remote/xenstore.c unsigned int len, num, hd_index, pci_devid = 0; BlockDriverState *bs; BlockDriver *format; -@@ -485,12 +485,7 @@ void xenstore_parse_domain_config(int hv +@@ -486,12 +486,7 @@ void xenstore_parse_domain_config(int hv continue; free(danger_type); danger_type = xs_read(xsh, XBT_NULL, danger_buf, &len); @@ -25,7 +23,7 @@ Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c /* read the name of the device */ if (pasprintf(&buf, "%s/type", bpath) == -1) continue; -@@ -498,6 +493,35 @@ void xenstore_parse_domain_config(int hv +@@ -499,6 +494,35 @@ void xenstore_parse_domain_config(int hv drv = xs_read(xsh, XBT_NULL, buf, &len); if (drv == NULL) continue; diff --git a/xen-warnings.diff b/xen-warnings.diff index cd136c8..fd779f0 100644 --- a/xen-warnings.diff +++ b/xen-warnings.diff @@ -104,7 +104,7 @@ Index: xen-4.0.0-testing/tools/xenstore/xenstored_domain.c barf_perror("Failed to read from event fd"); if (port == virq_port) -@@ -561,7 +561,7 @@ static int dom0_init(void) +@@ -561,7 +561,7 @@ static int dom0_init(void) struct domain *dom0; port = xenbus_evtchn(); @@ -178,7 +178,7 @@ Index: xen-4.0.0-testing/tools/libxl/Makefile =================================================================== --- xen-4.0.0-testing.orig/tools/libxl/Makefile +++ xen-4.0.0-testing/tools/libxl/Makefile -@@ -49,7 +49,7 @@ xl.o: $(LIBCONFIG_OUTPUT)/libconfig.so x +@@ -50,7 +50,7 @@ xl.o: $(LIBCONFIG_OUTPUT)/libconfig.so x $(CC) $(CFLAGS) -I$(LIBCONFIG_SOURCE) -c xl.c $(CLIENTS): xl.o libxenlight.so $(LIBCONFIG_OUTPUT)/libconfig.so diff --git a/xen.changes b/xen.changes index aa2f0dd..1ab1b65 100644 --- a/xen.changes +++ b/xen.changes @@ -1,3 +1,31 @@ +------------------------------------------------------------------- +Fri Jan 29 09:22:46 MST 2010 - carnold@novell.com + +- bnc#573376 - OS reboot while creating DomU with Windows CD + +------------------------------------------------------------------- +Wed Jan 27 11:45:27 MST 2010 - carnold@novell.com + +- bnc#573881 - /usr/lib64/xen/bin/qemu-dm is a broken link + +------------------------------------------------------------------- +Thu Jan 21 18:50:36 MST 2010 - carnold@novell.com + +- Update to changeset 20840 RC1+ for sle11-sp1 beta3. + +------------------------------------------------------------------- +Thu Jan 21 10:41:59 MST 2010 - jfehlig@novell.com + +- bnc#569581 - SuSEfirewall2 should handle rules. Disable + handle_iptable in vif-bridge script + vif-bridge-no-iptables.patch + +------------------------------------------------------------------- +Wed Jan 20 09:12:00 MST 2010 - carnold@novell.com + +- bnc#569577 - Rename /etc/modprobe.d/xen_pvdrivers, installed by + xen-kmp-default, to ../xen_pvdrivers.conf + ------------------------------------------------------------------- Wed Jan 6 16:50:16 EST 2010 - ksrinivasan@novell.com @@ -128,19 +156,10 @@ Thu Oct 8 22:44:04 MDT 2009 - jfehlig@novell.com ------------------------------------------------------------------- Mon Sep 28 16:34:19 CST 2009 - wkong@novell.com + - Add patch ioemu-bdrv-open-CACHE_WB.patch for install guest on tapdisk very very slow. 
-------------------------------------------------------------------- -Mon Sep 28 08:28:24 MDT 2009 - carnold@novell.com - -- bnc#542525 - VUL-1: xen pygrub vulnerability - 20099-pygrub-security.patch - 20107-pygrub-security.patch - 20146-pygrub-security.patch - 20174-pygrub-security.patch - 20201-pygrub-security.patch - ------------------------------------------------------------------- Fri Sep 25 15:08:12 MDT 2009 - jfehlig@novell.com @@ -165,25 +184,36 @@ Tue Sep 15 09:32:59 MDT 2009 - jfehlig@novell.com 20125-xc-parse-tuple-fix.patch ------------------------------------------------------------------- -Mon Aug 24 10:31:36 MDT 2009 - carnold@novell.com +Wed Sep 2 10:12:18 MDT 2009 - carnold@novell.com -- bnc#491081 - Xen time goes backwards x3950M2 - Patch for this bug plus additional upstream patches from Jan. - 19614-x86-emul-lldt-ltr.patch - 20026-ept-rwx-default.patch - 20031-x86-pmode-load-seg-retry.patch - 20035-x86-load-sreg-adjust.patch - 20059-vmx-nmi-handling.patch - 20077-x86-runstate-cswitch-out.patch - 20078-x86_64-branch-emulation.patch - 20101-hvm-no-compat-virt-start.patch +- bnc#536176 - Xen panic when using iommu after updating hypervisor + 19380-vtd-feature-check.patch + +------------------------------------------------------------------- +Fri Aug 28 09:54:08 MDT 2009 - jfehlig@novell.com + +- bnc#530959 - virsh autostart doesn't work + Fixing this libvirt bug also required fixing xend's op_pincpu + method with upstream c/s 19580 + 19580-xend-pincpu.patch + +------------------------------------------------------------------- +Fri Aug 28 08:05:17 MDT 2009 - jbeulich@novell.com + +- bnc#534146 - Xen: Fix SRAT check for discontig memory + 20120-x86-srat-check-discontig.patch + +------------------------------------------------------------------- +Mon Aug 24 07:59:14 MDT 2009 - carnold@novell.com + +- bnc#491081 - Xen time goes backwards x3950M2 20112-x86-dom0-boot-run-timers.patch ------------------------------------------------------------------- -Fri Aug 14 13:00:48 MDT 2009 - carnold@novell.com +Tue Aug 11 01:08:51 CEST 2009 - ro@suse.de -- Modify the mkbuildtree script so the KMPs will build. - mkbuildtree.patch +- disable module build for ec2 correctly to fix build + (at the suse_kernel_module_package macro) ------------------------------------------------------------------- Mon Aug 10 16:21:00 EDT 2009 - ksrinivasan@novell.com @@ -192,112 +222,298 @@ Mon Aug 10 16:21:00 EDT 2009 - ksrinivasan@novell.com hv_win7_eoi_bug.patch ------------------------------------------------------------------- -Fri Aug 7 10:43:32 MDT 2009 - jfehlig@novell.com +Mon Aug 3 11:53:37 MDT 2009 - jfehlig@novell.com - bnc#524180 - xend memory leak resulting in long garbage collector - runs. Bug applies to xen 3.4.1 as well. + runs 20013-xend-memleak.patch ------------------------------------------------------------------- -Thu Aug 6 10:10:43 MDT 2009 - carnold@novell.com +Fri Jul 31 13:22:09 MDT 2009 - carnold@novell.com -- Update to Xen 3.4.1 FCS c/s 19718. +- Upstream bugfixes from Jan. 
+ 19896-32on64-arg-xlat.patch + 19960-show-page-walk.patch + 19945-pae-xen-l2-entries.patch + 19953-x86-fsgs-base.patch + 19931-gnttblop-preempt.patch + 19885-kexec-gdt-switch.patch + 19894-shadow-resync-fastpath-race.patch +- hyperv shim patches no longer require being applied conditionally ------------------------------------------------------------------- -Tue Aug 4 15:48:59 MDT 2009 - carnold@novell.com +Wed Jul 29 08:47:50 MDT 2009 - jfehlig@novell.com -- Rename xen_loop to xen_loop.conf to conform with naming rules. +- bnc#520234 - npiv does not work with XEN in SLE11 + Update block-npiv +- bnc#496033 - Support for creating NPIV ports without starting vm + block-npiv-common.sh + block-npiv-vport + Update block-npiv +- bnc#500043 - Fix access to NPIV disk from HVM vm + Update xen-qemu-iscsi-fix.patch ------------------------------------------------------------------- -Tue Jul 28 14:07:42 MDT 2009 - carnold@novell.com +Wed Jul 15 11:52:31 MDT 2009 - carnold@novell.com -- Update to Xen 3.4.1 RC10 c/s 19711. +- Don't build the KMPs for the ec2 kernel. ------------------------------------------------------------------- -Tue Jun 23 11:09:29 MDT 2009 - carnold@novell.com +Thu Jul 2 12:45:32 MDT 2009 - jfehlig@novell.com -- Update to Xen 3.4.1 RC4 c/s 19664. +- Upstream fixes from Jan Beulich + 19606-hvm-x2apic-cpuid.patch + 19734-vtd-gcmd-submit.patch + 19752-vtd-srtp-sirtp-flush.patch + 19753-vtd-reg-write-lock.patch + 19764-hvm-domain-lock-leak.patch + 19765-hvm-post-restore-vcpu-state.patch + 19767-hvm-port80-inhibit.patch + 19768-x86-dom0-stack-dump.patch + 19770-x86-amd-s3-resume.patch + 19801-x86-p2m-2mb-hap-only.patch + 19815-vtd-kill-correct-timer.patch +- Patch from Jan Beulich to aid in debugging bnc#509911 + gnttblop-preempt.patch ------------------------------------------------------------------- -Tue Jun 16 09:28:51 MDT 2009 - carnold@novell.com +Tue Jun 23 15:32:14 CST 2009 - wkong@novell.com -- Update to Xen 3.4.1 RC3 c/s 19657. - -------------------------------------------------------------------- -Thu Jun 11 14:17:22 MDT 2009 - carnold@novell.com - -- Update to Xen 3.4.1 RC2 c/s 19648. +- bnc#515220 - qemu-img-xen snapshot Segmentation fault + qemu-img-snapshot.patch update ------------------------------------------------------------------- Tue Jun 9 13:52:07 CST 2009 - wkong@novell.com + - bnc#504491 - drop write data when set read only disk in xen config bdrv_open2_fix_flags.patch bdrv_open2_flags_2.patch ------------------------------------------------------------------- -Mon May 18 15:03:29 MDT 2009 - carnold@novell.com +Fri Jun 5 13:19:04 MDT 2009 - carnold@novell.com -- Update to Xen 3.4.0 FCS c/s 19607 +- Upstream fixes from Jan Beulich. + 19474-32on64-S3.patch + 19490-log-dirty.patch + 19492-sched-timer-non-idle.patch + 19493-hvm-io-intercept-count.patch + 19505-x86_64-clear-cr1.patch + 19519-domctl-deadlock.patch + 19523-32on64-restore-p2m.patch + 19555-ept-live-migration.patch + 19557-amd-iommu-ioapic-remap.patch + 19560-x86-flush-tlb-empty-mask.patch + 19571-x86-numa-shift.patch + 19578-hvm-load-ldt-first.patch + 19592-vmx-exit-reason-perfc-size.patch + 19595-hvm-set-callback-irq-level.patch + 19597-x86-ioport-quirks-BL2xx.patch + 19602-vtd-multi-ioapic-remap.patch + 19631-x86-frametable-map.patch + 19653-hvm-vcpuid-range-checks.patch ------------------------------------------------------------------- -Mon May 17 17:15:57 CST 2009 - wkong@novell.com +Wed Jun 05 10:35:18 MDT 2009 - jsong@novell.com + +- bnc#382112 - Caps lock not being passed to vm correctly. 
+ capslock_enable.patch + +------------------------------------------------------------------- +Wed May 27 10:35:18 MDT 2009 - jfehlig@novell.com + +- bnc#506833 - Use pidof in xend and xendomains init scripts + +------------------------------------------------------------------- +Wed May 27 09:39:25 MDT 2009 - jsong@novell.com +- bnc#484778 - XEN: PXE boot of FV domU using non-Realtek NIC fails + enable_more_nic_pxe.patch + +------------------------------------------------------------------- +Wed May 27 09:38:40 MDT 2009 - jsong@novell.com +cross-migrate.patch +- bnc#390961 - cross-migration of a VM causes it to become + unresponsive (remains paused after migration) + +------------------------------------------------------------------- +Tue May 19 10:58:40 MDT 2009 - carnold@novell.com + +- Patches taken to fix the xenctx tool. The fixed version of this + tool is needed to debug bnc#502735. + 18962-xc_translate_foreign_address.patch + 18963-xenctx.patch + 19168-hvm-domctl.patch + 19169-remove-declare-bitmap.patch + 19170-libxc.patch + 19171-xenctx.patch + 19450-xc_translate_foreign_address.patch + +------------------------------------------------------------------- Mon May 18 16:15:57 CST 2009 - wkong@novell.com -bnc#485770 - check exsit file for save and snapshot-create xm-save-check-file.patch snapshot-xend.patch + +------------------------------------------------------------------- +Mon May 18 15:06:41 CST 2009 - wkong@novell.com +- bnc#503782 - Using converted vmdk image does not work + ioemu-tapdisk-compat-QEMU_IMG.patch ------------------------------------------------------------------- -Thu May 14 12:00:09 MDT 2009 - jfehlig@novell.com +Thu May 14 10:54:03 MDT 2009 - jfehlig@novell.com - bnc#503332 - Remove useless qcow tools /usr/sbin/{qcow-create,img2qcow,qcow2raw} from xen-tools package. +------------------------------------------------------------------- +Wed May 13 09:59:50 CST 2009 - jsong@novell.com +- bnc#474738 - adding CD drive to VM guest makes it unbootable. + parse_boot_disk.patch + ------------------------------------------------------------------- Mon May 11 18:49:50 CST 2009 - wkong@novell.com - bnc#477892 - snapshot windows can't accomplish. snapshot-xend.patch ------------------------------------------------------------------- -Fri Apr 22 17:30:02 CST 2009 - wkong@novell.com +Tue Apr 28 11:57:00 MDT 2009 - carnold@novell.com -- Backport two qcow2 patches from qemu org - ioemu-6816-qcow2-revert-6404-6405-6407.patch - ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch +- bnc#495300 - L3: Xen unable to PXE boot Windows based DomU's + 18545-hvm-gpxe-rom.patch, 18548-hvm-gpxe-rom.patch ------------------------------------------------------------------- -Fri Apr 17 16:21:36 CST 2009 - wkong@novell.com -- bnc#477890 - Patch: ioemu-qcow2-enhance-snapshot-create.patch +Mon Apr 27 10:42:17 MDT 2009 - jfehlig@novell.com + +- bnc#459836 - Fix rtc_timeoffset when localtime=0 + xend-timeoffset.patch ------------------------------------------------------------------- -Thu Apr 16 12:13:10 MDT 2009 - jfehlig@novell.com +Wed Apr 22 14:50:35 MDT 2009 - carnold@novell.com -- Updated xend-domain-lock.patch - fate#303525, bnc#494892 +- bnc#497440 - xmclone.sh script incorrectly handles networking for + SLE11. 
------------------------------------------------------------------- -Thu Apr 9 18:36:23 CST 2009 - wkong@novell.com - -- Remove Patch155: xend-core-dump-loc.diff -- Modify Patch110: xen-domUloader.diff, - remove symbol "bootfilter" +Fri Apr 17 16:21:36 MDT 2009 - wkong@novell.com +- bnc#477890 - VM becomes unresponsive after applying snapshot ------------------------------------------------------------------- -Wed Apr 8 17:38:12 CST 2009 - wkong@novell.com +Wed Apr 15 16:34:08 MDT 2009 - jfehlig@novell.com -- PATCH: network-nat-open-SuSEfirewall2-FORWARD.patch - fate#305354, bnc#472107 - Open SuSEfirewall2 FORWARD rule when use xen nat -- PATCH: network-nat-dhcpd-domain-info.patch - Add domain info for nat-dhcpd +- bnc#494892 - Update xend-domain-lock.patch to flock the lock + file. ------------------------------------------------------------------- -Thu Apr 2 16:52:04 MDT 2009 - jfehlig@novell.com +Wed Apr 8 16:30:14 EDT 2009 - ksrinivasan@novell.com -- Fix domUloader to handle block device names with ':' +- bnc#439639 - SVVP Test 273 "System - Sleep Stress With IO" fails + +Turned off s3/s4 sleep states for HVM guests. ------------------------------------------------------------------- -Wed Mar 11 16:28:59 MDT 2009 - jfehlig@novell.com +Tue Apr 7 21:55:14 MDT 2009 - jsong@novell.com -- Use pidofproc/checkproc in xend init script +- bnc#468169 - Fix domUloader to umount the mounted device mapper target in dom0 + when installing a sles10 guest with disk = /dev/disk/by_path + +------------------------------------------------------------------- +Thu Apr 2 16:03:25 MDT 2009 - jfehlig@novell.com + +- bnc#488490 - domUloader can't handle block device names with ':' +- bnc#486244 - vms fail to start after reboot when using qcow2 + +------------------------------------------------------------------- +Tue Mar 31 15:00:50 MDT 2009 - carnold@novell.com + +- bnc#490835 - VTd errata on Cantiga chipset + 19230-vtd-mobile-series4-chipset.patch + +------------------------------------------------------------------- +Mon Mar 30 15:03:16 MDT 2009 - carnold@novell.com + +- bnc#482515 - Missing dependency in xen.spec + +------------------------------------------------------------------- +Thu Mar 26 09:17:00 MDT 2009 - carnold@novell.com + +- Additional upstream bug fix patches from Jan Beulich. + 19132-page-list-mfn-links.patch + 19134-fold-shadow-page-info.patch + 19135-next-shadow-mfn.patch + 19136-page-info-rearrange.patch + 19156-page-list-simplify.patch + 19161-pv-ldt-handling.patch + 19162-page-info-no-cpumask.patch + 19216-msix-fixmap.patch + 19268-page-get-owner.patch + 19293-vcpu-migration-delay.patch + 19391-vpmu-double-free.patch + 19415-vtd-dom0-s3.patch + +------------------------------------------------------------------- +Wed Mar 25 13:55:25 MDT 2009 - carnold@novell.com + +- Imported numerous upstream bug fix patches.
+ 19083-memory-is-conventional-fix.patch + 19097-M2P-table-1G-page-mappings.patch + 19137-lock-domain-page-list.patch + 19140-init-heap-pages-max-order.patch + 19167-recover-pat-value-s3-resume.patch + 19172-irq-to-vector.patch + 19173-pci-passthrough-fix.patch + 19176-free-irq-shutdown-fix.patch + 19190-pciif-typo-fix.patch + 19204-allow-old-images-restore.patch + 19232-xend-exception-fix.patch + 19239-ioapic-s3-suspend-fix.patch + 19240-ioapic-s3-suspend-fix.patch + 19242-xenstored-use-after-free-fix.patch + 19259-ignore-shutdown-deferrals.patch + 19266-19365-event-channel-access-fix.patch + 19275-19296-schedular-deadlock-fixes.patch + 19276-cpu-selection-allocation-fix.patch + 19302-passthrough-pt-irq-time-out.patch + 19313-hvmemul-read-msr-fix.patch + 19317-vram-tracking-fix.patch + 19335-apic-s3-resume-error-fix.patch + 19353-amd-migration-fix.patch + 19354-amd-migration-fix.patch + 19371-in-sync-L1s-writable.patch + 19372-2-on-3-shadow-mode-fix.patch + 19377-xend-vnclisten.patch + 19400-ensure-ltr-execute.patch + 19410-virt-to-maddr-fix.patch + +------------------------------------------------------------------- +Mon Mar 9 16:28:27 MDT 2009 - jfehlig@novell.com + +- bnc#483565 - Fix block-iscsi script. + Updated block-iscsi and xen-domUloader.diff + +------------------------------------------------------------------- +Mon Mar 9 16:06:03 MDT 2009 - carnold@novell.com + +- bnc#465814 - Mouse stops responding when wheel is used in Windows + VM. + mouse-wheel-roll.patch (James Song) +- bnc#470704 - save/restore of windows VM throws off the mouse + tracking. + usb-save-restore.patch (James Song) + +------------------------------------------------------------------- +Thu Mar 5 15:35:30 MST 2009 - jfehlig@novell.com + +- bnc#436629 - Use global vnc-listen setting specified in xend + configuration file. + xend-vnclisten.patch +- bnc#482623 - Fix pygrub to append user-supplied 'extra' args + to kernel args. + 19234_pygrub.patch + +------------------------------------------------------------------- +Thu Mar 5 13:52:48 MST 2009 - carnold@novell.com + +- bnc#481161 upgrade - sles10sp2 to sles11 upgrade keeps + xen-tools-ioemu ------------------------------------------------------------------- Tue Mar 3 16:11:39 CET 2009 - kukuk@suse.de diff --git a/xen.spec b/xen.spec index fc8764a..c2b322a 100644 --- a/xen.spec +++ b/xen.spec @@ -1,7 +1,7 @@ # -# spec file for package xen (Version 4.0.0_20809_01) +# spec file for package xen (Version 4.0.0_20873_01) # -# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany. +# Copyright (c) 2009 SUSE LINUX Products GmbH, Nuernberg, Germany. 
# # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed @@ -22,7 +22,7 @@ Name: xen ExclusiveArch: %ix86 x86_64 %define xvers 4.0 %define xvermaj 4 -%define changeset 20809 +%define changeset 20873 %define xen_build_dir xen-4.0.0-testing %define with_kmp 1 BuildRequires: LibVNCServer-devel SDL-devel automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig @@ -37,9 +37,9 @@ BuildRequires: glibc-32bit glibc-devel-32bit %if %{?with_kmp}0 BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11 %endif -Version: 4.0.0_20809_01 +Version: 4.0.0_20873_01 Release: 1 -License: GPLv2 +License: GPL v2 only Group: System/Kernel AutoReqProv: on PreReq: %insserv_prereq %fillup_prereq @@ -63,7 +63,7 @@ Source16: xmclone.sh Source17: xend-relocation.sh Source18: init.xen_loop %if %{?with_kmp}0 -Source19: xen_pvdrivers +Source19: xen_pvdrivers.conf Source20: kmp_filelist %endif Source21: block-dmmd @@ -119,6 +119,8 @@ Patch354: xen-api-auth.patch Patch355: tools-gdbserver-build.diff Patch356: ioemu-vnc-resize.patch Patch357: ioemu-debuginfo.patch +Patch358: vif-bridge-no-iptables.patch +Patch359: qemu-console-retry.patch # Needs to go upstream Patch360: checkpoint-rename.patch Patch361: xm-save-check-file.patch @@ -232,7 +234,7 @@ Authors: ... %package libs -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: Libraries Group: System/Kernel #Requires: xen = %{version} @@ -282,7 +284,7 @@ Authors: Ian Pratt %package tools -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: Control tools for domain 0 Group: System/Kernel Requires: xen-libs = %{version} @@ -340,7 +342,7 @@ Authors: Ian Pratt %package tools-domU -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: Control tools for domain U Group: System/Kernel Conflicts: xen-tools @@ -361,7 +363,7 @@ Authors: Ian Pratt %package devel -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: Headers and libraries for development Group: System/Kernel Requires: xen-libs = %{version} @@ -412,7 +414,7 @@ Authors: %if %{?with_kmp}0 %package KMP -License: GPLv2+ +License: GPL v2 or later Group: System/Kernel Summary: Xen para-virtual device drivers for fully virtualized guests Conflicts: xen @@ -461,7 +463,7 @@ Xen, but is not available for release due to license restrictions. 
%endif %package doc-html -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: HTML documentation Group: Documentation/HTML @@ -480,7 +482,7 @@ Authors: Ian Pratt %package doc-pdf -License: GPLv2+ +License: GPL v2 or later Summary: Xen Virtualization: PDF documentation Group: Documentation/Other @@ -546,6 +548,8 @@ Authors: %patch355 -p1 %patch356 -p1 %patch357 -p1 +%patch358 -p1 +%patch359 -p1 %patch360 -p1 %patch361 -p1 %patch362 -p1 @@ -669,8 +673,8 @@ rm -f $RPM_BUILD_ROOT/usr/sbin/{qcow-create,img2qcow,qcow2raw} make -C tools/misc/serial-split install \ DESTDIR=$RPM_BUILD_ROOT MANDIR=%{_mandir} %ifarch x86_64 -mkdir -p $RPM_BUILD_ROOT/usr/lib64/xen/bin/ -ln -s %{_libdir}/xen/bin/qemu-dm $RPM_BUILD_ROOT/usr/lib64/xen/bin/qemu-dm +mkdir -p $RPM_BUILD_ROOT/${_libdir}/xen/bin/ +ln -s /usr/lib/xen/bin/qemu-dm $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm %endif %if %{?with_kmp}0 # pv driver modules @@ -681,7 +685,7 @@ for flavor in %flavors_to_build; do M=$PWD/obj/$flavor done mkdir -p $RPM_BUILD_ROOT/etc/modprobe.d -install -m644 %SOURCE19 $RPM_BUILD_ROOT/etc/modprobe.d/xen_pvdrivers +install -m644 %SOURCE19 $RPM_BUILD_ROOT/etc/modprobe.d/xen_pvdrivers.conf %endif # docs make -C docs install \ @@ -927,7 +931,6 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/xen/bin/qemu-dm.debug %{_libdir}/libvhd.so /usr/bin/serial-split /usr/include/blktaplib.h -/usr/include/flask.h /usr/include/fsimage* /usr/include/xen*.h /usr/include/xen/ diff --git a/xen_pvdrivers.conf b/xen_pvdrivers.conf new file mode 100644 index 0000000..7dd8c0f --- /dev/null +++ b/xen_pvdrivers.conf @@ -0,0 +1,7 @@ +# Install the paravirtualized drivers +install libata /sbin/modprobe xen-vbd 2>&1 |:; /sbin/modprobe --ignore-install libata + +install 8139cp /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139cp + +install 8139too /sbin/modprobe xen-vnif 2>&1 |:; /sbin/modprobe --ignore-install 8139too + diff --git a/xenapi-console-protocol.patch b/xenapi-console-protocol.patch index 64fa2ea..723f3f5 100644 --- a/xenapi-console-protocol.patch +++ b/xenapi-console-protocol.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -3896,6 +3896,14 @@ class XendDomainInfo: +@@ -3916,6 +3916,14 @@ class XendDomainInfo: if not config.has_key('backend'): config['backend'] = "00000000-0000-0000-0000-000000000000" diff --git a/xend-core-dump-loc.diff b/xend-core-dump-loc.diff index 2addd92..9efbb77 100644 --- a/xend-core-dump-loc.diff +++ b/xend-core-dump-loc.diff @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -2291,7 +2291,7 @@ class XendDomainInfo: +@@ -2308,7 +2308,7 @@ class XendDomainInfo: # To prohibit directory traversal based_name = os.path.basename(self.info['name_label']) diff --git a/xend-domain-lock.patch b/xend-domain-lock.patch index 77c6654..93b48b8 100644 --- a/xend-domain-lock.patch +++ b/xend-domain-lock.patch @@ -83,7 +83,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py from xen.util.blkif import parse_uname import xen.util.xsm.xsm as security from xen.util import xsconstants -@@ -457,6 +458,7 @@ class XendDomainInfo: +@@ 
-465,6 +466,7 @@ class XendDomainInfo: if self._stateGet() in (XEN_API_VM_POWER_STATE_HALTED, XEN_API_VM_POWER_STATE_SUSPENDED, XEN_API_VM_POWER_STATE_CRASHED): try: @@ -91,7 +91,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py XendTask.log_progress(0, 30, self._constructDomain) XendTask.log_progress(31, 60, self._initDomain) -@@ -2933,6 +2935,11 @@ class XendDomainInfo: +@@ -2953,6 +2955,11 @@ class XendDomainInfo: self._stateSet(DOM_STATE_HALTED) self.domid = None # Do not push into _stateSet()! @@ -103,7 +103,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py finally: self.refresh_shutdown_lock.release() -@@ -4434,6 +4441,74 @@ class XendDomainInfo: +@@ -4454,6 +4461,74 @@ class XendDomainInfo: def has_device(self, dev_class, dev_uuid): return (dev_uuid in self.info['%s_refs' % dev_class.lower()]) @@ -243,7 +243,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendCheckpoint.py + dominfo.acquire_running_lock() return dominfo - except: + except Exception, exn: dominfo.destroy() Index: xen-4.0.0-testing/tools/hotplug/Linux/Makefile =================================================================== From 6c7e8be7db558dcebcbbf7e26449a6011f7e19fa76c512024cf20478e5becdcd Mon Sep 17 00:00:00 2001 From: Charles Arnold Date: Fri, 5 Feb 2010 23:33:58 +0000 Subject: [PATCH 2/6] - Update to changeset 20900 RC2+ for sle11-sp1 beta4. OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=29 --- README.SuSE | 3 +- shadow.patch | 66 +++++++++++++++++++++++++++++++++++ xen-4.0.0-testing-src.tar.bz2 | 4 +-- xen-config.diff | 6 ++-- xen.changes | 5 +++ xen.spec | 8 +++-- 6 files changed, 83 insertions(+), 9 deletions(-) create mode 100644 shadow.patch diff --git a/README.SuSE b/README.SuSE index 227ac48..f161bbc 100644 --- a/README.SuSE +++ b/README.SuSE @@ -31,6 +31,7 @@ optional packages are also installed: vm-install (Optional, to install VMs) python-gtk (Optional, to install VMs graphically) virt-manager (Optional, to manage VMs graphically) + virt-viewer (Optional, to view VMs outside virt-manager) tightvnc (Optional, to view VMs outside virt-manager) Additional packages: @@ -328,7 +329,7 @@ documentation for workarounds. Networking ---------- -Your virtual machines become much more useful if your can reach them via the +Your virtual machines become much more useful if you can reach them via the network. Starting with openSUSE11.1 and SLE11, networking in domain 0 is configured and managed via YaST. The yast2-networking module can be used to create and manage bridged networks. During initial installation, a bridged diff --git a/shadow.patch b/shadow.patch new file mode 100644 index 0000000..814190f --- /dev/null +++ b/shadow.patch @@ -0,0 +1,66 @@ + In domain_create, previously we reserve 1M memory for domain creation (as +described in xend comment), and these memory SHOULD NOT related with vcpu +number. And later, shadow_mem_control() will modify the shadow size to 256 +pages per vcpu (also plus some other values related with guest memory size...). +Therefore the C/S 20389 which modifies 1M to 4M to fit more vcpu number is +wrong. I'm sorry for that. + + Following is the reason why currently 1M doesn't work for big number vcpus, +as we mentioned, it caused Xen crash. + + Each time when sh_set_allocation() is called, it checks whether +shadow_min_acceptable_pages() has been allocated, if not, it will allocate +them. That is to say, it is 128 pages per vcpu. 
But before we define +d->max_vcpu, guest vcpu hasn't been initialized, so +shadow_min_acceptable_pages() always returns 0. Therefore we only allocated 1M +shadow memory for domain_create, and didn't satisfy 128 pages per vcpu for +alloc_vcpu(). + + As we know, vcpu allocation is done in the hypercall of +XEN_DOMCTL_max_vcpus. However, at this point we haven't called +shadow_mem_control() and are still using the pre-allocated 1M shadow memory to +allocate so many vcpus. So it should be a BUG. Therefore when vcpu number +increases, 1M is not enough and causes Xen crash. C/S 20389 exposes this issue. + + So I think the right process should be, after d->max_vcpu is set and before +alloc_vcpu(), we should call sh_set_allocation() to satisfy 128 pages per vcpu. +The following patch does this work. Is it work for you? Thanks! + +Signed-off-by: Dongxiao Xu + +Index: xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c +=================================================================== +--- xen-4.0.0-testing.orig/xen/arch/x86/mm/shadow/common.c ++++ xen-4.0.0-testing/xen/arch/x86/mm/shadow/common.c +@@ -41,6 +41,9 @@ + + DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags); + ++static unsigned int sh_set_allocation(struct domain *d, ++ unsigned int pages, ++ int *preempted); + /* Set up the shadow-specific parts of a domain struct at start of day. + * Called for every domain from arch_domain_create() */ + void shadow_domain_init(struct domain *d, unsigned int domcr_flags) +@@ -82,6 +85,12 @@ void shadow_vcpu_init(struct vcpu *v) + } + #endif + ++ if ( !is_idle_domain(v->domain) ) ++ { ++ shadow_lock(v->domain); ++ sh_set_allocation(v->domain, 128, NULL); ++ shadow_unlock(v->domain); ++ } + v->arch.paging.mode = &SHADOW_INTERNAL_NAME(sh_paging_mode, 3); + } + +@@ -3100,7 +3109,7 @@ int shadow_enable(struct domain *d, u32 + { + unsigned int r; + shadow_lock(d); +- r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */ ++ r = sh_set_allocation(d, 256, NULL); /* Use at least 1MB */ + if ( r != 0 ) + { + sh_set_allocation(d, 0, NULL); diff --git a/xen-4.0.0-testing-src.tar.bz2 b/xen-4.0.0-testing-src.tar.bz2 index 29b6814..687628c 100644 --- a/xen-4.0.0-testing-src.tar.bz2 +++ b/xen-4.0.0-testing-src.tar.bz2 @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4947d275a04f0a6ce9b6c027c84281f03611ef2fb6d81f8d0175d2f7c72b7619 -size 23218651 +oid sha256:fadb3f78dfaf163464c6fcfed57a1f76a6d7cc2f65771bc9886800afdbb528bb +size 23224505 diff --git a/xen-config.diff b/xen-config.diff index 0a9fa5d..e48f4c2 100644 --- a/xen-config.diff +++ b/xen-config.diff @@ -9,9 +9,9 @@ Index: xen-4.0.0-testing/Config.mk -CONFIG_QEMU ?= $(QEMU_REMOTE) +CONFIG_QEMU ?= ioemu-remote - QEMU_TAG := xen-4.0.0-rc2 - #QEMU_TAG ?= a0066d08514ecfec34c717c7184250e95519f39c -@@ -164,9 +164,9 @@ CONFIG_OCAML_XENSTORED ?= n + QEMU_TAG ?= 575ed1016f6fba1c6a6cd32a828cb468bdee96bb + # Mon Feb 1 16:33:52 2010 +0000 +@@ -163,9 +163,9 @@ CONFIG_OCAML_XENSTORED ?= n # Optional components XENSTAT_XENTOP ?= y VTPM_TOOLS ?= n diff --git a/xen.changes b/xen.changes index 1ab1b65..b5032ef 100644 --- a/xen.changes +++ b/xen.changes @@ -1,3 +1,8 @@ +------------------------------------------------------------------- +Fri Feb 5 08:16:39 MST 2010 - carnold@novell.com + +- Update to changeset 20900 RC2+ for sle11-sp1 beta4. 
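To put numbers on the sizing argument made in shadow.patch above: shadow_min_acceptable_pages() amounts to 128 shadow pages per vcpu, so the flat 1MB (256 page) reservation made at domain-creation time is exhausted as soon as a guest needs more than two vcpus' worth of minimum allocation. A small Python model of that arithmetic, for illustration only (not Xen code):

    PAGES_PER_VCPU = 128      # shadow_min_acceptable_pages() per vcpu
    PAGE_SIZE = 4096

    def min_shadow_pages(nr_vcpus):
        flat = (1 << 20) // PAGE_SIZE          # the old flat 1MB = 256 pages
        return max(flat, PAGES_PER_VCPU * nr_vcpus)

    for vcpus in (1, 2, 32, 64):
        pages = min_shadow_pages(vcpus)
        print("%2d vcpus -> %5d pages (%.1f MB)"
              % (vcpus, pages, pages * PAGE_SIZE / 1048576.0))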
+ ------------------------------------------------------------------- Fri Jan 29 09:22:46 MST 2010 - carnold@novell.com diff --git a/xen.spec b/xen.spec index c2b322a..f0596c0 100644 --- a/xen.spec +++ b/xen.spec @@ -1,5 +1,5 @@ # -# spec file for package xen (Version 4.0.0_20873_01) +# spec file for package xen (Version 4.0.0_20900_01) # # Copyright (c) 2009 SUSE LINUX Products GmbH, Nuernberg, Germany. # @@ -22,7 +22,7 @@ Name: xen ExclusiveArch: %ix86 x86_64 %define xvers 4.0 %define xvermaj 4 -%define changeset 20873 +%define changeset 20900 %define xen_build_dir xen-4.0.0-testing %define with_kmp 1 BuildRequires: LibVNCServer-devel SDL-devel automake bin86 curl-devel dev86 graphviz latex2html libjpeg-devel libxml2-devel ncurses-devel openssl openssl-devel pciutils-devel python-devel texinfo transfig @@ -37,7 +37,7 @@ BuildRequires: glibc-32bit glibc-devel-32bit %if %{?with_kmp}0 BuildRequires: kernel-source kernel-syms module-init-tools xorg-x11 %endif -Version: 4.0.0_20873_01 +Version: 4.0.0_20900_01 Release: 1 License: GPL v2 only Group: System/Kernel @@ -146,6 +146,7 @@ Patch424: ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch Patch425: ioemu-bdrv-open-CACHE_WB.patch Patch426: xen-ioemu-hvm-pv-support.diff Patch427: qemu-dm-segfault.patch +Patch428: shadow.patch # Jim's domain lock patch Patch450: xend-domain-lock.patch # Hypervisor and PV driver Patches @@ -571,6 +572,7 @@ Authors: %patch425 -p1 %patch426 -p1 %patch427 -p1 +%patch428 -p1 %patch450 -p1 %patch500 -p1 %patch501 -p1 From 514b8cf8ecbf7d1b3366ab82602ca35d3269b15147ae5c49c255e722a8576799 Mon Sep 17 00:00:00 2001 From: Charles Arnold Date: Mon, 1 Mar 2010 15:05:50 +0000 Subject: [PATCH 3/6] - bnc#556939 - Improve device map cleanup code in domUloader - bnc#578910 - xm block-detach does not clean up xenstore hotplug-cleanup-fix.patch - bnc#579361 - Windows Server 2003 cannot wake up from standby in sp1 hibernate.patch - fate#308852: XEN CPU Pools cpupools-core.patch cpupools-core-fixup.patch keyhandler-alternative.patch cpu-pools-libxc.patch cpu-pools-python.patch cpu-pools-libxen.patch cpu-pools-xmtest.patch cpu-pools-docs.patch - bnc#558760: Disable scsi devices when PV drivers are loaded. - Update to changeset 20951 Xen 4.0.0 RC4 for sle11-sp1 beta5. - bnc#572146 - SLES11 SP1 beta 2 Xen - BUG: soft lockup - CPU#31 stuck for 61s!
[kstop/31:4512] cpuidle-hint-v3.patch OBS-URL: https://build.opensuse.org/package/show/Virtualization/xen?expand=0&rev=31 --- 32on64-extra-mem.patch | 2 +- bdrv_open2_flags_2.patch | 36 +- blktap-pv-cdrom.patch | 2 +- blktap.patch | 14 +- cpu-pools-docs.patch | 1484 ++++++++++++++ cpu-pools-libxc.patch | 360 ++++ cpu-pools-libxen.patch | 2180 ++++++++++++++++++++ cpu-pools-python.patch | 2543 +++++++++++++++++++++++ cpu-pools-xmtest.patch | 838 ++++++++ cpupools-core-fixup.patch | 127 ++ cpupools-core.patch | 3267 ++++++++++++++++++++++++++++++ domUloader.py | 18 +- dump-exec-state.patch | 28 +- hibernate.patch | 28 + hv_tools.patch | 10 +- hv_xen_base.patch | 4 +- ioemu-bdrv-open-CACHE_WB.patch | 2 +- ioemu-blktap-barriers.patch | 12 +- ioemu-blktap-image-format.patch | 6 +- ioemu-blktap-zero-size.patch | 2 +- ioemu-disable-scsi.patch | 80 + keyhandler-alternative.patch | 86 + shadow.patch | 2 +- snapshot-ioemu-delete.patch | 20 +- snapshot-ioemu-restore.patch | 8 +- snapshot-ioemu-save.patch | 24 +- snapshot-xend.patch | 16 +- tapdisk-ioemu-shutdown-fix.patch | 6 +- x86-cpufreq-report.patch | 20 +- x86-show-page-walk-early.patch | 44 +- xen-4.0.0-testing-src.tar.bz2 | 4 +- xen-config.diff | 4 +- xen-destdir.diff | 2 +- xen-domUloader.diff | 10 +- xen.changes | 48 + xen.spec | 65 +- xenapi-console-protocol.patch | 2 +- xend-core-dump-loc.diff | 2 +- xend-domain-lock.patch | 6 +- 39 files changed, 11277 insertions(+), 135 deletions(-) create mode 100644 cpu-pools-docs.patch create mode 100644 cpu-pools-libxc.patch create mode 100644 cpu-pools-libxen.patch create mode 100644 cpu-pools-python.patch create mode 100644 cpu-pools-xmtest.patch create mode 100644 cpupools-core-fixup.patch create mode 100644 cpupools-core.patch create mode 100644 hibernate.patch create mode 100644 ioemu-disable-scsi.patch create mode 100644 keyhandler-alternative.patch diff --git a/32on64-extra-mem.patch b/32on64-extra-mem.patch index 43d6515..263cf4a 100644 --- a/32on64-extra-mem.patch +++ b/32on64-extra-mem.patch @@ -2,7 +2,7 @@ Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py =================================================================== --- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py +++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py -@@ -2883,7 +2883,7 @@ class XendDomainInfo: +@@ -2902,7 +2902,7 @@ class XendDomainInfo: self.guest_bitsize = self.image.getBitSize() # Make sure there's enough RAM available for the domain diff --git a/bdrv_open2_flags_2.patch b/bdrv_open2_flags_2.patch index ae9cf67..2213d1a 100644 --- a/bdrv_open2_flags_2.patch +++ b/bdrv_open2_flags_2.patch @@ -1,6 +1,8 @@ ---- a/tools/ioemu-remote/hw/xen_blktap.c -+++ b/tools/ioemu-remote/hw/xen_blktap.c -@@ -225,6 +225,7 @@ static int open_disk(struct td_state *s, +Index: xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/ioemu-remote/hw/xen_blktap.c ++++ xen-4.0.0-testing/tools/ioemu-remote/hw/xen_blktap.c +@@ -227,6 +227,7 @@ static int open_disk(struct td_state *s, BlockDriver* drv; char* devname; static int devnumber = 0; @@ -8,7 +10,7 @@ int i; DPRINTF("Opening %s as blktap%d\n", path, devnumber); -@@ -247,7 +248,7 @@ static int open_disk(struct td_state *s, +@@ -249,7 +250,7 @@ static int open_disk(struct td_state *s, DPRINTF("%s driver specified\n", drv ? 
drv->format_name : "No"); /* Open the image */ @@ -17,9 +19,11 @@ fprintf(stderr, "Could not open image file %s\n", path); return -ENOMEM; } ---- a/tools/ioemu-remote/xenstore.c -+++ b/tools/ioemu-remote/xenstore.c -@@ -134,7 +134,8 @@ static void insert_media(void *opaque) +Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c ++++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c +@@ -136,7 +136,8 @@ static void insert_media(void *opaque) else format = &bdrv_raw; @@ -29,7 +33,7 @@ #ifdef CONFIG_STUBDOM { char *buf, *backend, *params_path, *params; -@@ -398,9 +399,9 @@ void xenstore_parse_domain_config(int hv +@@ -400,9 +401,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; char *buf = NULL; @@ -41,7 +45,7 @@ unsigned int len, num, hd_index, pci_devid = 0; BlockDriverState *bs; BlockDriver *format; -@@ -462,7 +463,8 @@ void xenstore_parse_domain_config(int hv +@@ -464,7 +465,8 @@ void xenstore_parse_domain_config(int hv } for (i = 0; i < num; i++) { @@ -51,7 +55,7 @@ /* read the backend path */ xenstore_get_backend_path(&bpath, "vbd", danger_path, hvm_domid, e_danger[i]); if (bpath == NULL) -@@ -548,6 +550,17 @@ void xenstore_parse_domain_config(int hv +@@ -550,6 +552,17 @@ void xenstore_parse_domain_config(int hv format = &bdrv_raw; } @@ -69,7 +73,7 @@ #if 0 /* Phantom VBDs are disabled because the use of paths * from guest-controlled areas in xenstore is unsafe. -@@ -615,7 +628,7 @@ void xenstore_parse_domain_config(int hv +@@ -617,7 +630,7 @@ void xenstore_parse_domain_config(int hv #ifdef CONFIG_STUBDOM if (pasprintf(&danger_buf, "%s/device/vbd/%s", danger_path, e_danger[i]) == -1) continue; @@ -78,12 +82,12 @@ pstrcpy(bs->filename, sizeof(bs->filename), params); } #else -@@ -644,7 +657,7 @@ void xenstore_parse_domain_config(int hv +@@ -646,7 +659,7 @@ void xenstore_parse_domain_config(int hv } } pstrcpy(bs->filename, sizeof(bs->filename), params); -- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) -+ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) +- if (bdrv_open2(bs, params, BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) { ++ if (bdrv_open2(bs, params, flags|BDRV_O_CACHE_WB /* snapshot and write-back */, format) < 0) { fprintf(stderr, "qemu: could not open vbd '%s' or hard disk image '%s' (drv '%s' format '%s')\n", buf, params, drv ? drv : "?", format ? 
format->format_name : "0"); - } - + } else { + char* snapshot = get_snapshot_name(atoi(e_danger[i])); diff --git a/blktap-pv-cdrom.patch b/blktap-pv-cdrom.patch index ccaf60a..ccf8b9a 100644 --- a/blktap-pv-cdrom.patch +++ b/blktap-pv-cdrom.patch @@ -741,7 +741,7 @@ Index: xen-4.0.0-testing/tools/blktap/lib/blktaplib.h =================================================================== --- xen-4.0.0-testing.orig/tools/blktap/lib/blktaplib.h +++ xen-4.0.0-testing/tools/blktap/lib/blktaplib.h -@@ -219,6 +219,7 @@ typedef struct msg_pid { +@@ -220,6 +220,7 @@ typedef struct msg_pid { #define DISK_TYPE_RAM 3 #define DISK_TYPE_QCOW 4 #define DISK_TYPE_QCOW2 5 diff --git a/blktap.patch b/blktap.patch index d35adf6..3d67c84 100644 --- a/blktap.patch +++ b/blktap.patch @@ -1,9 +1,11 @@ bug #239173 bug #242953 ---- a/tools/python/xen/xend/XendDomainInfo.py -+++ b/tools/python/xen/xend/XendDomainInfo.py -@@ -3282,7 +3282,7 @@ class XendDomainInfo: +Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py ++++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py +@@ -3286,7 +3286,7 @@ class XendDomainInfo: (fn, BOOTLOADER_LOOPBACK_DEVICE)) vbd = { @@ -12,8 +14,10 @@ bug #242953 'device': BOOTLOADER_LOOPBACK_DEVICE, } ---- a/tools/ioemu-remote/xenstore.c -+++ b/tools/ioemu-remote/xenstore.c +Index: xen-4.0.0-testing/tools/ioemu-remote/xenstore.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/ioemu-remote/xenstore.c ++++ xen-4.0.0-testing/tools/ioemu-remote/xenstore.c @@ -397,9 +397,9 @@ void xenstore_parse_domain_config(int hv { char **e_danger = NULL; diff --git a/cpu-pools-docs.patch b/cpu-pools-docs.patch new file mode 100644 index 0000000..e28f7c3 --- /dev/null +++ b/cpu-pools-docs.patch @@ -0,0 +1,1484 @@ +Index: xen-4.0.0-testing/docs/xen-api/coversheet.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/coversheet.tex ++++ xen-4.0.0-testing/docs/xen-api/coversheet.tex +@@ -52,6 +52,7 @@ Mike Day, IBM & Daniel Veillard, Red Hat + Jim Fehlig, Novell & Tom Wilkie, University of Cambridge \\ + Jon Harrop, XenSource & Yosuke Iwamatsu, NEC \\ + Masaki Kanno, FUJITSU \\ ++Lutz Dube, FUJITSU TECHNOLOGY SOLUTIONS \\ + \end{tabular} + \end{large} + +Index: xen-4.0.0-testing/docs/xen-api/revision-history.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/revision-history.tex ++++ xen-4.0.0-testing/docs/xen-api/revision-history.tex +@@ -50,6 +50,12 @@ + between classes. Added host.PSCSI\_HBAs and VM.DSCSI\_HBAs + fields.\tabularnewline + \hline ++ 1.0.10 & 10th Jan. 10 & L. Dube & ++ Added definitions of new classes cpu\_pool. Updated the table ++ and the diagram representing relationships between classes. ++ Added fields host.resident\_cpu\_pools, VM.cpu\_pool and ++ host\_cpu.cpu\_pool. 
++ \hline + \end{tabular} + \end{center} + \end{flushleft} +Index: xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-coversheet.tex ++++ xen-4.0.0-testing/docs/xen-api/xenapi-coversheet.tex +@@ -17,12 +17,12 @@ + \newcommand{\coversheetlogo}{xen.eps} + + %% Document date +-\newcommand{\datestring}{20th November 2009} ++\newcommand{\datestring}{10th January 2010} + + \newcommand{\releasestatement}{Stable Release} + + %% Document revision +-\newcommand{\revstring}{API Revision 1.0.9} ++\newcommand{\revstring}{API Revision 1.0.10} + + %% Document authors + \newcommand{\docauthors}{ +Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel-graph.dot ++++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel-graph.dot +@@ -14,7 +14,7 @@ fontname="Verdana"; + + node [ shape=box ]; session VM host network VIF PIF SR VDI VBD PBD user; + node [ shape=box ]; XSPolicy ACMPolicy DPCI PPCI host_cpu console VTPM; +-node [ shape=box ]; DSCSI PSCSI DSCSI_HBA PSCSI_HBA; ++node [ shape=box ]; DSCSI PSCSI DSCSI_HBA PSCSI_HBA cpu_pool; + node [ shape=ellipse ]; VM_metrics VM_guest_metrics host_metrics; + node [ shape=ellipse ]; PIF_metrics VIF_metrics VBD_metrics PBD_metrics; + session -> host [ arrowhead="none" ] +@@ -51,4 +51,7 @@ DSCSI_HBA -> PSCSI_HBA [ arrowhead="crow + PSCSI -> host [ arrowhead="none", arrowtail="crow" ] + PSCSI_HBA -> host [ arrowhead="none", arrowtail="crow" ] + PSCSI -> PSCSI_HBA [ arrowhead="none", arrowtail="crow" ] ++cpu_pool -> host_cpu [ arrowhead="crow", arrowtail="none" ] ++cpu_pool -> VM [ arrowhead="crow", arrowtail="none" ] ++host -> cpu_pool [ arrowhead="crow", arrowtail="none" ] + } +Index: xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex +=================================================================== +--- xen-4.0.0-testing.orig/docs/xen-api/xenapi-datamodel.tex ++++ xen-4.0.0-testing/docs/xen-api/xenapi-datamodel.tex +@@ -56,6 +56,7 @@ Name & Description \\ + {\tt debug} & A basic class for testing \\ + {\tt XSPolicy} & A class for handling Xen Security Policies \\ + {\tt ACMPolicy} & A class for handling ACM-type policies \\ ++{\tt cpu\_pool} & A container for VMs which should share the same host\_cpu(s) \\ + \hline + \end{tabular}\end{center} + \section{Relationships Between Classes} +@@ -88,6 +89,9 @@ PSCSI.HBA & PSCSI\_HBA.PSCSIs & one-to-m + PSCSI\_HBA.host & host.PSCSI\_HBAs & one-to-many\\ + host.resident\_VMs & VM.resident\_on & many-to-one\\ + host.host\_CPUs & host\_cpu.host & many-to-one\\ ++host.resident\_cpu\_pools & cpu\_pool.resident\_on & many-to-one\\ ++cpu\_pool.started\_VMs & VM.cpu\_pool & many-to-one\\ ++cpu\_pool.host\_CPUs & host\_cpu.cpu\_pool & many-to-one\\ + \hline + \end{tabular}\end{center} + +@@ -499,6 +503,56 @@ error code and a message describing the + \begin{verbatim}SECURITY_ERROR(xserr, message)\end{verbatim} + \begin{center}\rule{10em}{0.1pt}\end{center} + ++\subsubsection{POOL\_BAD\_STATE} ++ ++You attempted an operation on a pool that was not in an appropriate state ++at the time; for example, you attempted to activate a pool that was ++already activated.
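A note on how these pool errors reach a caller: xend's XML-RPC Xen-API wraps each result in a struct carrying a Status field, a Value on success, and an ErrorDescription (the error code followed by its arguments) on failure. A hedged client-side sketch, assuming that result shape; the remaining pool error codes are defined just below:

    def check(result):
        # result: dict as returned by xend's XML-RPC Xen-API (assumed shape)
        if result.get("Status") != "Success":
            err = result.get("ErrorDescription", ["UNKNOWN"])
            if err[0] == "POOL_BAD_STATE":
                raise RuntimeError("cpu_pool in wrong state: %r" % (err[1:],))
            raise RuntimeError("Xen-API failure: %r" % (err,))
        return result["Value"]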
++ ++\vspace{0.3cm} ++{\bf Signature:} ++\begin{verbatim}POOL_BAD_STATE(current pool state)\end{verbatim} ++\begin{center}\rule{10em}{0.1pt}\end{center} ++ ++\subsubsection{INSUFFICIENT\_CPUS} ++ ++You attempted to activate a cpu\_pool but there are not enough ++unallocated CPUs to satisfy the request. ++ ++\vspace{0.3cm} ++{\bf Signature:} ++\begin{verbatim}INSUFFICIENT_CPUS(needed cpu count, available cpu count)\end{verbatim} ++\begin{center}\rule{10em}{0.1pt}\end{center} ++ ++\subsubsection{UNKOWN\_SCHED\_POLICY} ++ ++The specified scheduler policy is unknown to the host. ++ ++\vspace{0.3cm} ++{\bf Signature:} ++\begin{verbatim}UNKOWN_SCHED_POLICY()\end{verbatim} ++\begin{center}\rule{10em}{0.1pt}\end{center} ++ ++\subsubsection{INVALID\_CPU} ++ ++You tried to reconfigure a cpu\_pool with a CPU that is unknown to the host ++or is in a wrong state. ++ ++\vspace{0.3cm} ++{\bf Signature:} ++\begin{verbatim}INVALID_CPU(message)\end{verbatim} ++\begin{center}\rule{10em}{0.1pt}\end{center} ++ ++\subsubsection{LAST\_CPU\_NOT\_REMOVEABLE} ++ ++You tried to remove the last CPU from a cpu\_pool that has one or more ++active domains. ++ ++\vspace{0.3cm} ++{\bf Signature:} ++\begin{verbatim}LAST_CPU_NOT_REMOVEABLE(message)\end{verbatim} ++\begin{center}\rule{10em}{0.1pt}\end{center} ++ + + \newpage + \section{Class: session} +@@ -4847,6 +4901,135 @@ references to objects with match names + \vspace{0.3cm} + \vspace{0.3cm} + \vspace{0.3cm} ++\subsubsection{RPC name:~get\_cpu\_pool} ++ ++{\bf Overview:} ++Get the cpu\_pool field of the given VM. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((cpu_pool ref) Set) get_cpu_pool (session_id s, VM ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt VM ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(cpu\_pool ref) Set ++} ++ ++ ++references to cpu\_pool objects. ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_pool\_name} ++ ++{\bf Overview:} ++Get the pool\_name field of the given VM. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_pool_name (session_id s, VM ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt VM ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++name of cpu pool to use ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~cpu\_pool\_migrate} ++ ++{\bf Overview:} ++Migrate the VM to another cpu\_pool.
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void cpu_pool_migrate (session_id s, VM ref self, cpu_pool ref pool)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt VM ref } & self & reference to the object \\ \hline ++{\tt cpu\_pool ref} & pool & reference to new cpu\_pool \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent{\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE, VM\_BAD\_POWER\_STATE} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_pool\_name} ++ ++{\bf Overview:} ++Set cpu pool name to use for next activation. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void set_pool_name (session_id s, VM ref self, string pool\_name)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt VM ref } & self & reference to the object \\ \hline ++{\tt string} & pool\_name & New pool name \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++ ++ ++ + + \vspace{1cm} + \newpage +@@ -5681,6 +5864,7 @@ $\mathit{RO}_\mathit{run}$ & {\tt PSCSI + $\mathit{RO}_\mathit{run}$ & {\tt PSCSI\_HBAs} & (PSCSI\_HBA ref) Set & physical SCSI host bus adapters \\ + $\mathit{RO}_\mathit{run}$ & {\tt host\_CPUs} & (host\_cpu ref) Set & The physical CPUs on this host \\ + $\mathit{RO}_\mathit{run}$ & {\tt metrics} & host\_metrics ref & metrics associated with this host \\ ++$\mathit{RO}_\mathit{run}$ & {\tt resident\_cpu\_pools} & (cpu\_pool ref) Set & list of cpu\_pools currently resident on the host \\ + \hline + \end{longtable} + \subsection{RPCs associated with class: host} +@@ -7229,6 +7413,38 @@ references to objects with match names + \vspace{0.3cm} + \vspace{0.3cm} + \vspace{0.3cm} ++\subsubsection{RPC name:~get\_resident\_cpu\_pools} ++ ++{\bf Overview:} ++Get the resident\_cpu\_pools field of the given host. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((cpu_pool ref) Set) get_resident_cpu_pools (session_id s, host ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt host ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(cpu\_pool ref) Set ++} ++ ++ ++references to all known cpu\_pools. 
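Taken together, the RPCs specified above admit a straightforward client flow. A hypothetical XML-RPC session against xend's Xen-API; the endpoint URL, credentials and pool parameters are placeholders, while the call and field names follow this specification:

    import xmlrpclib  # Python 2, contemporary with xend

    srv = xmlrpclib.Server("http://localhost:9363/")  # assumed xen-api address
    session = srv.session.login_with_password("user", "secret")["Value"]

    pool = srv.cpu_pool.create(session, {
        "name_label": "pool1",
        "name_description": "example pool",
        "auto_power_on": False,
        "ncpu": 2,
        "sched_policy": "credit",
        "proposed_CPUs": ["2", "3"],
        "other_config": {},
    })["Value"]
    srv.cpu_pool.activate(session, pool)        # may fail with INSUFFICIENT_CPUS
    vm = srv.VM.get_by_name_label(session, "vm1")["Value"][0]
    srv.VM.cpu_pool_migrate(session, vm, pool)  # POOL_BAD_STATE if pool inactive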
++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++ + \vspace{1cm} + \newpage +@@ -7484,6 +7700,7 @@ $\mathit{RO}_\mathit{run}$ & {\tt stepp + $\mathit{RO}_\mathit{run}$ & {\tt flags} & string & the flags of the physical CPU (a decoded version of the features field) \\ + $\mathit{RO}_\mathit{run}$ & {\tt features} & string & the physical CPU feature bitmap \\ + $\mathit{RO}_\mathit{run}$ & {\tt utilisation} & float & the current CPU utilisation \\ ++$\mathit{RO}_\mathit{run}$ & {\tt cpu\_pool} & (cpu\_pool ref) Set & reference to cpu\_pool the cpu belongs to \\ + \hline + \end{longtable} + \subsection{RPCs associated with class: host\_cpu} +@@ -7896,6 +8113,70 @@ all fields from the object + \vspace{0.3cm} + \vspace{0.3cm} + \vspace{0.3cm} ++\subsubsection{RPC name:~get\_cpu\_pool} ++ ++{\bf Overview:} ++Get the cpu\_pool field of the given host\_cpu. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((cpu_pool) Set) get_cpu_pool (session_id s, host_cpu ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt host\_cpu ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(cpu\_pool) Set ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_unassigned\_cpus} ++ ++{\bf Overview:} ++Get a reference to all cpus that are not assigned to any cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((host_cpu) Set) get_unassigned_cpus (session_id s)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(host\_cpu ref) Set ++} ++ ++ ++Set of free (not assigned) cpus ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++ ++ + + \vspace{1cm} + \newpage +@@ -18892,3 +19173,1073 @@ all fields from the object + \vspace{0.3cm} + \vspace{0.3cm} + +\newpage +\section{Class: cpu\_pool} +\subsection{Fields for class: cpu\_pool} +\begin{longtable}{|lllp{0.38\textwidth}|} +\hline +\multicolumn{1}{|l}{Name} & \multicolumn{3}{l|}{\bf cpu\_pool} \\ +\multicolumn{1}{|l}{Description} & \multicolumn{3}{l|}{\parbox{11cm}{\em A CPU pool}} \\ +\hline +Quals & Field & Type & Description \\ +\hline +$\mathit{RO}_\mathit{run}$ & {\tt uuid} & string & unique identifier / object reference \\ +$\mathit{RW}$ & {\tt name\_label} & string & name of cpu\_pool \\ +$\mathit{RW}$ & {\tt name\_description} & string & cpu\_pool description \\ +$\mathit{RO}_\mathit{run}$ & {\tt resident\_on} & host ref & the host the cpu\_pool is currently resident on \\ +$\mathit{RW}$ & {\tt auto\_power\_on} & bool & True if this cpu\_pool should be activated automatically after host boot \\ +$\mathit{RO}_\mathit{run}$ & {\tt started\_VMs} & (VM ref) Set & list of VMs currently started in this cpu\_pool \\ +$\mathit{RW}$ & {\tt ncpu} & integer & number of host\_CPUs requested for this cpu\_pool at next start \\ +$\mathit{RW}$ & {\tt sched\_policy} & string & scheduler policy on this cpu\_pool \\ +$\mathit{RW}$ & {\tt proposed\_CPUs} & (string) Set & list of proposed host\_CPUs to assign at next activation \\ +$\mathit{RO}_\mathit{run}$ & {\tt host\_CPUs} & (host\_cpu ref) Set & list of host\_cpus currently assigned to this cpu\_pool \\ +$\mathit{RO}_\mathit{run}$ & {\tt 
activated} & bool & True if this cpu\_pool is activated \\ ++$\mathit{RW}$ & {\tt other\_config} & (string $\rightarrow$ string) Map & additional configuration \\ ++\hline ++\end{longtable} ++\subsection{RPCs associated with class: cpu\_pool} ++\subsubsection{RPC name:~activate} ++ ++{\bf Overview:} ++Activate the cpu\_pool and assign the given CPUs to it. ++CPUs listed in the proposed\_CPUs field that do not exist or are not free are ++ignored. If the value of ncpu is greater than the number of CPUs in field ++proposed\_CPUs, additional free CPUs are assigned to the cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void activate (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} ++ {\tt POOL\_BAD\_STATE, INSUFFICIENT\_CPUS, UNKOWN\_SCHED\_POLICY} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~create} ++ ++{\bf Overview:} ++Create a new cpu\_pool instance, and return its handle. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} (cpu_pool ref) create (session_id s, cpu_pool record args)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool record } & args & All constructor arguments \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++cpu\_pool ref ++} ++ ++ ++reference to the newly created object ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~deactivate} ++ ++{\bf Overview:} ++Deactivate the cpu\_pool and release all CPUs assigned to it. ++This function can only be called if there are no domains active in the ++cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void deactivate (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~destroy} ++ ++{\bf Overview:} ++Destroy the specified cpu\_pool. The cpu\_pool is completely removed from the ++system. ++This function can only be called if the cpu\_pool is deactivated. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void destroy (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} {\tt POOL\_BAD\_STATE} ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~add\_host\_CPU\_live} ++ ++ ++{\bf Overview:} ++Add an additional CPU immediately to the cpu\_pool.
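add\_host\_CPU\_live and its counterpart remove\_host\_CPU\_live (specified next) are the only calls that change the CPUs of an activated pool. Continuing the hypothetical client sketch from above, with get\_unassigned\_cpus supplying a free CPU:

    spare = srv.host_cpu.get_unassigned_cpus(session)["Value"][0]
    srv.cpu_pool.add_host_CPU_live(session, pool, spare)     # INVALID_CPU if not free
    cpus = srv.cpu_pool.get_host_CPUs(session, pool)["Value"]
    srv.cpu_pool.remove_host_CPU_live(session, pool, cpus[-1])
    # removing the last CPU of a pool with active domains fails with
    # LAST_CPU_NOT_REMOVEABLE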
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void add_host_CPU_live (session_id s, cpu_pool ref self, host_cpu ref host_cpu)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt host\_cpu ref } & host\_cpu & CPU to add \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} ++ {\tt POOL\_BAD\_STATE, INVALID\_CPU} ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~remove\_host\_CPU\_live} ++ ++ ++{\bf Overview:} ++Remove a CPU immediately from the cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void remove_host_CPU_live (session_id s, cpu_pool ref self, host_cpu ref host_cpu)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt host\_cpu ref } & host\_cpu & CPU to remove \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} ++ {\tt POOL\_BAD\_STATE, INVALID\_CPU, LAST\_CPU\_NOT\_REMOVEABLE} ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_all} ++ ++ ++{\bf Overview:} ++Return a list of all the cpu pools known to the system. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((cpu_pool ref) Set) get_all (session_id s)\end{verbatim} ++ ++ ++ \noindent {\bf Return Type:} ++{\tt ++(cpu\_pool ref) Set ++} ++A list of all the IDs of the cpu pools. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_all\_records} ++ ++ ++{\bf Overview:} ++Return a map of all the cpu pool records known to the system. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} (((cpu_pool ref) -> (cpu_pool record)) Map) get_all_records (session_id s)\end{verbatim} ++ ++ ++ \noindent {\bf Return Type:} ++{\tt ++((cpu\_pool ref) $\rightarrow$ (cpu\_pool record)) Map ++} ++A map of all the cpu pool records indexed by cpu pool ref. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_by\_name\_label} ++ ++{\bf Overview:} ++Get all the cpu\_pool instances with the given label. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((cpu_pool ref) Set) get_by_name_label (session_id s, string label)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt string } & label & label of object to return \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(cpu\_pool ref) Set ++} ++ ++ ++references to objects with matching names ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_by\_uuid} ++ ++{\bf Overview:} ++Get a reference to the cpu\_pool instance with the specified UUID.
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} (cpu_pool ref) get_by_uuid (session_id s, string uuid)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt string } & uuid & UUID of object to return \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++cpu\_pool ref ++} ++ ++ ++reference to the object ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_activated} ++ ++ ++{\bf Overview:} ++Return the activation state of the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} bool get_activated (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++bool ++} ++Returns {\bf true} if cpu\_pool is active. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_auto\_power\_on} ++ ++ ++{\bf Overview:} ++Return the auto power attribute of the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} bool get_auto_power_on (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++bool ++} ++Returns {\bf true} if cpu\_pool has to be activated on xend start. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_host\_CPUs} ++ ++ ++{\bf Overview:} ++Return the list of host\_cpu refs assigned to the cpu\_pool object. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((host_cpu ref) Set) get_host_CPUs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(host\_cpu ref) Set ++} ++Returns a list of references of all host cpus assigned to the cpu\_pool. ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_name\_description} ++ ++{\bf Overview:} ++Get the name/description field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_name_description (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_name\_label} ++ ++{\bf Overview:} ++Get the name/label field of the given cpu\_pool. 
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_name_label (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_ncpu} ++ ++{\bf Overview:} ++Get the ncpu field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} int get_ncpu (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++int ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_proposed\_CPUs} ++ ++{\bf Overview:} ++Get the proposed\_CPUs field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} ((string) Set) get_proposed_CPUs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++ ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++(string) Set ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_other\_config} ++ ++{\bf Overview:} ++Get the other\_config field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} ((string -> string) Map) get_other_config (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++(string $\rightarrow$ string) Map ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_record} ++ ++{\bf Overview:} ++Get a record containing the current state of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} (cpu_pool record) get_record (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++cpu\_pool record ++} ++ ++ ++all fields of the object. ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_resident\_on} ++ ++{\bf Overview:} ++Get the resident\_on field of the given cpu\_pool. 
++ ++\noindent {\bf Signature:} ++\begin{verbatim} (host ref) get_resident_on (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++host ref ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_sched\_policy} ++ ++{\bf Overview:} ++Get the sched\_policy field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} string get_sched_policy (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_started\_VMs} ++ ++{\bf Overview:} ++Get the started\_VMs field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} ((VM ref) Set) get_started_VMs (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++(VM ref) Set ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~get\_uuid} ++ ++{\bf Overview:} ++Get the uuid field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} string get_uuid (session_id s, cpu_pool ref self)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++string ++} ++ ++ ++value of the field ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_auto\_power\_on} ++ ++{\bf Overview:} ++Set the auto\_power\_on field of the given cpu\_pool. ++ ++\noindent {\bf Signature:} ++\begin{verbatim} void set_auto_power_on (session_id s, cpu_pool ref self, bool value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++ \hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt bool } & value & new auto\_power\_on value \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_proposed\_CPUs} ++ ++{\bf Overview:} ++Set the proposed\_CPUs field of the given cpu\_pool. 
++
++\noindent {\bf Signature:}
++\begin{verbatim} void set_proposed_CPUs (session_id s, cpu_pool ref self, string Set cpus)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string Set } & cpus & Set of preferred CPU (numbers) to use \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~add\_to\_proposed\_CPUs}
++
++{\bf Overview:}
++Add a CPU (number) to the proposed\_CPUs field of the given cpu\_pool.
++
++\noindent {\bf Signature:}
++\begin{verbatim} void add_to_proposed_CPUs (session_id s, cpu_pool ref self, integer cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++ \hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt integer } & cpu & Number of CPU to add \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~remove\_from\_proposed\_CPUs}
++
++{\bf Overview:}
++Remove a CPU (number) from the proposed\_CPUs field of the given cpu\_pool.
++
++\noindent {\bf Signature:}
++\begin{verbatim} void remove_from_proposed_CPUs (session_id s, cpu_pool ref self, integer cpu)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt integer } & cpu & Number of CPU to remove \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++\vspace{0.3cm}
++
++\noindent {\bf Possible Error Codes:}
++ {\tt POOL\_BAD\_STATE}
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~set\_name\_label}
++
++{\bf Overview:}
++Set the name/label field of the given cpu\_pool.
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void set_name_label (session_id s, cpu_pool ref self, string value)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string } & value & New value to set \\ \hline
++\end{tabular}
++
++\vspace{0.3cm}
++
++ \noindent {\bf Return Type:}
++{\tt
++void
++}
++
++
++
++\vspace{0.3cm}
++\vspace{0.3cm}
++\vspace{0.3cm}
++\subsubsection{RPC name:~set\_ncpu}
++
++{\bf Overview:}
++Set the ncpu field of the given cpu\_pool.
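++
++For example, asking the pool to use four CPUs via the C binding added by
++this series (again only a sketch; \texttt{session} and \texttt{pool} are
++assumed):
++
++\begin{verbatim}
++ if (!xen_cpu_pool_set_ncpu(session, pool, 4))
++     print_error(session); /* POOL_BAD_STATE when not permitted */
++\end{verbatim}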
++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void set_ncpu (session_id s, cpu_pool ref self, integer value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt integer } & value & Number of cpus to use \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Possible Error Codes:} ++ {\tt POOL\_BAD\_STATE} ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_other\_config} ++ ++{\bf Overview:} ++Set the other\_config field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void set_other_config (session_id s, cpu_pool ref self, (string -> string) Map value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt (string $\rightarrow$ string) Map } & value & New value to set \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~add\_to\_other\_config} ++ ++{\bf Overview:} ++Add the given key-value pair to the other\_config field of the given cpu\_pool. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void add_to_other_config (session_id s, cpu_pool ref self, string key, string value)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt string } & key & Key to add \\ \hline ++{\tt string } & value & Value to add \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++ \noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~remove\_from\_other\_config} ++ ++{\bf Overview:} ++Remove the given key and its corresponding value from the other\_config ++field of the given cpu\_pool. If the key is not in that Map, then do nothing. ++ ++ \noindent {\bf Signature:} ++\begin{verbatim} void remove_from_other_config (session_id s, cpu_pool ref self, string key)\end{verbatim} ++ ++ ++\noindent{\bf Arguments:} ++ ++ ++\vspace{0.3cm} ++\begin{tabular}{|c|c|p{7cm}|} ++\hline ++{\bf type} & {\bf name} & {\bf description} \\ \hline ++{\tt cpu\_pool ref } & self & reference to the object \\ \hline ++{\tt string } & key & Key to remove \\ \hline ++\end{tabular} ++ ++\vspace{0.3cm} ++ ++\noindent {\bf Return Type:} ++{\tt ++void ++} ++ ++ ++ ++\vspace{0.3cm} ++\vspace{0.3cm} ++\vspace{0.3cm} ++\subsubsection{RPC name:~set\_sched\_policy} ++ ++{\bf Overview:} ++Set the sched\_policy field of the given cpu\_pool. 
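++
++For example, selecting the credit scheduler (the same policy name the
++test code in this series uses; \texttt{session} and \texttt{pool} are
++assumed):
++
++\begin{verbatim}
++ if (!xen_cpu_pool_set_sched_policy(session, pool, "credit"))
++     print_error(session);
++\end{verbatim}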
++
++ \noindent {\bf Signature:}
++\begin{verbatim} void set_sched_policy (session_id s, cpu_pool ref self, string new_sched_policy)\end{verbatim}
++
++
++\noindent{\bf Arguments:}
++
++
++\vspace{0.3cm}
++\begin{tabular}{|c|c|p{7cm}|}
++\hline
++{\bf type} & {\bf name} & {\bf description} \\ \hline
++{\tt cpu\_pool ref } & self & reference to the object \\ \hline
++{\tt string } & new\_sched\_policy & New value to set \\ \hline
++\end{tabular}
++\vspace{0.3cm}
++
++\noindent {\bf Return Type:}
++{\tt
++void
++}
++
++
diff --git a/cpu-pools-libxc.patch b/cpu-pools-libxc.patch
new file mode 100644
index 0000000..e9ba385
--- /dev/null
+++ b/cpu-pools-libxc.patch
@@ -0,0 +1,360 @@
+Index: xen-4.0.0-testing/tools/libxc/Makefile
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/Makefile
++++ xen-4.0.0-testing/tools/libxc/Makefile
+@@ -8,6 +8,7 @@ CTRL_SRCS-y :=
+ CTRL_SRCS-y += xc_core.c
+ CTRL_SRCS-$(CONFIG_X86) += xc_core_x86.c
+ CTRL_SRCS-$(CONFIG_IA64) += xc_core_ia64.c
++CTRL_SRCS-y += xc_cpupool.c
+ CTRL_SRCS-y += xc_domain.c
+ CTRL_SRCS-y += xc_evtchn.c
+ CTRL_SRCS-y += xc_misc.c
+Index: xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+===================================================================
+--- /dev/null
++++ xen-4.0.0-testing/tools/libxc/xc_cpupool.c
+@@ -0,0 +1,154 @@
++/******************************************************************************
++ * xc_cpupool.c
++ *
++ * API for manipulating and obtaining information on cpupools.
++ *
++ * Copyright (c) 2009, J Gross.
++ */
++
++#include <stdarg.h>
++#include "xc_private.h"
++
++int xc_cpupool_create(int xc_handle,
++                      uint32_t *ppoolid,
++                      uint32_t sched_id)
++{
++    int err;
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_CREATE;
++    domctl.u.cpupool_op.cpupool_id = (*ppoolid == 0) ?
++ XEN_DOMCTL_CPUPOOL_PAR_ANY : *ppoolid; ++ domctl.u.cpupool_op.sched_id = sched_id; ++ if ( (err = do_domctl_save(xc_handle, &domctl)) != 0 ) ++ return err; ++ ++ *ppoolid = domctl.u.cpupool_op.cpupool_id; ++ return 0; ++} ++ ++int xc_cpupool_destroy(int xc_handle, ++ uint32_t poolid) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_DESTROY; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ return do_domctl_save(xc_handle, &domctl); ++} ++ ++int xc_cpupool_getinfo(int xc_handle, ++ uint32_t first_poolid, ++ uint32_t n_max, ++ xc_cpupoolinfo_t *info) ++{ ++ int err = 0; ++ int p; ++ uint32_t poolid = first_poolid; ++ uint8_t local[sizeof (info->cpumap)]; ++ DECLARE_DOMCTL; ++ ++ memset(info, 0, n_max * sizeof(xc_cpupoolinfo_t)); ++ ++ for (p = 0; p < n_max; p++) ++ { ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_INFO; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local); ++ domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(info->cpumap) * 8; ++ ++ if ( (err = lock_pages(local, sizeof(local))) != 0 ) ++ { ++ PERROR("Could not lock memory for Xen hypercall"); ++ break; ++ } ++ err = do_domctl_save(xc_handle, &domctl); ++ unlock_pages(local, sizeof (local)); ++ ++ if ( err < 0 ) ++ break; ++ ++ info->cpupool_id = domctl.u.cpupool_op.cpupool_id; ++ info->sched_id = domctl.u.cpupool_op.sched_id; ++ info->n_dom = domctl.u.cpupool_op.n_dom; ++ bitmap_byte_to_64(&(info->cpumap), local, sizeof(local) * 8); ++ poolid = domctl.u.cpupool_op.cpupool_id + 1; ++ info++; ++ } ++ ++ if ( p == 0 ) ++ return err; ++ ++ return p; ++} ++ ++int xc_cpupool_addcpu(int xc_handle, ++ uint32_t poolid, ++ int cpu) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_ADDCPU; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ domctl.u.cpupool_op.cpu = (cpu < 0) ? XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu; ++ return do_domctl_save(xc_handle, &domctl); ++} ++ ++int xc_cpupool_removecpu(int xc_handle, ++ uint32_t poolid, ++ int cpu) ++{ ++ DECLARE_DOMCTL; ++ ++ domctl.cmd = XEN_DOMCTL_cpupool_op; ++ domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_RMCPU; ++ domctl.u.cpupool_op.cpupool_id = poolid; ++ domctl.u.cpupool_op.cpu = (cpu < 0) ? 
XEN_DOMCTL_CPUPOOL_PAR_ANY : cpu;
++    return do_domctl_save(xc_handle, &domctl);
++}
++
++int xc_cpupool_movedomain(int xc_handle,
++                          uint32_t poolid,
++                          uint32_t domid)
++{
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_MOVEDOMAIN;
++    domctl.u.cpupool_op.cpupool_id = poolid;
++    domctl.u.cpupool_op.domid = domid;
++    return do_domctl_save(xc_handle, &domctl);
++}
++
++int xc_cpupool_freeinfo(int xc_handle,
++                        uint64_t *cpumap)
++{
++    int err;
++    uint8_t local[sizeof (*cpumap)];
++    DECLARE_DOMCTL;
++
++    domctl.cmd = XEN_DOMCTL_cpupool_op;
++    domctl.u.cpupool_op.op = XEN_DOMCTL_CPUPOOL_OP_FREEINFO;
++    set_xen_guest_handle(domctl.u.cpupool_op.cpumap.bitmap, local);
++    domctl.u.cpupool_op.cpumap.nr_cpus = sizeof(*cpumap) * 8;
++
++    if ( (err = lock_pages(local, sizeof(local))) != 0 )
++    {
++        PERROR("Could not lock memory for Xen hypercall");
++        return err;
++    }
++
++    err = do_domctl_save(xc_handle, &domctl);
++    unlock_pages(local, sizeof (local));
++
++    if (err < 0)
++        return err;
++
++    bitmap_byte_to_64(cpumap, local, sizeof(local) * 8);
++
++    return 0;
++}
+Index: xen-4.0.0-testing/tools/libxc/xc_domain.c
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xc_domain.c
++++ xen-4.0.0-testing/tools/libxc/xc_domain.c
+@@ -6,6 +6,7 @@
+  * Copyright (c) 2003, K A Fraser.
+  */
+ 
++#include <stdarg.h>
+ #include "xc_private.h"
+ #include "xg_save_restore.h"
+ #include <xen/memory.h>
+@@ -15,15 +16,21 @@ int xc_domain_create(int xc_handle,
+                      uint32_t ssidref,
+                      xen_domain_handle_t handle,
+                      uint32_t flags,
+-                     uint32_t *pdomid)
++                     uint32_t *pdomid, ...)
+ {
+     int err;
++    va_list ap;
+     DECLARE_DOMCTL;
+ 
+     domctl.cmd = XEN_DOMCTL_createdomain;
+     domctl.domain = (domid_t)*pdomid;
+     domctl.u.createdomain.ssidref = ssidref;
+     domctl.u.createdomain.flags = flags;
++    if ( flags & XEN_DOMCTL_CDF_pool ) {
++        va_start(ap, pdomid);
++        domctl.u.createdomain.cpupool = va_arg(ap, uint32_t);
++        va_end(ap);
++    }
+     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
+     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
+         return err;
+@@ -206,6 +213,7 @@ int xc_domain_getinfo(int xc_handle,
+         info->cpu_time = domctl.u.getdomaininfo.cpu_time;
+         info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus;
+         info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
++        info->cpupool = domctl.u.getdomaininfo.cpupool;
+ 
+         memcpy(info->handle, domctl.u.getdomaininfo.handle,
+                sizeof(xen_domain_handle_t));
+Index: xen-4.0.0-testing/tools/libxc/xc_private.h
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xc_private.h
++++ xen-4.0.0-testing/tools/libxc/xc_private.h
+@@ -164,6 +164,19 @@ static inline int do_domctl(int xc_handl
+     return ret;
+ }
+ 
++static inline int do_domctl_save(int xc_handle, struct xen_domctl *domctl)
++{
++    int ret;
++
++    do
++    {
++        ret = do_domctl(xc_handle, domctl);
++    }
++    while ( (ret < 0 ) && (errno == EAGAIN) );
++
++    return ret;
++}
++
+ static inline int do_sysctl(int xc_handle, struct xen_sysctl *sysctl)
+ {
+     int ret = -1;
+Index: xen-4.0.0-testing/tools/libxc/xenctrl.h
+===================================================================
+--- xen-4.0.0-testing.orig/tools/libxc/xenctrl.h
++++ xen-4.0.0-testing/tools/libxc/xenctrl.h
+@@ -171,6 +171,7 @@ typedef struct xc_dominfo {
+     unsigned int nr_online_vcpus;
+     unsigned int max_vcpu_id;
+     xen_domain_handle_t handle;
++    unsigned int cpupool;
+ } xc_dominfo_t;
+ 
+ typedef
xen_domctl_getdomaininfo_t xc_domaininfo_t; +@@ -207,7 +208,7 @@ int xc_domain_create(int xc_handle, + uint32_t ssidref, + xen_domain_handle_t handle, + uint32_t flags, +- uint32_t *pdomid); ++ uint32_t *pdomid, ...); + + + /* Functions to produce a dump of a given domain +@@ -500,6 +501,100 @@ int xc_domain_setdebugging(int xc_handle + unsigned int enable); + + /* ++ * CPUPOOL MANAGEMENT FUNCTIONS ++ */ ++ ++typedef struct xc_cpupoolinfo { ++ uint32_t cpupool_id; ++ uint32_t sched_id; ++ uint32_t n_dom; ++ uint64_t cpumap; ++} xc_cpupoolinfo_t; ++ ++/** ++ * Create a new cpupool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm ppoolid pointer to the new cpupool id (in/out) ++ * @parm sched_id id of scheduler to use for pool ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_create(int xc_handle, ++ uint32_t *ppoolid, ++ uint32_t sched_id); ++ ++/** ++ * Destroy a cpupool. Pool must be unused and have no cpu assigned. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool to destroy ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_destroy(int xc_handle, ++ uint32_t poolid); ++ ++/** ++ * Get cpupool info. Returns info for up to the specified number of cpupools ++ * starting at the given id. ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm first_poolid lowest id for which info is returned ++ * @parm n_max maximum number of cpupools to return info ++ * @parm info pointer to xc_cpupoolinfo_t array ++ * return number of cpupool infos ++ */ ++int xc_cpupool_getinfo(int xc_handle, ++ uint32_t first_poolid, ++ uint32_t n_max, ++ xc_cpupoolinfo_t *info); ++ ++/** ++ * Add cpu to a cpupool. cpu may be -1 indicating the first unassigned. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool ++ * @parm cpu cpu number to add ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_addcpu(int xc_handle, ++ uint32_t poolid, ++ int cpu); ++ ++/** ++ * Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the cpupool ++ * @parm cpu cpu number to remove ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_removecpu(int xc_handle, ++ uint32_t poolid, ++ int cpu); ++ ++/** ++ * Move domain to another cpupool. ++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm poolid id of the destination cpupool ++ * @parm domid id of the domain to move ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_movedomain(int xc_handle, ++ uint32_t poolid, ++ uint32_t domid); ++ ++/** ++ * Return map of cpus not in any cpupool. 
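++ * Each set bit n in *cpumap marks physical cpu n as unassigned; the
++ * 64 bit mask is filled via bitmap_byte_to_64() in the implementation.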
++ * ++ * @parm xc_handle a handle to an open hypervisor interface ++ * @parm cpumap pointer where to store the cpumap ++ * return 0 on success, -1 on failure ++ */ ++int xc_cpupool_freeinfo(int xc_handle, ++ uint64_t *cpumap); ++ ++ ++/* + * EVENT CHANNEL FUNCTIONS + */ + diff --git a/cpu-pools-libxen.patch b/cpu-pools-libxen.patch new file mode 100644 index 0000000..bb4ab33 --- /dev/null +++ b/cpu-pools-libxen.patch @@ -0,0 +1,2180 @@ +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_all.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_all.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_all.h +@@ -37,4 +37,5 @@ + #include + #include + #include ++#include + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool.h +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool.h +@@ -0,0 +1,424 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#ifndef XEN_CPU_POOL_H ++#define XEN_CPU_POOL_H ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * The cpu_pool class. ++ * ++ * Management of CPU pools. ++ */ ++ ++ ++/** ++ * Free the given xen_cpu_pool. The given handle must have been allocated ++ * by this library. ++ */ ++extern void ++xen_cpu_pool_free(xen_cpu_pool cpu_pool); ++ ++ ++typedef struct xen_cpu_pool_set ++{ ++ size_t size; ++ xen_cpu_pool *contents[]; ++} xen_cpu_pool_set; ++ ++/** ++ * Allocate a xen_cpu_pool_set of the given size. ++ */ ++extern xen_cpu_pool_set * ++xen_cpu_pool_set_alloc(size_t size); ++ ++/** ++ * Free the given xen_cpu_pool_set. The given set must have been allocated ++ * by this library. ++ */ ++extern void ++xen_cpu_pool_set_free(xen_cpu_pool_set *set); ++ ++ ++typedef struct xen_cpu_pool_record ++{ ++ xen_cpu_pool handle; ++ char *uuid; ++ char *name_label; ++ char *name_description; ++ struct xen_host_record_opt *resident_on; ++ bool auto_power_on; ++ struct xen_vm_record_opt_set *started_vms; ++ int64_t ncpu; ++ char *sched_policy; ++ struct xen_string_set *proposed_cpus; ++ struct xen_host_cpu_record_opt_set *host_cpus; ++ bool activated; ++ xen_string_string_map *other_config; ++} xen_cpu_pool_record; ++ ++/** ++ * Allocate a xen_cpu_pool_record. ++ */ ++extern xen_cpu_pool_record * ++xen_cpu_pool_record_alloc(void); ++ ++/** ++ * Free the given xen_cpu_pool_record, and all referenced values. The given ++ * record must have been allocated by this library. 
++ */ ++extern void ++xen_cpu_pool_record_free(xen_cpu_pool_record *record); ++ ++ ++typedef struct xen_cpu_pool_record_opt ++{ ++ bool is_record; ++ union ++ { ++ xen_cpu_pool handle; ++ xen_cpu_pool_record *record; ++ } u; ++} xen_cpu_pool_record_opt; ++ ++/** ++ * Allocate a xen_cpu_pool_record_opt. ++ */ ++extern xen_cpu_pool_record_opt * ++xen_cpu_pool_record_opt_alloc(void); ++ ++/** ++ * Free the given xen_cpu_pool_record_opt, and all referenced values. The ++ * given record_opt must have been allocated by this library. ++ */ ++extern void ++xen_cpu_pool_record_opt_free(xen_cpu_pool_record_opt *record_opt); ++ ++ ++typedef struct xen_cpu_pool_record_set ++{ ++ size_t size; ++ xen_cpu_pool_record *contents[]; ++} xen_cpu_pool_record_set; ++ ++/** ++ * Allocate a xen_cpu_pool_record_set of the given size. ++ */ ++extern xen_cpu_pool_record_set * ++xen_cpu_pool_record_set_alloc(size_t size); ++ ++/** ++ * Free the given xen_cpu_pool_record_set, and all referenced values. The ++ * given set must have been allocated by this library. ++ */ ++extern void ++xen_cpu_pool_record_set_free(xen_cpu_pool_record_set *set); ++ ++ ++ ++typedef struct xen_cpu_pool_record_opt_set ++{ ++ size_t size; ++ xen_cpu_pool_record_opt *contents[]; ++} xen_cpu_pool_record_opt_set; ++ ++/** ++ * Allocate a xen_cpu_pool_record_opt_set of the given size. ++ */ ++extern xen_cpu_pool_record_opt_set * ++xen_cpu_pool_record_opt_set_alloc(size_t size); ++ ++/** ++ * Free the given xen_cpu_pool_record_opt_set, and all referenced values. ++ * The given set must have been allocated by this library. ++ */ ++extern void ++xen_cpu_pool_record_opt_set_free(xen_cpu_pool_record_opt_set *set); ++ ++ ++/** ++ * Get a record containing the current state of the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get a reference to the cpu_pool instance with the specified UUID. ++ */ ++extern bool ++xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid); ++ ++ ++/** ++ * Create a new cpu_pool instance, and return its handle. ++ */ ++extern bool ++xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result, ++ xen_cpu_pool_record *record); ++ ++ ++/** ++ * Destroy the specified VBD instance. ++ */ ++extern bool ++xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get the uuid field of the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Deactivate the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Activate the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Add a physical cpu to the active pool. ++ */ ++extern bool ++xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu); ++ ++ ++/** ++ * Remove a physical cpu from the active pool. ++ */ ++extern bool ++xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu); ++ ++ ++/** ++ * Return a list of all the cpu_pools known to the system. ++ */ ++extern bool ++xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result); ++ ++ ++/** ++ * Get the uuid field of the cpu_pool with given name. 
++ */ ++extern bool ++xen_cpu_pool_get_by_name_label(xen_session *session, ++ struct xen_cpu_pool_set **result, char *label); ++ ++ ++/** ++ * Get activation state of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_activated(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get auto_power_on option of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get host_cpu refs of all physical cpus of cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get name description field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_name_description(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get name label field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_name_label(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get count of physical cpus to attach to cpu_pool on activation. ++ */ ++extern bool ++xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get the other_config field of the given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get host the cpu_pool is resident on. ++ */ ++extern bool ++xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get sched_policy field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_sched_policy(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Get set of started vms in given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_get_started_VMs(xen_session *session, xen_vm_set **result, ++ xen_cpu_pool cpu_pool); ++ ++ ++/** ++ * Set auto_power_on field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool, ++ bool auto_power_on); ++ ++ ++/** ++ * Set proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_set *proposed_cpus); ++ ++ ++/** ++ * Add a proposed cpu to proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu); ++ ++ ++/** ++ * Remove a proposed cpu from proposed_CPUs field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu); ++ ++ ++/** ++ * Set name_label field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool, ++ char *label); ++ ++ ++/** ++ * Set name_description field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool, ++ char *descr); ++ ++ ++/** ++ * Set ncpu field of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu); ++ ++ ++/** ++ * Set the other_config field of the given cpu_pool. 
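++ * This replaces the map as a whole; use add_to_other_config() /
++ * remove_from_other_config() below to change individual pairs.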
++ */ ++extern bool ++xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_string_map *other_config); ++ ++ ++/** ++ * Add the given key-value pair to the other_config field of the given ++ * cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key, char *value); ++ ++ ++/** ++ * Remove the given key and its corresponding value from the ++ * other_config field of the given cpu_pool. If the key is not in that Map, then ++ * do nothing. ++ */ ++extern bool ++xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key); ++ ++/** ++ * Set sched_policy of given cpu_pool. ++ */ ++extern bool ++xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool, ++ char *sched_policy); ++ ++ ++#endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool_decl.h +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_cpu_pool_decl.h +@@ -0,0 +1,30 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++#ifndef XEN_CPU_POOL_DECL_H ++#define XEN_CPU_POOL_DECL_H ++ ++typedef void *xen_cpu_pool; ++ ++struct xen_cpu_pool_set; ++struct xen_cpu_pool_record; ++struct xen_cpu_pool_record_set; ++struct xen_cpu_pool_record_opt; ++struct xen_cpu_pool_record_opt_set; ++ ++#endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_host.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host.h +@@ -29,7 +29,7 @@ + #include + #include + #include +- ++#include + + /* + * The host class. +@@ -91,6 +91,7 @@ typedef struct xen_host_record + struct xen_pbd_record_opt_set *pbds; + struct xen_host_cpu_record_opt_set *host_cpus; + struct xen_host_metrics_record_opt *metrics; ++ struct xen_cpu_pool_record_opt_set *resident_cpu_pools; + } xen_host_record; + + /** +@@ -494,4 +495,11 @@ extern bool + xen_host_get_all(xen_session *session, struct xen_host_set **result); + + ++/** ++ * Get list of resident cpu pools. 
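++ * The result set holds one cpu_pool reference per pool hosted on the
++ * given host (exercised by pool_tests() in test_bindings.c below).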
++ */ ++extern bool ++xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result, ++ xen_host host); ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host_cpu.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_host_cpu.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_host_cpu.h +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + + /* +@@ -72,6 +73,7 @@ typedef struct xen_host_cpu_record + char *flags; + char *features; + double utilisation; ++ struct xen_cpu_pool_record_opt_set *cpu_pools; + } xen_host_cpu_record; + + /** +@@ -244,4 +246,18 @@ extern bool + xen_host_cpu_get_all(xen_session *session, struct xen_host_cpu_set **result); + + ++/** ++ * Get the ref of the cpu_pool to which the host_cpu belongs. ++ */ ++extern bool ++xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu); ++ ++ ++/** ++ * Return a list of all the host_cpus not assigned to a cpu_pool. ++ */ ++extern bool ++xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result); ++ ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/include/xen/api/xen_vm.h +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/include/xen/api/xen_vm.h ++++ xen-4.0.0-testing/tools/libxen/include/xen/api/xen_vm.h +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + + /* +@@ -113,6 +114,8 @@ typedef struct xen_vm_record + struct xen_vm_metrics_record_opt *metrics; + struct xen_vm_guest_metrics_record_opt *guest_metrics; + char *security_label; ++ char *pool_name; ++ struct xen_cpu_pool_record_opt_set *cpu_pool; + } xen_vm_record; + + /** +@@ -905,4 +908,33 @@ xen_vm_set_security_label(xen_session *s + extern bool + xen_vm_get_security_label(xen_session *session, char **result, xen_vm vm); + ++ ++/** ++ * Get the cpu_pool ref field of a domain. ++ */ ++extern bool ++xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm); ++ ++ ++/** ++ * Get the pool_name field of a domain. ++ */ ++extern bool ++xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm); ++ ++ ++/** ++ * Set the pool_name field of a domain. ++ */ ++extern bool ++xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name); ++ ++ ++/** ++ * Migrate the VM to another cpu_pool (on the same host). This can only be ++ * called when the specified VM is in the Running state. ++ */ ++extern bool ++xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool); ++ + #endif +Index: xen-4.0.0-testing/tools/libxen/src/xen_cpu_pool.c +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/libxen/src/xen_cpu_pool.c +@@ -0,0 +1,671 @@ ++/* ++ * Copyright (c) 2006-2007, XenSource Inc. ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++ ++ ++#include ++#include ++ ++#include "xen_internal.h" ++#include ++#include ++#include ++ ++XEN_FREE(xen_cpu_pool) ++XEN_SET_ALLOC_FREE(xen_cpu_pool) ++XEN_ALLOC(xen_cpu_pool_record) ++XEN_SET_ALLOC_FREE(xen_cpu_pool_record) ++XEN_ALLOC(xen_cpu_pool_record_opt) ++XEN_RECORD_OPT_FREE(xen_cpu_pool) ++XEN_SET_ALLOC_FREE(xen_cpu_pool_record_opt) ++ ++ ++static const struct_member xen_cpu_pool_record_struct_members[] = ++ { ++ { .key = "uuid", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, uuid) }, ++ { .key = "name_label", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, name_label) }, ++ { .key = "name_description", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, name_description) }, ++ { .key = "resident_on", ++ .type = &abstract_type_ref, ++ .offset = offsetof(xen_cpu_pool_record, resident_on) }, ++ { .key = "auto_power_on", ++ .type = &abstract_type_bool, ++ .offset = offsetof(xen_cpu_pool_record, auto_power_on) }, ++ { .key = "started_VMs", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_cpu_pool_record, started_vms) }, ++ { .key = "ncpu", ++ .type = &abstract_type_int, ++ .offset = offsetof(xen_cpu_pool_record, ncpu) }, ++ { .key = "sched_policy", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_cpu_pool_record, sched_policy) }, ++ { .key = "proposed_CPUs", ++ .type = &abstract_type_string_set, ++ .offset = offsetof(xen_cpu_pool_record, proposed_cpus) }, ++ { .key = "host_CPUs", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_cpu_pool_record, host_cpus) }, ++ { .key = "activated", ++ .type = &abstract_type_bool, ++ .offset = offsetof(xen_cpu_pool_record, activated) }, ++ { .key = "other_config", ++ .type = &abstract_type_string_string_map, ++ .offset = offsetof(xen_cpu_pool_record, other_config) }, ++ }; ++ ++ ++const abstract_type xen_cpu_pool_record_abstract_type_ = ++ { ++ .typename = STRUCT, ++ .struct_size = sizeof(xen_cpu_pool_record), ++ .member_count = ++ sizeof(xen_cpu_pool_record_struct_members) / sizeof(struct_member), ++ .members = xen_cpu_pool_record_struct_members ++ }; ++ ++ ++void ++xen_cpu_pool_record_free(xen_cpu_pool_record *record) ++{ ++ if (record == NULL) ++ { ++ return; ++ } ++ free(record->handle); ++ free(record->uuid); ++ free(record->name_label); ++ free(record->name_description); ++ xen_host_record_opt_free(record->resident_on); ++ xen_vm_record_opt_set_free(record->started_vms); ++ free(record->sched_policy); ++ xen_string_set_free(record->proposed_cpus); ++ xen_host_cpu_record_opt_set_free(record->host_cpus); ++ xen_string_string_map_free(record->other_config); ++ free(record); ++} ++ ++ ++bool ++xen_cpu_pool_get_record(xen_session *session, xen_cpu_pool_record **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = xen_cpu_pool_record_abstract_type_; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_record"); ++ ++ if (session->ok) ++ { ++ (*result)->handle = xen_strdup_((*result)->uuid); ++ } ++ ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_by_uuid(xen_session *session, xen_cpu_pool *result, char *uuid) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = 
&abstract_type_string, ++ .u.string_val = uuid } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_by_uuid"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_create(xen_session *session, xen_cpu_pool *result, ++ xen_cpu_pool_record *record) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &xen_cpu_pool_record_abstract_type_, ++ .u.struct_val = record } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.create"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_destroy(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ xen_call_(session, "cpu_pool.destroy", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_uuid(xen_session *session, char **result, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_uuid"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_deactivate(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ }; ++ ++ xen_call_(session, "cpu_pool.deactivate", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_activate(xen_session *session, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ }; ++ ++ xen_call_(session, "cpu_pool.activate", param_values, 1, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu }, ++ }; ++ ++ xen_call_(session, "cpu_pool.add_host_CPU_live", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_host_CPU_live(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu }, ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_host_CPU_live", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_all(xen_session *session, struct xen_cpu_pool_set **result) ++{ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ xen_call_(session, "cpu_pool.get_all", NULL, 0, &result_type, result); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_by_name_label(xen_session *session, ++ struct xen_cpu_pool_set **result, char *label) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = label } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_by_name_label"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_activated(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ 
abstract_type result_type = abstract_type_bool; ++ ++ XEN_CALL_("cpu_pool.get_activated"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_auto_power_on(xen_session *session, bool *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_bool; ++ ++ XEN_CALL_("cpu_pool.get_auto_power_on"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_host_CPUs(xen_session *session, struct xen_host_cpu_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_host_CPUs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_name_description(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_name_description"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_name_label(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_name_label"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_ncpu(xen_session *session, int64_t *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_int; ++ ++ XEN_CALL_("cpu_pool.get_ncpu"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_proposed_CPUs(xen_session *session, struct xen_string_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_proposed_CPUs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_other_config(xen_session *session, xen_string_string_map **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_string_map; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_other_config"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_resident_on(xen_session *session, xen_host *result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_resident_on"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_sched_policy(xen_session *session, char **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_sched_policy"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_get_started_VMs(xen_session *session, 
xen_vm_set **result, ++ xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("cpu_pool.get_started_VMs"); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_auto_power_on(xen_session *session, xen_cpu_pool cpu_pool, ++ bool auto_power_on) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_bool, ++ .u.bool_val = auto_power_on } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_auto_power_on", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_set *proposed_cpus) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string_set, ++ .u.set_val = (arbitrary_set *)proposed_cpus } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_to_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = proposed_cpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.add_to_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_from_proposed_CPUs(xen_session *session, xen_cpu_pool cpu_pool, ++ char* proposed_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = proposed_cpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_from_proposed_CPUs", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_name_label(xen_session *session, xen_cpu_pool cpu_pool, ++ char *label) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = label } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_name_label", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_name_description(xen_session *session, xen_cpu_pool cpu_pool, ++ char *descr) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = descr } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_name_description", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_ncpu(xen_session *session, xen_cpu_pool cpu_pool, int64_t ncpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_int, ++ .u.int_val = ncpu } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_ncpu", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ xen_string_string_map *other_config) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string_string_map, ++ .u.set_val = (arbitrary_set *)other_config } ++ }; ++ ++ xen_call_(session, 
"cpu_pool.set_other_config", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_add_to_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key, char *value) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = key }, ++ { .type = &abstract_type_string, ++ .u.string_val = value } ++ }; ++ ++ xen_call_(session, "cpu_pool.add_to_other_config", param_values, 3, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_remove_from_other_config(xen_session *session, xen_cpu_pool cpu_pool, ++ char *key) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = key } ++ }; ++ ++ xen_call_(session, "cpu_pool.remove_from_other_config", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_cpu_pool_set_sched_policy(xen_session *session, xen_cpu_pool cpu_pool, ++ char *sched_policy) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool }, ++ { .type = &abstract_type_string, ++ .u.string_val = sched_policy } ++ }; ++ ++ xen_call_(session, "cpu_pool.set_sched_policy", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_host.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_host.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_host.c +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_host) +@@ -108,7 +109,10 @@ static const struct_member xen_host_reco + .offset = offsetof(xen_host_record, host_cpus) }, + { .key = "metrics", + .type = &abstract_type_ref, +- .offset = offsetof(xen_host_record, metrics) } ++ .offset = offsetof(xen_host_record, metrics) }, ++ { .key = "resident_cpu_pools", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_host_record, resident_cpu_pools) } + }; + + const abstract_type xen_host_record_abstract_type_ = +@@ -148,6 +152,7 @@ xen_host_record_free(xen_host_record *re + xen_pbd_record_opt_set_free(record->pbds); + xen_host_cpu_record_opt_set_free(record->host_cpus); + xen_host_metrics_record_opt_free(record->metrics); ++ xen_cpu_pool_record_opt_set_free(record->resident_cpu_pools); + free(record); + } + +@@ -889,3 +894,22 @@ xen_host_get_uuid(xen_session *session, + XEN_CALL_("host.get_uuid"); + return session->ok; + } ++ ++ ++bool ++xen_host_get_resident_cpu_pools(xen_session *session, struct xen_cpu_pool_set **result, ++ xen_host host) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = host } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("host.get_resident_cpu_pools"); ++ return session->ok; ++} ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_host_cpu.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_host_cpu.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_host_cpu.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_host_cpu) +@@ -66,7 +67,10 @@ static const struct_member xen_host_cpu_ + .offset = offsetof(xen_host_cpu_record, features) }, + { .key = "utilisation", + .type = &abstract_type_float, +- .offset = offsetof(xen_host_cpu_record, utilisation) } ++ .offset = 
offsetof(xen_host_cpu_record, utilisation) }, ++ { .key = "cpu_pool", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_host_cpu_record, cpu_pools) }, + }; + + const abstract_type xen_host_cpu_record_abstract_type_ = +@@ -94,6 +98,7 @@ xen_host_cpu_record_free(xen_host_cpu_re + free(record->stepping); + free(record->flags); + free(record->features); ++ xen_cpu_pool_record_opt_set_free(record->cpu_pools); + free(record); + } + +@@ -315,3 +320,34 @@ xen_host_cpu_get_uuid(xen_session *sessi + XEN_CALL_("host_cpu.get_uuid"); + return session->ok; + } ++ ++ ++bool ++xen_host_cpu_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_host_cpu host_cpu) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = host_cpu } ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("host_cpu.get_cpu_pool"); ++ return session->ok; ++} ++ ++ ++bool ++xen_host_cpu_get_unassigned_cpus(xen_session *session, struct xen_host_cpu_set **result) ++{ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ xen_call_(session, "host_cpu.get_unassigned_cpus", NULL, 0, &result_type, result); ++ return session->ok; ++} ++ ++ ++ +Index: xen-4.0.0-testing/tools/libxen/src/xen_vm.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/src/xen_vm.c ++++ xen-4.0.0-testing/tools/libxen/src/xen_vm.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + + XEN_FREE(xen_vm) +@@ -165,7 +166,13 @@ static const struct_member xen_vm_record + .offset = offsetof(xen_vm_record, guest_metrics) }, + { .key = "security_label", + .type = &abstract_type_string, +- .offset = offsetof(xen_vm_record, security_label) } ++ .offset = offsetof(xen_vm_record, security_label) }, ++ { .key = "pool_name", ++ .type = &abstract_type_string, ++ .offset = offsetof(xen_vm_record, pool_name) }, ++ { .key = "cpu_pool", ++ .type = &abstract_type_ref_set, ++ .offset = offsetof(xen_vm_record, cpu_pool) }, + }; + + const abstract_type xen_vm_record_abstract_type_ = +@@ -209,6 +216,7 @@ xen_vm_record_free(xen_vm_record *record + xen_string_string_map_free(record->other_config); + xen_vm_metrics_record_opt_free(record->metrics); + xen_vm_guest_metrics_record_opt_free(record->guest_metrics); ++ xen_cpu_pool_record_opt_set_free(record->cpu_pool); + free(record->security_label); + free(record); + } +@@ -1781,3 +1789,71 @@ xen_vm_get_security_label(xen_session *s + XEN_CALL_("VM.get_security_label"); + return session->ok; + } ++ ++ ++bool ++xen_vm_get_cpu_pool(xen_session *session, struct xen_cpu_pool_set **result, xen_vm vm) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ }; ++ ++ abstract_type result_type = abstract_type_string_set; ++ ++ *result = NULL; ++ XEN_CALL_("VM.get_cpu_pool"); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_get_pool_name(xen_session *session, char **result, xen_vm vm) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ }; ++ ++ abstract_type result_type = abstract_type_string; ++ ++ *result = NULL; ++ XEN_CALL_("VM.get_pool_name"); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_set_pool_name(xen_session *session, xen_vm vm, char *pool_name) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ { .type = &abstract_type_string, ++ .u.string_val = pool_name } ++ }; 
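++    /* No result expected: the call passes NULL result type and value. */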
++ ++ xen_call_(session, "VM.set_pool_name", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ ++bool ++xen_vm_cpu_pool_migrate(xen_session *session, xen_vm vm, xen_cpu_pool cpu_pool) ++{ ++ abstract_value param_values[] = ++ { ++ { .type = &abstract_type_string, ++ .u.string_val = vm }, ++ { .type = &abstract_type_string, ++ .u.string_val = cpu_pool } ++ }; ++ ++ xen_call_(session, "VM.cpu_pool_migrate", param_values, 2, NULL, NULL); ++ return session->ok; ++} ++ ++ +Index: xen-4.0.0-testing/tools/libxen/test/test_bindings.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/libxen/test/test_bindings.c ++++ xen-4.0.0-testing/tools/libxen/test/test_bindings.c +@@ -28,6 +28,7 @@ + #include + + //#define PRINT_XML ++//////////////#define POOL_TESTS + + static void usage() + { +@@ -125,6 +126,649 @@ static void print_error(xen_session *ses + } + + ++#ifdef POOL_TESTS ++#define NAME_DESCRIPTION "TestPool" ++#define NAME_DESCRIPTION_2 "TestPool-2" ++#define NAME_LABEL "Pool-1" ++#define NAME_LABEL_2 "Pool-2" ++#define SCHED_NAME "credit" ++#define NCPU_VAL 2 ++#define NCPU_VAL_2 1 ++ ++ ++static int pool_tests(xen_session *session, xen_host host) ++{ ++ int rc = 1; ++ xen_cpu_pool_set *pools = NULL; ++ xen_host_record *host_record = NULL; ++ xen_cpu_pool_record_opt *cpu_pool_opt = NULL; ++ xen_cpu_pool_record *cpu_pool_rec = NULL; ++ xen_host_cpu_set *host_cpu_set = NULL; ++ xen_host_cpu_record *host_cpu_record = NULL; ++ xen_vm_set *vm_set = NULL; ++ xen_cpu_pool pool = NULL; ++ xen_cpu_pool pool_out = NULL; ++ xen_string_string_map *pool_other_config = NULL; ++ xen_vm_record *vm_record = NULL; ++ xen_string_set *proposed_cpus = NULL; ++ xen_host res_host = NULL; ++ char *name_description = NULL; ++ char *name_label = NULL; ++ char *sched_policy = NULL; ++ char *pool_uuid = NULL; ++ int64_t ncpu; ++ ++ for (int loop= 0; loop < 1; loop++) ++ { ++ // Test extensions of class host ++ printf("Test cpu_pool extension of host class -----------------------------------------\n"); ++ ++ printf("host.get_resident_cpu_pools\n"); ++ if (!xen_host_get_resident_cpu_pools(session, &pools, host)) ++ { ++ break; ++ } ++ if (pools->size != 1) ++ { ++ printf("Wrong pool count; only one pool expected\n"); ++ break; ++ } ++ printf("Pool UUID %s\n", (char*)pools->contents[0]); ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ printf("host.get_record\n"); ++ if (!xen_host_get_record(session, &host_record, host)) ++ { ++ break; ++ } ++ printf("Pool count %d\n", (int)host_record->resident_cpu_pools->size); ++ if (host_record->resident_cpu_pools->size != 1) ++ { ++ break; ++ } ++ cpu_pool_opt = host_record->resident_cpu_pools->contents[0]; ++ printf("Pool UUID %s\n", (char*)cpu_pool_opt->u.handle); ++ xen_host_record_free(host_record); ++ host_record = NULL; ++ cpu_pool_opt = NULL; ++ ++ ++ // Test extensions of class host_cpu ++ printf("host_cpu.get_all\n"); ++ if (!xen_host_cpu_get_all(session, &host_cpu_set)) ++ { ++ break; ++ } ++ ++ printf("host_cpu.get_cpu_pool & host_cpu.get_record\n"); ++ for (int i= 0; i < host_cpu_set->size; i++) ++ { ++ if (!xen_host_cpu_get_cpu_pool(session, &pools, host_cpu_set->contents[i])) ++ { ++ break; ++ } ++ if (pools->size > 1) ++ { ++ printf("Wrong pool count (xen_host_cpu_get_cpu_pool)\n"); ++ break; ++ } ++ ++ printf("host_cpu (get_cpu_pool) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i], ++ pools->size != 0 ? 
(char*)pools->contents[0] : "(None)"); ++ ++ if (!xen_host_cpu_get_record(session, &host_cpu_record, host_cpu_set->contents[i])) ++ { ++ break; ++ } ++ if (host_cpu_record->cpu_pools->size > 1) ++ { ++ printf("Wrong pool count (xen_host_cpu_get_record)\n"); ++ break; ++ } ++ ++ printf("host_cpu (get_record) %s, cpu_pool %s\n", (char*)host_cpu_set->contents[i], ++ host_cpu_record->cpu_pools->size != 0 ++ ? (char*)((xen_cpu_pool_record_opt*)(host_cpu_record->cpu_pools->contents[0])->u.handle) ++ : "(None)"); ++ ++ } ++ xen_host_cpu_record_free(host_cpu_record); ++ host_cpu_record = NULL; ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ printf("host_cpu.get_unassigned_cpus\n"); ++ if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set)) ++ { ++ break; ++ } ++ printf("Free cpus (not bound to a pool)\n"); ++ for (int i= 0; i < host_cpu_set->size; i++) ++ { ++ printf(" cpu UUID %s\n", (char*)host_cpu_set->contents[i]); ++ } ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ ++ ++ printf("vm.get_record\n"); ++ if (!xen_vm_get_all(session, &vm_set)) ++ { ++ break; ++ } ++ ++ if (!xen_vm_get_record(session, &vm_record, vm_set->contents[0])) ++ { ++ break; ++ } ++ printf("VM %s, pool_name %s, cpu_pool %s\n", (char*)vm_set->contents[0], ++ vm_record->pool_name, (char*)vm_record->cpu_pool->contents[0]); ++ ++ xen_vm_record_free(vm_record); ++ vm_record = NULL; ++ ++ printf("vm.get_cpu_pool\n"); ++ if (!xen_vm_get_cpu_pool(session, &pools, vm_set->contents[0])) ++ { ++ break; ++ } ++ printf("vm_get_cpu_pool %s\n", (char*)pools->contents[0]); ++ ++ xen_vm_set_free(vm_set); ++ xen_cpu_pool_set_free(pools); ++ vm_set = NULL; ++ pools = NULL; ++ ++ ++ // Class cpu_pool ++ ++ // create ++ pool_other_config = xen_string_string_map_alloc(1); ++ pool_other_config->contents[0].key = strdup("type"); ++ pool_other_config->contents[0].val = strdup("bs2000"); ++ xen_string_set *proposed_CPUs_set = xen_string_set_alloc(1); ++ proposed_CPUs_set->contents[0] = strdup("3"); ++ ++ xen_cpu_pool_record new_cpu_pool_record = ++ { ++ .name_label = NAME_LABEL, ++ .name_description = NAME_DESCRIPTION, ++ .auto_power_on = false, ++ .ncpu = NCPU_VAL, ++ .sched_policy = SCHED_NAME, ++ .proposed_cpus = proposed_CPUs_set, ++ .other_config = pool_other_config, ++ }; ++ ++ printf("cpu_pool.create\n"); ++ if (!xen_cpu_pool_create(session, &pool, &new_cpu_pool_record)) ++ { ++ break; ++ } ++ printf("New Pool UUID %s\n", (char*)pool); ++ xen_string_set_free(proposed_CPUs_set); ++ proposed_CPUs_set = NULL; ++ xen_string_string_map_free(pool_other_config); ++ pool_other_config = NULL; ++ ++ // get_by_name_label ++ printf("cpu_pool.get_by_name_label\n"); ++ if (!xen_cpu_pool_get_by_name_label(session, &pools, "Pool-1")) ++ { ++ break; ++ } ++ if (strcmp((char*)pools->contents[0], (char*)pool) != 0) ++ { ++ break; ++ } ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ ++ // get_by_uuid ++ printf("cpu_pool.get_by_uuid\n"); ++ if (!xen_cpu_pool_get_by_uuid(session, &pool_out, pool)) ++ { ++ break; ++ } ++ if (strcmp((char*)pool_out, (char*)pool) != 0) ++ { ++ printf("Wrong pool returned\n"); ++ break; ++ } ++ xen_cpu_pool_free(pool_out); ++ pool_out = NULL; ++ ++ // get_all ++ printf("cpu_pool.get_all\n"); ++ if (!xen_cpu_pool_get_all(session, &pools)) ++ { ++ break; ++ } ++ if (pools->size != 2) ++ { ++ printf("Wrong pool count (%d)\n", (int)pools->size); ++ break; ++ } ++ xen_cpu_pool_set_free(pools); ++ pools = NULL; ++ ++ ++ // 
get_activated ++ printf("cpu_pool.get_activated\n"); ++ bool activated_state = true; ++ if (!xen_cpu_pool_get_activated(session, &activated_state, pool)) ++ { ++ break; ++ } ++ if (activated_state) ++ { ++ printf("Pool must not be activated\n"); ++ break; ++ } ++ ++ ++ // get_auto_power_on ++ printf("cpu_pool.get_auto_power_on\n"); ++ bool power_state = true; ++ if (!xen_cpu_pool_get_auto_power_on(session, &power_state, pool)) ++ { ++ break; ++ } ++ if (power_state) ++ { ++ printf("Pool must not have attribute 'auto_power_on'\n"); ++ break; ++ } ++ ++ // get_host_CPUs ++ printf("cpu_pool.get_host_CPUs\n"); ++ if (!xen_cpu_pool_get_host_CPUs(session, &host_cpu_set, pool)) ++ { ++ break; ++ } ++ if (host_cpu_set->size != 0) ++ { ++ printf("Pool must not have any attached cpus\n"); ++ break; ++ } ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ ++ ++ // get_name_description ++ printf("cpu_pool.get_name_description\n"); ++ if (!xen_cpu_pool_get_name_description(session, &name_description, pool)) ++ { ++ break; ++ } ++ if (strcmp(NAME_DESCRIPTION, name_description) != 0) ++ { ++ printf("Pool has wrong name_description\n"); ++ break; ++ } ++ free(name_description); ++ name_description = NULL; ++ ++ ++ // get_name_label ++ printf("cpu_pool.get_name_label\n"); ++ if (!xen_cpu_pool_get_name_label(session, &name_label, pool)) ++ { ++ break; ++ } ++ if (strcmp(NAME_LABEL, name_label) != 0) ++ { ++ printf("Pool has wrong name_label\n"); ++ break; ++ } ++ free(name_label); ++ name_label = NULL; ++ ++ // get_ncpu ++ printf("cpu_pool.get_ncpu\n"); ++ if (!xen_cpu_pool_get_ncpu(session, &ncpu, pool)) ++ { ++ break; ++ } ++ if (NCPU_VAL != ncpu) ++ { ++ printf("Pool has wrong ncpu\n"); ++ break; ++ } ++ ++ // get_proposed_CPUs ++ printf("cpu_pool.get_proposed_CPUs\n"); ++ if (!xen_cpu_pool_get_proposed_CPUs(session, &proposed_cpus, pool)) ++ { ++ break; ++ } ++ if (proposed_cpus->size != 1) ++ { ++ printf("Pool has wrong proposed_cpus count\n"); ++ break; ++ } ++ xen_string_set_free(proposed_cpus); ++ proposed_cpus = NULL; ++ ++ ++ // get_other_config ++ printf("cpu_pool.get_other_config\n"); ++ if (!xen_cpu_pool_get_other_config(session, &pool_other_config, pool)) ++ { ++ break; ++ } ++ if (pool_other_config->size != 1) ++ { ++ printf("Pool has wrong other_config element count\n"); ++ break; ++ } ++ if ((strcmp(pool_other_config->contents[0].key, "type") != 0) || ++ (strcmp(pool_other_config->contents[0].val, "bs2000") != 0)) ++ { ++ printf("Pool has wrong other_config attributes\n"); ++ break; ++ } ++ xen_string_string_map_free(pool_other_config); ++ pool_other_config = NULL; ++ ++ ++ // get_record ++ printf("cpu_pool.get_record\n"); ++ if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool)) ++ { ++ break; ++ } ++ if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL) != 0) || ++ (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION) != 0) || ++ (cpu_pool_rec->auto_power_on) || ++ (cpu_pool_rec->ncpu != NCPU_VAL) || ++ (cpu_pool_rec->started_vms->size != 0) || ++ (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) || ++ (cpu_pool_rec->proposed_cpus->size != 1) || ++ (cpu_pool_rec->host_cpus->size != 0) || ++ (cpu_pool_rec->activated) || ++ (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) || ++ (strcmp(cpu_pool_rec->uuid, pool) != 0) || ++ (cpu_pool_rec->other_config->size != 1)) ++ { ++ printf("Wrong record output\n"); ++ break; ++ } ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ cpu_pool_rec = NULL; ++ ++ ++ // get_resident_on ++ printf("cpu_pool.get_resident_on\n"); ++ if
(!xen_cpu_pool_get_resident_on(session, &res_host, pool)) ++ { ++ break; ++ } ++ if (strcmp(res_host, host) != 0) ++ { ++ printf("Wrong resident host returned\n"); ++ break; ++ } ++ xen_host_free(res_host); ++ res_host = NULL; ++ ++ ++ // get_sched_policy ++ printf("cpu_pool.get_sched_policy\n"); ++ if (!xen_cpu_pool_get_sched_policy(session, &sched_policy, pool)) ++ { ++ break; ++ } ++ if (strcmp(sched_policy, SCHED_NAME) != 0) ++ { ++ printf("Wrong sched_policy returned\n"); ++ break; ++ } ++ free(sched_policy); ++ sched_policy = NULL; ++ ++ ++ // get_started_VMs ++ printf("cpu_pool.get_started_VMs\n"); ++ if (!xen_cpu_pool_get_started_VMs(session, &vm_set, pool)) ++ { ++ break; ++ } ++ if (vm_set->size != 0) ++ { ++ printf("Wrong count of started VMs\n"); ++ break; ++ } ++ xen_vm_set_free(vm_set); ++ vm_set = NULL; ++ ++ ++ // get_uuid ++ printf("cpu_pool.get_uuid\n"); ++ if (!xen_cpu_pool_get_uuid(session, &pool_uuid, pool)) ++ { ++ break; ++ } ++ if (strcmp(pool_uuid, pool) != 0) ++ { ++ printf("Wrong Pool UUID returned\n"); ++ break; ++ } ++ free(pool_uuid); ++ pool_uuid = NULL; ++ ++ ++ // set_auto_power_on ++ printf("cpu_pool.set_auto_power_on\n"); ++ if (!xen_cpu_pool_set_auto_power_on(session, pool, true)) ++ break; ++ ++ ++ // set_proposed_CPUs ++ printf("cpu_pool.set_proposed_CPUs\n"); ++ proposed_CPUs_set = xen_string_set_alloc(2); ++ proposed_CPUs_set->contents[0] = strdup("2"); ++ proposed_CPUs_set->contents[1] = strdup("4"); ++ if (!xen_cpu_pool_set_proposed_CPUs(session, pool, proposed_CPUs_set)) ++ break; ++ xen_string_set_free(proposed_CPUs_set); ++ proposed_CPUs_set = NULL; ++ ++ ++ // add_to_proposed_CPUs ++ printf("cpu_pool.add_to_proposed_CPUs\n"); ++ if (!xen_cpu_pool_add_to_proposed_CPUs(session, pool, "3")) ++ break; ++ ++ ++ // remove_from_proposed_CPUs ++ printf("cpu_pool.remove_from_proposed_CPUs\n"); ++ if (!xen_cpu_pool_remove_from_proposed_CPUs(session, pool, "4")) ++ break; ++ ++ ++ // set_name_label ++ printf("cpu_pool.set_name_label\n"); ++ if (!xen_cpu_pool_set_name_label(session, pool, NAME_LABEL_2)) ++ break; ++ ++ ++ // set_name_description ++ printf("cpu_pool.set_name_description\n"); ++ if (!xen_cpu_pool_set_name_description(session, pool, NAME_DESCRIPTION_2)) ++ break; ++ ++ ++ // set_ncpu ++ printf("cpu_pool.set_ncpu\n"); ++ if (!xen_cpu_pool_set_ncpu(session, pool, NCPU_VAL_2)) ++ break; ++ ++ ++ // set_other_config ++ printf("cpu_pool.set_other_config\n"); ++ pool_other_config = xen_string_string_map_alloc(2); ++ pool_other_config->contents[0].key = strdup("test1"); ++ pool_other_config->contents[0].val = strdup("field1"); ++ pool_other_config->contents[1].key = strdup("test2"); ++ pool_other_config->contents[1].val = strdup("field2"); ++ if (!xen_cpu_pool_set_other_config(session, pool, pool_other_config)) ++ break; ++ xen_string_string_map_free(pool_other_config); ++ pool_other_config = NULL; ++ ++ ++ // add_to_other_config ++ printf("cpu_pool.add_to_other_config\n"); ++ if (!xen_cpu_pool_add_to_other_config(session, pool, "test3", "field3")) ++ break; ++ ++ ++ // remove_from_other_config ++ printf("cpu_pool.remove_from_other_config\n"); ++ if (!xen_cpu_pool_remove_from_other_config(session, pool, "test2")) ++ break; ++ ++ ++ // set_sched_policy ++ printf("cpu_pool.set_sched_policy\n"); ++ if (!xen_cpu_pool_set_sched_policy(session, pool, SCHED_NAME)) ++ break; ++ ++ ++ // check get_record again ++ printf("check cpu_pool record\n"); ++ if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool)) ++ { ++ break; ++ } ++ if (
(strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) || ++ (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) || ++ (!cpu_pool_rec->auto_power_on) || ++ (cpu_pool_rec->ncpu != NCPU_VAL_2) || ++ (cpu_pool_rec->started_vms->size != 0) || ++ (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) || ++ (cpu_pool_rec->proposed_cpus->size != 2) || ++ (cpu_pool_rec->host_cpus->size != 0) || ++ (cpu_pool_rec->activated) || ++ (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) || ++ (strcmp(cpu_pool_rec->uuid, pool) != 0) || ++ (cpu_pool_rec->other_config->size != 2)) ++ { ++ printf("Wrong record output\n"); ++ break; ++ } ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ cpu_pool_rec = NULL; ++ ++ ++ // activate pool ++ printf("cpu_pool.activate\n"); ++ if (!xen_cpu_pool_activate(session, pool)) ++ break; ++ ++ ++ // add_host_CPU_live ++ printf("cpu_pool.add_host_CPU_live\n"); ++ if (!xen_host_cpu_get_unassigned_cpus(session, &host_cpu_set)) ++ { ++ break; ++ } ++ if (host_cpu_set->size < 1) ++ { ++ printf("No free CPU found\n"); ++ break; ++ } ++ if (!xen_cpu_pool_add_host_CPU_live(session, pool, host_cpu_set->contents[0])) ++ break; ++ ++ ++ // remove_host_CPU_live ++ printf("cpu_pool.remove_host_CPU_live\n"); ++ if (!xen_cpu_pool_remove_host_CPU_live(session, pool, host_cpu_set->contents[0])) ++ break; ++ ++ xen_host_cpu_set_free(host_cpu_set); ++ host_cpu_set = NULL; ++ ++ ++ // check get_record again ++ printf("check cpu_pool record\n"); ++ if (!xen_cpu_pool_get_record(session, &cpu_pool_rec, pool)) ++ { ++ break; ++ } ++ if ( (strcmp(cpu_pool_rec->name_label, NAME_LABEL_2) != 0) || ++ (strcmp(cpu_pool_rec->name_description, NAME_DESCRIPTION_2) != 0) || ++ (!cpu_pool_rec->auto_power_on) || ++ (cpu_pool_rec->ncpu != NCPU_VAL_2) || ++ (cpu_pool_rec->started_vms->size != 0) || ++ (strcmp(cpu_pool_rec->sched_policy, SCHED_NAME) != 0) || ++ (cpu_pool_rec->proposed_cpus->size != 2) || ++ (cpu_pool_rec->host_cpus->size != 1) || ++ (!cpu_pool_rec->activated) || ++ (strcmp(cpu_pool_rec->resident_on->u.handle, host) != 0) || ++ (strcmp(cpu_pool_rec->uuid, pool) != 0) || ++ (cpu_pool_rec->other_config->size != 2)) ++ { ++ printf("Wrong record output\n"); ++ break; ++ } ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ cpu_pool_rec = NULL; ++ ++ ++ // deactivate pool ++ printf("cpu_pool.deactivate\n"); ++ if (!xen_cpu_pool_deactivate(session, pool)) ++ break; ++ ++ ++ // Pool delete ++ if (!xen_cpu_pool_destroy(session, pool)) ++ { ++ break; ++ } ++ xen_cpu_pool_free(pool); ++ pool = NULL; ++ ++ // Tests OK ++ printf("Pool Tests OK\n"); ++ rc= 0; ++ } ++ ++ if (rc != 0) ++ { ++ print_error(session); ++ } ++ ++ xen_cpu_pool_set_free(pools); ++ xen_host_record_free(host_record); ++ xen_cpu_pool_record_opt_free(cpu_pool_opt); ++ xen_host_cpu_set_free(host_cpu_set); ++ xen_host_cpu_record_free(host_cpu_record); ++ xen_vm_set_free(vm_set); ++ xen_cpu_pool_free(pool); ++ xen_cpu_pool_free(pool_out); ++ xen_string_string_map_free(pool_other_config); ++ xen_vm_record_free(vm_record); ++ xen_string_set_free(proposed_cpus); ++ free(name_description); ++ free(name_label); ++ free(sched_policy); ++ free(pool_uuid); ++ xen_cpu_pool_record_free(cpu_pool_rec); ++ xen_host_free(res_host); ++ ++ return rc; ++} ++#endif ++ ++ + int main(int argc, char **argv) + { + if (argc != 4) +@@ -365,6 +1009,11 @@ int main(int argc, char **argv) + + xen_vm_record_free(vm_record); + ++#ifdef POOL_TESTS ++ if (pool_tests(session, host) != 0) ++ return 1; ++#endif ++ + xen_host_free(host); + 
xen_string_string_map_free(versions); + free(dmesg); diff --git a/cpu-pools-python.patch b/cpu-pools-python.patch new file mode 100644 index 0000000..3ad0eea --- /dev/null +++ b/cpu-pools-python.patch @@ -0,0 +1,2543 @@ +Index: xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/lowlevel/xc/xc.c ++++ xen-4.0.0-testing/tools/python/xen/lowlevel/xc/xc.c +@@ -97,17 +97,18 @@ static PyObject *pyxc_domain_create(XcOb + PyObject *args, + PyObject *kwds) + { +- uint32_t dom = 0, ssidref = 0, flags = 0, target = 0; ++ uint32_t dom = 0, ssidref = 0, flags = 0, target = 0, cpupool = 0; + int ret, i; + PyObject *pyhandle = NULL; + xen_domain_handle_t handle = { + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef }; + +- static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", NULL }; ++ static char *kwd_list[] = { "domid", "ssidref", "handle", "flags", "target", "cpupool", NULL }; + +- if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOii", kwd_list, +- &dom, &ssidref, &pyhandle, &flags, &target)) ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOiii", kwd_list, &dom, ++ &ssidref, &pyhandle, &flags, &target, ++ &cpupool)) + return NULL; + if ( pyhandle != NULL ) + { +@@ -124,8 +125,9 @@ static PyObject *pyxc_domain_create(XcOb + } + } + ++ flags |= XEN_DOMCTL_CDF_pool; + if ( (ret = xc_domain_create(self->xc_handle, ssidref, +- handle, flags, &dom)) < 0 ) ++ handle, flags, &dom, cpupool)) < 0 ) + return pyxc_error_to_exception(); + + if ( target ) +@@ -316,7 +318,7 @@ static PyObject *pyxc_domain_getinfo(XcO + { + info_dict = Py_BuildValue( + "{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i" +- ",s:L,s:L,s:L,s:i,s:i}", ++ ",s:L,s:L,s:L,s:i,s:i,s:i}", + "domid", (int)info[i].domid, + "online_vcpus", info[i].nr_online_vcpus, + "max_vcpu_id", info[i].max_vcpu_id, +@@ -331,7 +333,8 @@ static PyObject *pyxc_domain_getinfo(XcO + "cpu_time", (long long)info[i].cpu_time, + "maxmem_kb", (long long)info[i].max_memkb, + "ssidref", (int)info[i].ssidref, +- "shutdown_reason", info[i].shutdown_reason); ++ "shutdown_reason", info[i].shutdown_reason, ++ "cpupool", (int)info[i].cpupool); + pyhandle = PyList_New(sizeof(xen_domain_handle_t)); + if ( (pyhandle == NULL) || (info_dict == NULL) ) + { +@@ -1697,6 +1700,179 @@ static PyObject *pyxc_dom_set_memshr(XcO + return zero; + } + ++static PyObject *cpumap_to_cpulist(uint64_t cpumap) ++{ ++ PyObject *cpulist = NULL; ++ uint32_t i; ++ ++ cpulist = PyList_New(0); ++ for ( i = 0; cpumap != 0; i++ ) ++ { ++ if ( cpumap & 1 ) ++ { ++ PyObject* pyint = PyInt_FromLong(i); ++ ++ PyList_Append(cpulist, pyint); ++ Py_DECREF(pyint); ++ } ++ cpumap >>= 1; ++ } ++ return cpulist; ++} ++ ++static PyObject *pyxc_cpupool_create(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ uint32_t cpupool = 0, sched = XEN_SCHEDULER_CREDIT; ++ ++ static char *kwd_list[] = { "pool", "sched", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, &cpupool, ++ &sched)) ++ return NULL; ++ ++ if ( xc_cpupool_create(self->xc_handle, &cpupool, sched) < 0 ) ++ return pyxc_error_to_exception(); ++ ++ return PyInt_FromLong(cpupool); ++} ++ ++static PyObject *pyxc_cpupool_destroy(XcObject *self, ++ PyObject *args) ++{ ++ uint32_t cpupool; ++ ++ if (!PyArg_ParseTuple(args, "i", &cpupool)) ++ return NULL; ++ ++ if (xc_cpupool_destroy(self->xc_handle, cpupool) != 0) ++ return pyxc_error_to_exception(); 
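++ /* On success these cpupool bindings return the file's shared Python integer object "zero"; it must be Py_INCREF'd before each return so that the caller receives its own reference. */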
++ ++ Py_INCREF(zero); ++ return zero; ++} ++ ++static PyObject *pyxc_cpupool_getinfo(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ PyObject *list, *info_dict; ++ ++ uint32_t first_pool = 0; ++ int max_pools = 1024, nr_pools, i; ++ xc_cpupoolinfo_t *info; ++ ++ static char *kwd_list[] = { "first_pool", "max_pools", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|ii", kwd_list, ++ &first_pool, &max_pools) ) ++ return NULL; ++ ++ info = calloc(max_pools, sizeof(xc_cpupoolinfo_t)); ++ if (info == NULL) ++ return PyErr_NoMemory(); ++ ++ nr_pools = xc_cpupool_getinfo(self->xc_handle, first_pool, max_pools, info); ++ ++ if (nr_pools < 0) ++ { ++ free(info); ++ return pyxc_error_to_exception(); ++ } ++ ++ list = PyList_New(nr_pools); ++ for ( i = 0 ; i < nr_pools; i++ ) ++ { ++ info_dict = Py_BuildValue( ++ "{s:i,s:i,s:i,s:N}", ++ "cpupool", (int)info[i].cpupool_id, ++ "sched", info[i].sched_id, ++ "n_dom", info[i].n_dom, ++ "cpulist", cpumap_to_cpulist(info[i].cpumap)); ++ if ( info_dict == NULL ) ++ { ++ Py_DECREF(list); ++ if ( info_dict != NULL ) { Py_DECREF(info_dict); } ++ free(info); ++ return NULL; ++ } ++ PyList_SetItem(list, i, info_dict); ++ } ++ ++ free(info); ++ ++ return list; ++} ++ ++static PyObject *pyxc_cpupool_addcpu(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ uint32_t cpupool; ++ int cpu = -1; ++ ++ static char *kwd_list[] = { "cpupool", "cpu", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, ++ &cpupool, &cpu) ) ++ return NULL; ++ ++ if (xc_cpupool_addcpu(self->xc_handle, cpupool, cpu) != 0) ++ return pyxc_error_to_exception(); ++ ++ Py_INCREF(zero); ++ return zero; ++} ++ ++static PyObject *pyxc_cpupool_removecpu(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ uint32_t cpupool; ++ int cpu = -1; ++ ++ static char *kwd_list[] = { "cpupool", "cpu", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, ++ &cpupool, &cpu) ) ++ return NULL; ++ ++ if (xc_cpupool_removecpu(self->xc_handle, cpupool, cpu) != 0) ++ return pyxc_error_to_exception(); ++ ++ Py_INCREF(zero); ++ return zero; ++} ++ ++static PyObject *pyxc_cpupool_movedomain(XcObject *self, ++ PyObject *args, ++ PyObject *kwds) ++{ ++ uint32_t cpupool, domid; ++ ++ static char *kwd_list[] = { "cpupool", "domid", NULL }; ++ ++ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, ++ &cpupool, &domid) ) ++ return NULL; ++ ++ if (xc_cpupool_movedomain(self->xc_handle, cpupool, domid) != 0) ++ return pyxc_error_to_exception(); ++ ++ Py_INCREF(zero); ++ return zero; ++} ++ ++static PyObject *pyxc_cpupool_freeinfo(XcObject *self) ++{ ++ uint64_t cpumap; ++ ++ if (xc_cpupool_freeinfo(self->xc_handle, &cpumap) != 0) ++ return pyxc_error_to_exception(); ++ ++ return cpumap_to_cpulist(cpumap); ++} + + static PyMethodDef pyxc_methods[] = { + { "handle", +@@ -1812,7 +1988,8 @@ static PyMethodDef pyxc_methods[] = { + " maxmem_kb [int]: Maximum memory limit, in kilobytes\n" + " cpu_time [long]: CPU time consumed, in nanoseconds\n" + " shutdown_reason [int]: Numeric code from guest OS, explaining " +- "reason why it shut itself down.\n" }, ++ "reason why it shut itself down.\n" ++ " cpupool [int] Id of cpupool domain is bound to.\n" }, + + { "vcpu_getinfo", + (PyCFunction)pyxc_vcpu_getinfo, +@@ -2210,6 +2387,66 @@ static PyMethodDef pyxc_methods[] = { + " enable [int,0|1]: Disable or enable?\n" + "Returns: [int] 0 on success; -1 on error.\n" }, + ++ { "cpupool_create", ++ (PyCFunction)pyxc_cpupool_create, ++ METH_VARARGS | 
METH_KEYWORDS, "\n" ++ "Create new cpupool.\n" ++ " pool [int, 0]: cpupool identifier to use (allocated if zero).\n" ++ " sched [int]: scheduler to use (credit if unspecified).\n\n" ++ "Returns: [int] new cpupool identifier; -1 on error.\n" }, ++ ++ { "cpupool_destroy", ++ (PyCFunction)pyxc_cpupool_destroy, ++ METH_VARARGS, "\n" ++ "Destroy a cpupool.\n" ++ " pool [int]: Identifier of cpupool to be destroyed.\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_getinfo", ++ (PyCFunction)pyxc_cpupool_getinfo, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Get information regarding a set of cpupools, in increasing id order.\n" ++ " first_pool [int, 0]: First cpupool to retrieve info about.\n" ++ " max_pools [int, 1024]: Maximum number of cpupools to retrieve info" ++ " about.\n\n" ++ "Returns: [list of dicts] if list length is less than 'max_pools'\n" ++ " parameter then there was an error, or the end of the\n" ++ " cpupool-id space was reached.\n" ++ " pool [int]: Identifier of cpupool to which this info pertains\n" ++ " sched [int]: Scheduler used for this cpupool\n" ++ " n_dom [int]: Number of Domains in this cpupool\n" ++ " cpulist [list]: List of CPUs this cpupool is using\n" }, ++ ++ { "cpupool_addcpu", ++ (PyCFunction)pyxc_cpupool_addcpu, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Add a cpu to a cpupool.\n" ++ " pool [int]: Identifier of cpupool.\n" ++ " cpu [int, -1]: Cpu to add (lowest free if -1)\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_removecpu", ++ (PyCFunction)pyxc_cpupool_removecpu, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Remove a cpu from a cpupool.\n" ++ " pool [int]: Identifier of cpupool.\n" ++ " cpu [int, -1]: Cpu to remove (highest used if -1)\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_movedomain", ++ (PyCFunction)pyxc_cpupool_movedomain, ++ METH_VARARGS | METH_KEYWORDS, "\n" ++ "Move a domain to another cpupool.\n" ++ " pool [int]: Identifier of cpupool to move domain to.\n" ++ " dom [int]: Domain to move\n\n" ++ "Returns: [int] 0 on success; -1 on error.\n" }, ++ ++ { "cpupool_freeinfo", ++ (PyCFunction)pyxc_cpupool_freeinfo, ++ METH_NOARGS, "\n" ++ "Get info about cpus not in any cpupool.\n" ++ "Returns: [list]: List of CPUs\n" }, ++ + { NULL, NULL, 0, NULL } + }; + +Index: xen-4.0.0-testing/tools/python/xen/util/sxputils.py +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/python/xen/util/sxputils.py +@@ -0,0 +1,64 @@ ++#============================================================================ ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of version 2.1 of the GNU Lesser General Public ++# License as published by the Free Software Foundation. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++#============================================================================ ++# Copyright (c) 2009 Fujitsu Technology Solutions ++#============================================================================ ++ ++""" convert sxp to map / map to sxp. 
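++ ++Illustration (hypothetical values): map2sxp({'ncpu': 2}) yields ++[['ncpu', 2]], and sxp2map(['pool', ['ncpu', 2]]) yields back {'ncpu': 2}.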
++""" ++ ++import types ++from xen.xend import sxp ++ ++def map2sxp(map_val): ++ """ conversion of all key-value pairs of a map (recursively) to sxp. ++ @param map_val: map; if a value contains a list or dict it is also ++ converted to sxp ++ @type map_val: dict ++ @return sxp expr ++ @rtype: list ++ """ ++ sxp_vals = [] ++ for (k, v) in map_val.items(): ++ if isinstance(v, types.DictionaryType): ++ sxp_vals += [[k] + map2sxp(v)] ++ elif isinstance(v, types.ListType): ++ sxp_vals += [[k] + v] ++ else: ++ sxp_vals += [[k, v]] ++ return sxp_vals ++ ++def sxp2map( s ): ++ """ conversion of sxp to map. ++ @param s: sxp expr ++ @type s: list ++ @return: map ++ @rtype: dict ++ """ ++ sxphash = {} ++ ++ for child in sxp.children( s ): ++ if isinstance( child, types.ListType ) and len( child ) > 1: ++ if isinstance( child[1], types.ListType ) and len( child[1] ) > 1: ++ sxphash[ child[0] ] = sxp2map( child ) ++ else: ++ childs = sxp.children(child) ++ if len(childs) > 1: ++ sxphash[ child[0] ] = childs ++ else: ++ sxphash[ child[0] ] = childs[0] ++ ++ return sxphash ++ ++ +Index: xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py +=================================================================== +--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendAPI.py ++++ xen-4.0.0-testing/tools/python/xen/xend/XendAPI.py +@@ -51,6 +51,7 @@ from XendDPCI import XendDPCI + from XendPSCSI import XendPSCSI, XendPSCSI_HBA + from XendDSCSI import XendDSCSI, XendDSCSI_HBA + from XendXSPolicy import XendXSPolicy, XendACMPolicy ++from xen.xend.XendCPUPool import XendCPUPool + + from XendAPIConstants import * + from xen.util.xmlrpclib2 import stringify +@@ -498,6 +499,7 @@ classes = { + 'PSCSI_HBA' : valid_object("PSCSI_HBA"), + 'DSCSI' : valid_object("DSCSI"), + 'DSCSI_HBA' : valid_object("DSCSI_HBA"), ++ 'cpu_pool' : valid_object("cpu_pool"), + } + + autoplug_classes = { +@@ -514,6 +516,7 @@ autoplug_classes = { + 'DSCSI_HBA' : XendDSCSI_HBA, + 'XSPolicy' : XendXSPolicy, + 'ACMPolicy' : XendACMPolicy, ++ 'cpu_pool' : XendCPUPool, + } + + class XendAPI(object): +@@ -914,7 +917,8 @@ class XendAPI(object): + 'API_version_minor', + 'API_version_vendor', + 'API_version_vendor_implementation', +- 'enabled'] ++ 'enabled', ++ 'resident_cpu_pools'] + + host_attr_rw = ['name_label', + 'name_description', +@@ -1014,6 +1018,8 @@ class XendAPI(object): + return xen_api_todo() + def host_get_logging(self, _, host_ref): + return xen_api_todo() ++ def host_get_resident_cpu_pools(self, _, host_ref): ++ return xen_api_success(XendCPUPool.get_all()) + + # object methods + def host_disable(self, session, host_ref): +@@ -1076,7 +1082,9 @@ class XendAPI(object): + 'PBDs': XendPBD.get_all(), + 'PPCIs': XendPPCI.get_all(), + 'PSCSIs': XendPSCSI.get_all(), +- 'PSCSI_HBAs': XendPSCSI_HBA.get_all()} ++ 'PSCSI_HBAs': XendPSCSI_HBA.get_all(), ++ 'resident_cpu_pools': XendCPUPool.get_all(), ++ } + return xen_api_success(record) + + def host_tmem_thaw(self, _, host_ref, cli_id): +@@ -1185,7 +1193,10 @@ class XendAPI(object): + 'stepping', + 'flags', + 'utilisation', +- 'features'] ++ 'features', ++ 'cpu_pool'] ++ ++ host_cpu_funcs = [('get_unassigned_cpus', 'Set(host_cpu)')] + + # attributes + def _host_cpu_get(self, ref, field): +@@ -1210,21 +1221,28 @@ class XendAPI(object): + return self._host_cpu_get(ref, 'flags') + def host_cpu_get_utilisation(self, _, ref): + return xen_api_success(XendNode.instance().get_host_cpu_load(ref)) ++ def host_cpu_get_cpu_pool(self, _, ref): ++ return xen_api_success(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) + + 
# object methods + def host_cpu_get_record(self, _, ref): + node = XendNode.instance() + record = dict([(f, node.get_host_cpu_field(ref, f)) + for f in self.host_cpu_attr_ro +- if f not in ['uuid', 'host', 'utilisation']]) ++ if f not in ['uuid', 'host', 'utilisation', 'cpu_pool']]) + record['uuid'] = ref + record['host'] = node.uuid + record['utilisation'] = node.get_host_cpu_load(ref) ++ record['cpu_pool'] = XendCPUPool.get_cpu_pool_by_cpu_ref(ref) + return xen_api_success(record) + + # class methods + def host_cpu_get_all(self, session): + return xen_api_success(XendNode.instance().get_host_cpu_refs()) ++ def host_cpu_get_unassigned_cpus(self, session): ++ return xen_api_success( ++ [ref for ref in XendNode.instance().get_host_cpu_refs() ++ if len(XendCPUPool.get_cpu_pool_by_cpu_ref(ref)) == 0]) + + + # Xen API: Class host_metrics +@@ -1284,6 +1302,7 @@ class XendAPI(object): + 'is_control_domain', + 'metrics', + 'crash_dumps', ++ 'cpu_pool', + ] + + VM_attr_rw = ['name_label', +@@ -1312,7 +1331,9 @@ class XendAPI(object): + 'platform', + 'PCI_bus', + 'other_config', +- 'security_label'] ++ 'security_label', ++ 'pool_name', ++ ] + + VM_methods = [('clone', 'VM'), + ('start', None), +@@ -1340,7 +1361,9 @@ class XendAPI(object): + ('set_memory_dynamic_min_live', None), + ('send_trigger', None), + ('migrate', None), +- ('destroy', None)] ++ ('destroy', None), ++ ('cpu_pool_migrate', None), ++ ] + + VM_funcs = [('create', 'VM'), + ('restore', None), +@@ -1540,6 +1563,17 @@ class XendAPI(object): + return xen_api_success( + xd.get_vm_by_uuid(vm_ref) == xd.privilegedDomain()) + ++ def VM_get_cpu_pool(self, session, vm_ref): ++ dom = XendDomain.instance().get_vm_by_uuid(vm_ref) ++ pool_ref = XendCPUPool.query_pool_ref(dom.get_cpu_pool()) ++ return xen_api_success(pool_ref) ++ ++ def VM_get_pool_name(self, session, vm_ref): ++ return self.VM_get('pool_name', session, vm_ref) ++ ++ def VM_set_pool_name(self, session, vm_ref, value): ++ return self.VM_set('pool_name', session, vm_ref, value) ++ + def VM_set_name_label(self, session, vm_ref, label): + dom = XendDomain.instance().get_vm_by_uuid(vm_ref) + dom.setName(label) +@@ -1618,7 +1652,8 @@ class XendAPI(object): + if key.startswith("cpumap"): + vcpu = int(key[6:]) + try: +- xendom.domain_pincpu(xeninfo.getDomid(), vcpu, value) ++ cpus = map(int, value.split(",")) ++ xendom.domain_pincpu(xeninfo.getDomid(), vcpu, cpus) + except Exception, ex: + log.exception(ex) + +@@ -1835,7 +1870,9 @@ class XendAPI(object): + 'is_control_domain': xeninfo.info['is_control_domain'], + 'metrics': xeninfo.get_metrics(), + 'security_label': xeninfo.get_security_label(), +- 'crash_dumps': [] ++ 'crash_dumps': [], ++ 'pool_name': xeninfo.info.get('pool_name'), ++ 'cpu_pool' : XendCPUPool.query_pool_ref(xeninfo.get_cpu_pool()), + } + return xen_api_success(record) + +@@ -1933,6 +1970,25 @@ class XendAPI(object): + xendom.domain_restore(src, bool(paused)) + return xen_api_success_void() + ++ def VM_cpu_pool_migrate(self, session, vm_ref, cpu_pool_ref): ++ xendom = XendDomain.instance() ++ xeninfo = xendom.get_vm_by_uuid(vm_ref) ++ domid = xeninfo.getDomid() ++ pool = XendAPIStore.get(cpu_pool_ref, XendCPUPool.getClass()) ++ if pool == None: ++ return xen_api_error(['HANDLE_INVALID', 'cpu_pool', cpu_pool_ref]) ++ if domid is not None: ++ if domid == 0: ++ return xen_api_error(['OPERATION_NOT_ALLOWED', ++ 'could not move Domain-0']) ++ try: ++ XendCPUPool.move_domain(cpu_pool_ref, domid) ++ except Exception, ex: ++ return xen_api_error(['INTERNAL_ERROR', ++ 'could not 
move domain']) ++ self.VM_set('pool_name', session, vm_ref, pool.get_name_label()) ++ return xen_api_success_void() ++ + + # Xen API: Class VBD + # ---------------------------------------------------------------- +Index: xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py +=================================================================== +--- /dev/null ++++ xen-4.0.0-testing/tools/python/xen/xend/XendCPUPool.py +@@ -0,0 +1,896 @@ ++#============================================================================ ++# This library is free software; you can redistribute it and/or ++# modify it under the terms of version 2.1 of the GNU Lesser General Public ++# License as published by the Free Software Foundation. ++# ++# This library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this library; if not, write to the Free Software ++# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++#============================================================================ ++# Copyright (c) 2009 Fujitsu Technology Solutions. ++#============================================================================ ++ ++""" CPU Pool support including XEN-API and Legacy API. ++""" ++ ++import types ++import threading ++import re ++import xen.lowlevel.xc ++import XendNode ++import XendDomain ++from xen.xend.XendLogging import log ++from xen.xend.XendBase import XendBase ++from xen.xend import XendAPIStore ++from xen.xend.XendConstants import XS_POOLROOT ++from xen.xend import uuid as genuuid ++from xen.xend.XendError import VmError, XendAPIError, PoolError ++from xen.xend.xenstore.xstransact import xstransact ++from xen.util.sxputils import sxp2map, map2sxp ++ ++ ++XEND_ERROR_INTERNAL = 'INTERNAL_ERROR' ++XEND_ERROR_UNKNOWN_SCHED_POLICY = 'UNKNOWN_SCHED_POLICY' ++XEND_ERROR_BAD_POOL_STATE = 'POOL_BAD_STATE' ++XEND_ERROR_POOL_PARAM = 'PARAMETER_ERROR' ++XEND_ERROR_INSUFFICIENT_CPUS = 'INSUFFICIENT_CPUS' ++XEND_ERROR_POOL_RECONF = 'POOL_RECONF' ++XEND_ERROR_INVALID_CPU = 'INVALID_CPU' ++XEND_ERROR_LAST_CPU_NOT_REM = 'LAST_CPU_NOT_REMOVEABLE' ++ ++ ++XEN_SCHEDULER_TO_ID = { ++ 'credit' : xen.lowlevel.xc.XEN_SCHEDULER_CREDIT, ++ 'sedf' : xen.lowlevel.xc.XEN_SCHEDULER_SEDF, ++ } ++ ++xc = xen.lowlevel.xc.xc() ++ ++class XendCPUPool(XendBase): ++ """ CPU Pool management.
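++ A cpu_pool groups host cpus under an own scheduler instance: activate() ++ creates the pool in the hypervisor and mirrors it in xenstore below ++ XS_POOLROOT, deactivate() removes it again.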
++ @ivar pool_lock: Lock to secure modification of pool data ++ @type pool_lock: RLock ++ """ ++ ++ pool_lock = threading.RLock() ++ ++ def getClass(cls): ++ return "cpu_pool" ++ ++ def getAttrRO(cls): ++ attrRO = ['resident_on', ++ 'started_VMs', ++ 'host_CPUs', ++ 'activated', ++ ] ++ return XendBase.getAttrRO() + attrRO ++ ++ def getAttrRW(cls): ++ attrRW = ['name_label', ++ 'name_description', ++ 'auto_power_on', ++ 'ncpu', ++ 'sched_policy', ++ 'proposed_CPUs', ++ 'other_config', ++ ] ++ return XendBase.getAttrRW() + attrRW ++ ++ def getMethods(cls): ++ methods = ['destroy', ++ 'activate', ++ 'deactivate', ++ 'add_host_CPU_live', ++ 'remove_host_CPU_live', ++ 'add_to_proposed_CPUs', ++ 'remove_from_proposed_CPUs', ++ 'add_to_other_config', ++ 'remove_from_other_config', ++ ] ++ return XendBase.getMethods() + methods ++ ++ def getFuncs(cls): ++ funcs = ['create', ++ 'get_by_name_label', ++ ] ++ return XendBase.getFuncs() + funcs ++ ++ getClass = classmethod(getClass) ++ getAttrRO = classmethod(getAttrRO) ++ getAttrRW = classmethod(getAttrRW) ++ getMethods = classmethod(getMethods) ++ getFuncs = classmethod(getFuncs) ++ ++ ++ # ++ # XenAPI function calls ++ # ++ ++ def create(cls, record): ++ """ Create a new managed pool instance. ++ @param record: attributes of pool ++ @type record: dict ++ @return: uuid of created pool ++ @rtype: str ++ """ ++ new_uuid = genuuid.createString() ++ XendCPUPool(record, new_uuid) ++ XendNode.instance().save_cpu_pools() ++ return new_uuid ++ ++ create = classmethod(create) ++ ++ ++ def get_by_name_label(cls, name_label): ++ """ Query a Pool(ref) by its name. ++ @return: ref of pool ++ @rtype: str ++ """ ++ cls.pool_lock.acquire() ++ try: ++ return [ inst.get_uuid() ++ for inst in XendAPIStore.get_all(cls.getClass()) ++ if inst.name_label == name_label ++ ] ++ finally: ++ cls.pool_lock.release() ++ ++ get_by_name_label = classmethod(get_by_name_label) ++ ++ ++ def get_cpu_pool_by_cpu_ref(cls, host_cpu): ++ """ Query cpu_pool ref the given cpu belongs to. ++ @param host_cpu: ref of host_cpu to lookup ++ @type host_cpu: str ++ @return: list of cpu_pool refs (contains at most one element) ++ @rtype: list of str ++ """ ++ node = XendNode.instance() ++ cpu_nr = node.get_host_cpu_field(host_cpu, 'number') ++ for pool_rec in xc.cpupool_getinfo(): ++ if cpu_nr in pool_rec['cpulist']: ++ # pool found; return the ref ++ return cls.query_pool_ref(pool_rec['cpupool']) ++ return [] ++ ++ get_cpu_pool_by_cpu_ref = classmethod(get_cpu_pool_by_cpu_ref) ++ ++ ++ def get_all_managed(cls): ++ """ Query all managed pools.
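++ Covers only pools created as managed (create/pool_new); pools ++ activated unmanaged via pool_create are not included.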
++ @return: uuids of all managed pools ++ @rtype: list of str ++ """ ++ cls.pool_lock.acquire() ++ try: ++ managed_pools = [ inst.get_uuid() ++ for inst in XendAPIStore.get_all(cls.getClass()) ++ if inst.is_managed() ] ++ finally: ++ cls.pool_lock.release() ++ return managed_pools ++ ++ get_all_managed = classmethod(get_all_managed) ++ ++ ++ # ++ # XenAPI method calls ++ # ++ ++ def __init__(self, record, new_uuid, managed_pool=True): ++ XendBase.__init__(self, new_uuid, record) ++ try: ++ self._managed = managed_pool ++ self.name_label = None ++ ++ name = record.get('name_label', 'Pool-Unnamed') ++ self._checkName(name) ++ self.name_label = name ++ self.name_description = record.get('name_description', ++ self.name_label) ++ self.proposed_cpus = [ int(cpu) ++ for cpu in record.get('proposed_CPUs', []) ] ++ self.auto_power_on = bool(record.get('auto_power_on', False)) ++ self.ncpu = int(record.get('ncpu', 1)) ++ self.sched_policy = record.get('sched_policy', '') ++ self.other_config = record.get('other_config', {}) ++ except Exception, ex: ++ XendBase.destroy(self) ++ raise ex ++ ++ ++ def get_resident_on(self): ++ """ Always return uuid of own node. ++ @return: uuid of this node ++ @rtype: str ++ """ ++ return XendNode.instance().uuid ++ ++ def get_started_VMs(self): ++ """ Query all VMs currently assigned to pool. ++ @return: ref of all VMs assigned to pool; if pool is not active, ++ an empty list will be returned ++ @rtype: list of str ++ """ ++ if self.get_activated(): ++ # search VMs related to this pool ++ pool_id = self.query_pool_id() ++ started_VMs = [ vm.get_uuid() ++ for vm in XendDomain.instance().list('all') ++ if vm.get_cpu_pool() == pool_id ] ++ else: ++ # pool not active, so it couldn't have any started VMs ++ started_VMs = [] ++ ++ return started_VMs ++ ++ def get_host_CPUs(self): ++ """ Query all cpu refs of this pool currently assigned. ++ - Read pool id of this pool from xenstore ++ - Read cpu configuration from hypervisor ++ - lookup cpu number -> cpu ref ++ @return: host_cpu refs ++ @rtype: list of str ++ """ ++ if self.get_activated(): ++ node = XendNode.instance() ++ pool_id = self.query_pool_id() ++ if pool_id == None: ++ raise PoolError(XEND_ERROR_INTERNAL, ++ [self.getClass(), 'get_host_CPUs']) ++ cpus = [] ++ for pool_rec in xc.cpupool_getinfo(): ++ if pool_rec['cpupool'] == pool_id: ++ cpus = pool_rec['cpulist'] ++ ++ # query host_cpu ref for each cpu of the pool ++ host_CPUs = [ cpu_ref ++ for cpu_ref in node.get_host_cpu_refs() ++ if node.get_host_cpu_field(cpu_ref, 'number') ++ in cpus ] ++ else: ++ # pool not active, so it couldn't have any assigned cpus ++ host_CPUs = [] ++ ++ return host_CPUs ++ ++ def get_activated(self): ++ """ Query if the pool is registered in XenStore. ++ If pool uuid is not in XenStore, the pool is not activated.
++ @return: True, if activated ++ @rtype: bool ++ """ ++ return self.query_pool_id() != None ++ ++ def get_name_label(self): ++ return self.name_label ++ ++ def get_name_description(self): ++ return self.name_description ++ ++ def get_auto_power_on(self): ++ return self.auto_power_on ++ ++ def get_ncpu(self): ++ return self.ncpu ++ ++ def get_sched_policy(self): ++ if len(self.sched_policy) == 0: ++ # default scheduler selected ++ return XendNode.instance().get_vcpus_policy() ++ else: ++ return self.sched_policy ++ ++ def get_proposed_CPUs(self): ++ return [ str(cpu) for cpu in self.proposed_cpus ] ++ ++ def get_other_config(self): ++ return self.other_config ++ ++ def set_name_label(self, name_label): ++ self._checkName(name_label) ++ self.name_label = name_label ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_name_description(self, name_descr): ++ self.name_description = name_descr ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_auto_power_on(self, auto_power_on): ++ self.auto_power_on = bool(int(auto_power_on)) ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_ncpu(self, ncpu): ++ _ncpu = int(ncpu) ++ if _ncpu < 1: ++ raise PoolError(XEND_ERROR_POOL_PARAM, 'ncpu') ++ self.ncpu = _ncpu ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_sched_policy(self, sched_policy): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ self.sched_policy = sched_policy ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_proposed_CPUs(self, proposed_cpus): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ self.proposed_cpus = [ int(cpu) for cpu in proposed_cpus ] ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def set_other_config(self, other_config): ++ self.other_config = other_config ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def destroy(self): ++ """ In order to destroy a cpu pool, it must be deactivated """ ++ self.pool_lock.acquire() ++ try: ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ XendBase.destroy(self) ++ finally: ++ self.pool_lock.release() ++ XendNode.instance().save_cpu_pools() ++ ++ def activate(self): ++ """ Create pool in hypervisor and add cpus. 
++ Preconditions: ++ - pool not already active ++ - enough unbound cpus available ++ Actions: ++ - create pool in hypervisor ++ - select free cpus (preferring the proposed_CPUs list) and bind them to ++ the pool ++ - create entries in Xenstore ++ """ ++ self.pool_lock.acquire() ++ try: ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ if self.sched_policy != XendNode.instance().get_vcpus_policy(): ++ raise PoolError(XEND_ERROR_UNKNOWN_SCHED_POLICY) ++ unbound_cpus = set(self.unbound_cpus()) ++ if len(unbound_cpus) < self.ncpu: ++ raise PoolError(XEND_ERROR_INSUFFICIENT_CPUS, ++ [str(self.ncpu), str(len(unbound_cpus))]) ++ ++ # build list of cpu numbers to bind to pool ++ cpu_set = set(self.proposed_cpus).intersection(unbound_cpus) ++ if len(cpu_set) < self.ncpu: ++ pool_cpus = (list(cpu_set) + ++ list(unbound_cpus.difference(cpu_set))) ++ else: ++ pool_cpus = list(cpu_set) ++ pool_cpus = pool_cpus[0:self.ncpu] ++ ++ # create pool in hypervisor ++ pool_id = xc.cpupool_create( ++ sched = XEN_SCHEDULER_TO_ID.get(self.sched_policy, 0)) ++ ++ self.update_XS(pool_id) ++ # add cpus ++ for cpu in pool_cpus: ++ xc.cpupool_addcpu(pool_id, cpu) ++ ++ finally: ++ self.pool_lock.release() ++ ++ def deactivate(self): ++ """ Delete pool in hypervisor. ++ Preconditions: ++ - pool is activated ++ - no running VMs in pool ++ Actions: ++ - call hypervisor for deletion ++ - remove path of pool in xenstore ++ """ ++ self.pool_lock.acquire() ++ try: ++ if not self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated') ++ if len(self.get_started_VMs()) != 0: ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'in use') ++ ++ pool_id = self.query_pool_id() ++ # remove cpus from pool ++ cpus = [] ++ for pool_rec in xc.cpupool_getinfo(): ++ if pool_rec['cpupool'] == pool_id: ++ cpus = pool_rec['cpulist'] ++ for cpu_number in cpus: ++ xc.cpupool_removecpu(pool_id, cpu_number) ++ xc.cpupool_destroy(pool_id) ++ ++ # update XenStore ++ xs_path = XS_POOLROOT + "%s/" % pool_id ++ xstransact.Remove(xs_path) ++ finally: ++ self.pool_lock.release() ++ ++ def add_host_CPU_live(self, cpu_ref): ++ """ Add cpu to pool, if it is currently not assigned to a pool. ++ @param cpu_ref: reference of host_cpu instance to add ++ @type cpu_ref: str ++ """ ++ if not self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated') ++ node = XendNode.instance() ++ number = node.get_host_cpu_field(cpu_ref, 'number') ++ ++ self.pool_lock.acquire() ++ try: ++ pool_id = self.query_pool_id() ++ other_pool_ref = self.get_cpu_pool_by_cpu_ref(cpu_ref) ++ if len(other_pool_ref) != 0: ++ raise PoolError(XEND_ERROR_INVALID_CPU, ++ 'cpu already assigned to pool "%s"' % other_pool_ref[0]) ++ xc.cpupool_addcpu(pool_id, number) ++ finally: ++ self.pool_lock.release() ++ ++ if number not in self.proposed_cpus: ++ self.proposed_cpus.append(number) ++ self._update_ncpu(pool_id) ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def remove_host_CPU_live(self, cpu_ref): ++ """ Remove cpu from pool. ++ After a successful call, the cpu is free. ++ Removal of the last cpu of the pool is rejected.
++ @param cpu_ref: reference of host_cpu instance to remove ++ @type cpu_ref: str ++ """ ++ if not self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'deactivated') ++ node = XendNode.instance() ++ number = node.get_host_cpu_field(cpu_ref, 'number') ++ ++ self.pool_lock.acquire() ++ try: ++ pool_id = self.query_pool_id() ++ pool_rec = {} ++ for pool in xc.cpupool_getinfo(): ++ if pool['cpupool'] == pool_id: ++ pool_rec = pool ++ break ++ ++ if number in pool_rec['cpulist']: ++ if len(pool_rec['cpulist']) < 2 and pool_rec['n_dom'] > 0: ++ raise PoolError(XEND_ERROR_LAST_CPU_NOT_REM, ++ 'could not remove last cpu') ++ xc.cpupool_removecpu(pool_id, number) ++ else: ++ raise PoolError(XEND_ERROR_INVALID_CPU, ++ 'CPU not assigned to pool') ++ finally: ++ self.pool_lock.release() ++ ++ if number in self.proposed_cpus: ++ self.proposed_cpus.remove(number) ++ self._update_ncpu(pool_id) ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def add_to_proposed_CPUs(self, cpu): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ ++ _cpu = int(cpu) ++ if _cpu not in self.proposed_cpus: ++ self.proposed_cpus.append(_cpu) ++ self.proposed_cpus.sort() ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def remove_from_proposed_CPUs(self, cpu): ++ if self.get_activated(): ++ raise PoolError(XEND_ERROR_BAD_POOL_STATE, 'activated') ++ _cpu = int(cpu) ++ if _cpu in self.proposed_cpus: ++ self.proposed_cpus.remove(_cpu) ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def add_to_other_config(self, key, value): ++ self.other_config[key] = value ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ def remove_from_other_config(self, key): ++ if key in self.other_config: ++ del self.other_config[key] ++ if self._managed: ++ XendNode.instance().save_cpu_pools() ++ ++ ++ # ++ # Legacy RPC calls ++ # ++ def pool_new(cls, config): ++ try: ++ record = sxp2map(config) ++ if record.has_key('proposed_CPUs') and \ ++ not isinstance(record['proposed_CPUs'], types.ListType): ++ record['proposed_CPUs'] = [record['proposed_CPUs']] ++ new_uuid = cls.create(record) ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ return new_uuid ++ ++ def pool_create(cls, config): ++ try: ++ record = sxp2map(config) ++ if record.has_key('proposed_CPUs') and \ ++ not isinstance(record['proposed_CPUs'], types.ListType): ++ record['proposed_CPUs'] = [record['proposed_CPUs']] ++ new_uuid = genuuid.createString() ++ pool = XendCPUPool(record, new_uuid, False) ++ pool.activate() ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_start(cls, poolname): ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ try: ++ pool.activate() ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_list(cls, names): ++ sxprs = [] ++ try: ++ node = XendNode.instance() ++ xd = XendDomain.instance() ++ pools = cls.get_all_records() ++ for (pool_uuid, pool_vals) in pools.items(): ++ if pool_vals['name_label'] in names or len(names) == 0: ++ # conv host_cpu refs to cpu number ++ cpus = [ node.get_host_cpu_field(cpu_ref, 'number') ++ for cpu_ref in pool_vals['host_CPUs'] ] ++ cpus.sort() ++ pool_vals['host_CPU_numbers'] = cpus ++ vm_names = [ xd.get_vm_by_uuid(uuid).getName() ++ for uuid in pool_vals['started_VMs'] ] ++ pool_vals['started_VM_names'] = vm_names ++ pool_vals['auto_power_on'] = int(pool_vals['auto_power_on']) ++ sxprs +=
[[pool_uuid] + map2sxp(pool_vals)] ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ return sxprs ++ ++ def pool_destroy(cls, poolname): ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ try: ++ pool.deactivate() ++ if not pool.is_managed(): ++ pool.destroy() ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_delete(cls, poolname): ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ try: ++ pool.destroy() ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_cpu_add(cls, poolname, cpu): ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ try: ++ cpu_ref = cls._cpu_number_to_ref(int(cpu)) ++ if cpu_ref: ++ pool.add_host_CPU_live(cpu_ref) ++ else: ++ raise PoolError(XEND_ERROR_INVALID_CPU, ++ 'CPU unknown') ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_cpu_remove(cls, poolname, cpu): ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ try: ++ cpu_ref = cls._cpu_number_to_ref(int(cpu)) ++ if cpu_ref: ++ pool.remove_host_CPU_live(cpu_ref) ++ else: ++ raise PoolError(XEND_ERROR_INVALID_CPU, ++ 'CPU unknown') ++ except XendAPIError, ex: ++ raise VmError(ex.get_api_error()) ++ ++ def pool_migrate(cls, domname, poolname): ++ dom = XendDomain.instance() ++ pool = cls.lookup_pool(poolname) ++ if not pool: ++ raise VmError('unknown pool %s' % poolname) ++ dominfo = dom.domain_lookup_nr(domname) ++ if not dominfo: ++ raise VmError('unknown domain %s' % domname) ++ domid = dominfo.getDomid() ++ if domid is not None: ++ if domid == 0: ++ raise VmError('could not move Domain-0') ++ try: ++ cls.move_domain(pool.get_uuid(), domid) ++ except Exception, ex: ++ raise VmError('could not move domain') ++ dominfo.info['pool_name'] = poolname ++ dom.managed_config_save(dominfo) ++ ++ pool_new = classmethod(pool_new) ++ pool_create = classmethod(pool_create) ++ pool_start = classmethod(pool_start) ++ pool_list = classmethod(pool_list) ++ pool_destroy = classmethod(pool_destroy) ++ pool_delete = classmethod(pool_delete) ++ pool_cpu_add = classmethod(pool_cpu_add) ++ pool_cpu_remove = classmethod(pool_cpu_remove) ++ pool_migrate = classmethod(pool_migrate) ++ ++ ++ # ++ # methods ++ # ++ ++ def is_managed(self): ++ """ Check, if pool is managed. ++ @return: True, if managed ++ @rtype: bool ++ """ ++ return self._managed ++ ++ def query_pool_id(self): ++ """ Get corresponding pool-id of pool instance from XenStore. ++ @return: pool id or None ++ @rtype: int ++ """ ++ self.pool_lock.acquire() ++ try: ++ for pool_id in xstransact.List(XS_POOLROOT): ++ uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid') ++ if uuid == self.get_uuid(): ++ return int(pool_id) ++ finally: ++ self.pool_lock.release() ++ ++ return None ++ ++ def update_XS(self, pool_id): ++ """ Write (or update) data in xenstore taken from instance.
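++ Creates XS_POOLROOT/<pool_id>/ with the entries uuid, name and ++ description plus the other_config subtree.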
++ @param pool_id: Pool id to build path to pool data in xenstore ++ @type pool_id: int ++ """ ++ self.pool_lock.acquire() ++ try: ++ xs_path = XS_POOLROOT + "%s/" % pool_id ++ xs_entries = { 'uuid' : self.get_uuid(), ++ 'name' : self.name_label, ++ 'description' : self.name_description ++ } ++ xstransact.Mkdir(xs_path) ++ xstransact.Mkdir(xs_path, 'other_config') ++ xstransact.Write(xs_path, xs_entries) ++ xstransact.Write('%s%s' % (xs_path, 'other_config'), ++ self.other_config) ++ finally: ++ self.pool_lock.release() ++ ++ def _update_ncpu(self, pool_id): ++ for pool_rec in xc.cpupool_getinfo(): ++ if pool_rec['cpupool'] == pool_id: ++ self.ncpu = len(pool_rec['cpulist']) ++ ++ def _checkName(self, name): ++ """ Check if a pool name is valid. Valid names contain alphabetic ++ characters, digits, or characters in '_-.:/+'. ++ The same name cannot be used for more than one pool at the same ++ time. ++ @param name: name ++ @type name: str ++ @raise: PoolError if invalid ++ """ ++ if name is None or name == '': ++ raise PoolError(XEND_ERROR_POOL_PARAM, 'Missing Pool Name') ++ if not re.search(r'^[A-Za-z0-9_\-\.\:\/\+]+$', name): ++ raise PoolError(XEND_ERROR_POOL_PARAM, 'Invalid Pool Name') ++ ++ pool = self.lookup_pool(name) ++ if pool and pool.get_uuid() != self.get_uuid(): ++ raise PoolError(XEND_ERROR_POOL_PARAM, ++ 'Pool name "%s" already exists' % name) ++ ++ ++ # ++ # class methods ++ # ++ ++ def recreate_active_pools(cls): ++ """ Read active pool config from hypervisor and create pool instances. ++ - Query pool ids and assigned CPUs from hypervisor. ++ - Query additional information for each pool from xenstore. ++ If an entry for a pool id is missing in xenstore, it will be ++ recreated with a new uuid and generic name (this is an error case) ++ - Create an XendCPUPool instance for each pool id ++ This function has to be called after recreation of managed pools. ++ """ ++ log.debug('recreate_active_pools') ++ ++ for pool_rec in xc.cpupool_getinfo(): ++ pool = pool_rec['cpupool'] ++ ++ # read pool data from xenstore ++ path = XS_POOLROOT + "%s/" % pool ++ uuid = xstransact.Read(path, 'uuid') ++ if not uuid: ++ # xenstore entry missing / invalid; create entry with new uuid ++ uuid = genuuid.createString() ++ name = "Pool-%s" % pool ++ try: ++ inst = XendCPUPool( { 'name_label' : name }, uuid, False ) ++ inst.update_XS(pool) ++ except PoolError, ex: ++ # log error and skip pool ++ log.error('cannot recreate pool %s; skipping (reason: %s)' \ ++ % (name, ex)) ++ else: ++ (name, descr) = xstransact.Read(path, 'name', 'description') ++ other_config = {} ++ for key in xstransact.List(path + 'other_config'): ++ other_config[key] = xstransact.Read( ++ path + 'other_config/%s' % key) ++ ++ # check existence of pool instance ++ inst = XendAPIStore.get(uuid, cls.getClass()) ++ if inst: ++ # update attributes of existing instance ++ inst.name_label = name ++ inst.name_description = descr ++ inst.other_config = other_config ++ else: ++ # recreate instance ++ try: ++ inst = XendCPUPool( ++ { 'name_label' : name, ++ 'name_description' : descr, ++ 'other_config' : other_config, ++ 'proposed_CPUs' : pool_rec['cpulist'], ++ 'ncpu' : len(pool_rec['cpulist']), ++ }, ++ uuid, False ) ++ except PoolError, ex: ++ # log error and skip pool ++ log.error( ++ 'cannot recreate pool %s; skipping (reason: %s)' \ ++ % (name, ex)) ++ ++ recreate_active_pools = classmethod(recreate_active_pools) ++ ++ ++ def recreate(cls, record, current_uuid): ++ """ Recreate a pool instance during xend restart.
++ @param record: attributes of pool ++ @type record: dict ++ @param current_uuid: uuid of pool to create ++ @type current_uuid: str ++ """ ++ XendCPUPool(record, current_uuid) ++ ++ recreate = classmethod(recreate) ++ ++ ++ def autostart_pools(cls): ++ """ Start managed pools that are marked as autostart pools. ++ This function is called after recreation of managed domains ++ during xend restart. ++ """ ++ cls.pool_lock.acquire() ++ try: ++ for inst in XendAPIStore.get_all(cls.getClass()): ++ if inst.is_managed() and inst.auto_power_on and \ ++ inst.query_pool_id() == None: ++ inst.activate() ++ finally: ++ cls.pool_lock.release() ++ ++ autostart_pools = classmethod(autostart_pools) ++ ++ ++ def move_domain(cls, pool_ref, domid): ++ cls.pool_lock.acquire() ++ try: ++ pool = XendAPIStore.get(pool_ref, cls.getClass()) ++ pool_id = pool.query_pool_id() ++ ++ xc.cpupool_movedomain(pool_id, domid) ++ finally: ++ cls.pool_lock.release() ++ ++ move_domain = classmethod(move_domain) ++ ++ ++ def query_pool_ref(cls, pool_id): ++ """ Get pool ref by pool id. ++ Take the ref from xenstore. ++ @param pool_id: id of the pool ++ @type pool_id: int ++ @return: ref ++ @rtype: str ++ """ ++ uuid = xstransact.Read(XS_POOLROOT + "%s/" % pool_id, 'uuid') ++ if uuid: ++ return [uuid] ++ else: ++ return [] ++ ++ query_pool_ref = classmethod(query_pool_ref) ++ ++ ++ def lookup_pool(cls, id_or_name): ++ """ Search XendCPUPool instance with given id_or_name. ++ @param id_or_name: pool id or pool name to search ++ @type id_or_name: [int, str] ++ @return: instance or None if not found ++ @rtype: XendCPUPool ++ """ ++ pool_uuid = None ++ try: ++ pool_id = int(id_or_name) ++ # pool id given ++ pool_uuid = cls.query_pool_ref(pool_id) ++ except ValueError: ++ # pool name given ++ pool_uuid = cls.get_by_name_label(id_or_name) ++ ++ if len(pool_uuid) > 0: ++ return XendAPIStore.get(pool_uuid[0], cls.getClass()) ++ else: ++ return None ++ ++ lookup_pool = classmethod(lookup_pool) ++ ++ ++ def _cpu_number_to_ref(cls, number): ++ node = XendNode.instance() ++ for cpu_ref in node.get_host_cpu_refs(): ++ if node.get_host_cpu_field(cpu_ref, 'number') == number: ++ return cpu_ref ++ return None ++ ++ _cpu_number_to_ref = classmethod(_cpu_number_to_ref) ++ ++ ++ def unbound_cpus(cls): ++ """ Build list containing the numbers of all cpus not bound to a pool. ++ Info is taken from the hypervisor.
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConfig.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendConfig.py
+@@ -128,6 +128,7 @@ XENAPI_CFG_TO_LEGACY_CFG = {
+ 'PV_bootloader': 'bootloader',
+ 'PV_bootloader_args': 'bootloader_args',
+ 'Description': 'description',
++ 'pool_name' : 'pool_name',
+ }
+
+ LEGACY_CFG_TO_XENAPI_CFG = reverse_dict(XENAPI_CFG_TO_LEGACY_CFG)
+@@ -233,6 +234,7 @@ XENAPI_CFG_TYPES = {
+ 's3_integrity' : int,
+ 'superpages' : int,
+ 'memory_sharing': int,
++ 'pool_name' : str,
+ }
+
+ # List of legacy configuration keys that have no equivalent in the
+@@ -278,6 +280,7 @@ LEGACY_CFG_TYPES = {
+ 'bootloader': str,
+ 'bootloader_args': str,
+ 'description': str,
++ 'pool_name': str,
+ }
+
+ # Values that should be stored in xenstore's /vm/ that is used
+@@ -299,6 +302,7 @@ LEGACY_XENSTORE_VM_PARAMS = [
+ 'on_xend_stop',
+ 'bootloader',
+ 'bootloader_args',
++ 'pool_name',
+ ]
+
+ ##
+@@ -407,6 +411,7 @@ class XendConfig(dict):
+ 'other_config': {},
+ 'platform': {},
+ 'target': 0,
++ 'pool_name' : 'Pool-0',
+ 'superpages': 0,
+ 'description': '',
+ }
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendConstants.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendConstants.py
+@@ -133,6 +133,8 @@ VTPM_DELETE_SCRIPT = auxbin.scripts_dir(
+
+ XS_VMROOT = "/vm/"
+
++XS_POOLROOT = "/local/pool/"
++
+ NR_PCI_FUNC = 8
+ NR_PCI_DEV = 32
+ NR_PCI_DEVFN = NR_PCI_FUNC * NR_PCI_DEV
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendDomainInfo.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendDomainInfo.py
+@@ -60,6 +60,7 @@ from xen.xend.xenstore.xsutil import Get
+ from xen.xend.xenstore.xswatch import xswatch
+ from xen.xend.XendConstants import *
+ from xen.xend.XendAPIConstants import *
++from xen.xend.XendCPUPool import XendCPUPool
+ from xen.xend.server.DevConstants import xenbusState
+ from xen.xend.server.BlktapController import TAPDISK_DEVICE, parseDeviceString
+
+@@ -2565,6 +2566,19 @@ class XendDomainInfo:
+ oos = self.info['platform'].get('oos', 1)
+ oos_off = 1 - int(oos)
+
++ # look up the pool id to use
++ pool_name = self.info['pool_name']
++ if len(pool_name) == 0:
++ pool_name = "Pool-0"
++
++ pool = XendCPUPool.lookup_pool(pool_name)
++
++ if pool is None:
++ raise VmError("unknown pool %s" % pool_name)
++ pool_id = pool.query_pool_id()
++ if pool_id is None:
++ raise VmError("pool %s not activated" % pool_name)
++
+ flags = (int(hvm) << 0) | (int(hap) << 1) | (int(s3_integrity) << 2) | (int(oos_off) << 3)
+
+ try:
+@@ -2573,6 +2587,7 @@ class XendDomainInfo:
+ ssidref = ssidref,
+ handle = uuid.fromString(self.info['uuid']),
+ flags = flags,
++ cpupool = pool_id,
+ target = self.info.target())
+ except Exception, e:
+ # may get here if due to ACM the operation is not permitted
+@@ -3613,6 +3628,11 @@ class XendDomainInfo:
+
+ retval = xc.sched_credit_domain_get(self.getDomid())
+ return retval
++ def get_cpu_pool(self):
++ if self.getDomid() is None:
++ return None
++ xeninfo = dom_get(self.domid)
++ return xeninfo['cpupool']
+ def get_power_state(self):
+ return XEN_API_VM_POWER_STATE[self._stateGet()]
+ def get_platform(self):
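
Before calling xc.domain_create, the XendDomainInfo hunk above resolves the
configured pool_name to a hypervisor pool id and refuses to start the VM if
the pool is unknown or not yet activated. The same lookup can be exercised
in isolation (a sketch, under the same import assumptions as the previous
example):

    from xen.xend.XendCPUPool import XendCPUPool
    from xen.xend.XendError import VmError

    pool_name = 'Pool-0'   # XendConfig's default when no pool is configured
    pool = XendCPUPool.lookup_pool(pool_name)
    if pool is None:
        raise VmError("unknown pool %s" % pool_name)
    pool_id = pool.query_pool_id()
    if pool_id is None:
        raise VmError("pool %s not activated" % pool_name)
    # pool_id is the value handed to xc.domain_create(..., cpupool=pool_id)
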
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendError.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendError.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendError.py
+@@ -18,6 +18,7 @@
+
+ from xmlrpclib import Fault
+
++import types
+ import XendClient
+
+ class XendInvalidDomain(Fault):
+@@ -186,6 +187,26 @@ class DirectPCIError(XendAPIError):
+ def __str__(self):
+ return 'DIRECT_PCI_ERROR: %s' % self.error
+
++class PoolError(XendAPIError):
++ def __init__(self, error, spec=None):
++ XendAPIError.__init__(self)
++ self.spec = []
++ if spec:
++ if isinstance(spec, types.ListType):
++ self.spec = spec
++ else:
++ self.spec = [spec]
++ self.error = error
++
++ def get_api_error(self):
++ return [self.error] + self.spec
++
++ def __str__(self):
++ if self.spec:
++ return '%s: %s' % (self.error, self.spec)
++ else:
++ return '%s' % self.error
++
+ class VDIError(XendAPIError):
+ def __init__(self, error, vdi):
+ XendAPIError.__init__(self)
+Index: xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/XendNode.py
++++ xen-4.0.0-testing/tools/python/xen/xend/XendNode.py
+@@ -43,6 +43,7 @@ from XendStateStore import XendStateStor
+ from XendMonitor import XendMonitor
+ from XendPPCI import XendPPCI
+ from XendPSCSI import XendPSCSI, XendPSCSI_HBA
++from xen.xend.XendCPUPool import XendCPUPool
+
+ class XendNode:
+ """XendNode - Represents a Domain 0 Host."""
+@@ -159,6 +160,8 @@ class XendNode:
+
+ self._init_PSCSIs()
+
++ self._init_cpu_pools()
++
+
+ def _init_networks(self):
+ # Initialise networks
+@@ -357,6 +360,18 @@ class XendNode:
+ for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
+ XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
+
++ def _init_cpu_pools(self):
++ # Initialise cpu_pools
++ saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
++ if saved_cpu_pools:
++ for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
++ try:
++ XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
++ except CreateUnspecifiedAttributeError:
++ log.warn("Error recreating %s %s",
++ XendCPUPool.getClass(), cpu_pool_uuid)
++ XendCPUPool.recreate_active_pools()
++
+
+ def add_network(self, interface):
+ # TODO
+@@ -577,6 +592,7 @@ class XendNode:
+ self.save_PPCIs()
+ self.save_PSCSIs()
+ self.save_PSCSI_HBAs()
++ self.save_cpu_pools()
+
+ def save_PIFs(self):
+ pif_records = dict([(pif_uuid, XendAPIStore.get(
+@@ -619,6 +635,12 @@ class XendNode:
+ for pscsi_HBA_uuid in XendPSCSI_HBA.get_all()])
+ self.state_store.save_state('pscsi_HBA', pscsi_HBA_records)
+
++ def save_cpu_pools(self):
++ cpu_pool_records = dict([(cpu_pool_uuid, XendAPIStore.get(
++ cpu_pool_uuid, XendCPUPool.getClass()).get_record())
++ for cpu_pool_uuid in XendCPUPool.get_all_managed()])
++ self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
++
+ def shutdown(self):
+ return 0
+
+@@ -930,6 +952,7 @@ class XendNode:
+ self.format_node_to_memory(info, 'node_to_memory')
+ info['node_to_dma32_mem'] = \
+ self.format_node_to_memory(info, 'node_to_dma32_mem')
++ info['free_cpus'] = len(XendCPUPool.unbound_cpus())
+
+ # FIXME: These are hard-coded to be the inverse of the getXenMemory
+ # functions in image.py. Find a cleaner way.
+@@ -949,6 +972,7 @@
+ 'virt_caps',
+ 'total_memory',
+ 'free_memory',
++ 'free_cpus',
+ 'max_free_memory',
+ 'max_para_memory',
+ 'max_hvm_memory',
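
The new PoolError follows the pattern of the other XendAPIError subclasses:
an error code plus an optional spec, flattened into a list for the Xen API.
A quick illustration of its two accessors (the 'POOL_PARAM' code is a
stand-in for the real error constants used elsewhere in the patch):

    from xen.xend.XendError import PoolError

    try:
        raise PoolError('POOL_PARAM', 'Missing Pool Name')
    except PoolError, ex:
        print ex.get_api_error()   # ['POOL_PARAM', 'Missing Pool Name']
        print str(ex)              # POOL_PARAM: ['Missing Pool Name']
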
+Index: xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/SrvServer.py
++++ xen-4.0.0-testing/tools/python/xen/xend/server/SrvServer.py
+@@ -52,6 +52,7 @@ from xen.xend import XendNode, XendOptio
+ from xen.xend.XendLogging import log
+ from xen.xend.XendClient import XEN_API_SOCKET
+ from xen.xend.XendDomain import instance as xenddomain
++from xen.xend.XendCPUPool import XendCPUPool
+ from xen.web.SrvDir import SrvDir
+
+ from SrvRoot import SrvRoot
+@@ -147,6 +148,12 @@ class XendServers:
+ status.close()
+ status = None
+
++ # auto start pools before domains are started
++ try:
++ XendCPUPool.autostart_pools()
++ except Exception, e:
++ log.exception("Failed while autostarting pools")
++
+ # Reaching this point means we can auto start domains
+ try:
+ xenddomain().autostart_domains()
+Index: xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xend/server/XMLRPCServer.py
++++ xen-4.0.0-testing/tools/python/xen/xend/server/XMLRPCServer.py
+@@ -33,6 +33,7 @@ from xen.xend.XendClient import XML_RPC_
+ from xen.xend.XendConstants import DOM_STATE_RUNNING
+ from xen.xend.XendLogging import log
+ from xen.xend.XendError import XendInvalidDomain
++from xen.xend.XendCPUPool import XendCPUPool
+
+ # vcpu_avail is a long and is not needed by the clients. It's far easier
+ # to just remove it then to try and marshal the long.
+@@ -98,6 +99,10 @@ methods = ['device_create', 'device_conf
+
+ exclude = ['domain_create', 'domain_restore']
+
++POOL_FUNCS = ['pool_create', 'pool_new', 'pool_start', 'pool_list',
++ 'pool_destroy', 'pool_delete', 'pool_cpu_add', 'pool_cpu_remove',
++ 'pool_migrate']
++
+ class XMLRPCServer:
+ def __init__(self, auth, use_xenapi, use_tcp = False,
+ ssl_key_file = None, ssl_cert_file = None,
+@@ -197,6 +202,11 @@ class XMLRPCServer:
+ if name not in exclude:
+ self.server.register_function(fn, "xend.domain.%s" % name[7:])
+
++ # Functions in XendCPUPool
++ for name in POOL_FUNCS:
++ fn = getattr(XendCPUPool, name)
++ self.server.register_function(fn, "xend.cpu_pool.%s" % name[5:])
++
+ # Functions in XendNode and XendDmesg
+ for type, lst, n in [(XendNode,
+ ['info', 'pciinfo', 'send_debug_keys',
+Index: xen-4.0.0-testing/tools/python/xen/xm/create.dtd
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.dtd
++++ xen-4.0.0-testing/tools/python/xen/xm/create.dtd
+@@ -50,6 +50,7 @@
+ s3_integrity CDATA #REQUIRED
+ vcpus_max CDATA #REQUIRED
+ vcpus_at_startup CDATA #REQUIRED
++ pool_name CDATA #REQUIRED
+ actions_after_shutdown %NORMAL_EXIT; #REQUIRED
+ actions_after_reboot %NORMAL_EXIT; #REQUIRED
+ actions_after_crash %CRASH_BEHAVIOUR; #REQUIRED
+Index: xen-4.0.0-testing/tools/python/xen/xm/create.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xm/create.py
++++ xen-4.0.0-testing/tools/python/xen/xm/create.py
+@@ -659,6 +659,10 @@ gopts.var('suppress_spurious_page_faults
+ fn=set_bool, default=None,
+ use="""Do not inject spurious page faults into this guest""")
+
++gopts.var('pool', val='POOL NAME',
++ fn=set_value, default=None,
++ use="""CPU pool to use for the VM""")
++
+ gopts.var('pci_msitranslate', val='TRANSLATE',
+ fn=set_int, default=1,
+ use="""Global PCI MSI-INTx translation flag (0=disable;
+@@ -1147,6 +1151,8 @@ def make_config(vals):
+ config.append(['localtime', vals.localtime])
+ if vals.oos:
+ config.append(['oos', vals.oos])
++ if vals.pool:
++ config.append(['pool_name', vals.pool])
+
+ config_image = configure_image(vals)
+ if vals.bootloader:
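
With the create.py hunks, a guest can be placed in a pool either through a
"pool=" line in its config file or as an override on the xm create command
line; make_config simply forwards the value as an SXP pair. Reduced to its
core (the pool name 'mypool' is a placeholder):

    # what make_config now appends when pool= is given,
    # e.g. from: xm create myguest pool=mypool
    pool = 'mypool'
    config = []
    if pool:
        config.append(['pool_name', pool])
    print config   # [['pool_name', 'mypool']]
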
+Index: xen-4.0.0-testing/tools/python/xen/xm/main.py
+===================================================================
+--- xen-4.0.0-testing.orig/tools/python/xen/xm/main.py
++++ xen-4.0.0-testing/tools/python/xen/xm/main.py
+@@ -56,6 +56,7 @@ from xen.util.xmlrpcclient import Server
+ import xen.util.xsm.xsm as security
+ from xen.util.xsm.xsm import XSMError
+ from xen.util.acmpolicy import ACM_LABEL_UNLABELED_DISPLAY
++from xen.util.sxputils import sxp2map, map2sxp as map_to_sxp
+ from xen.util import auxbin
+
+ import XenAPI
+@@ -235,6 +236,23 @@ SUBCOMMAND_HELP = {
+ 'tmem-freeable' : ('', 'Print freeable tmem (in MiB).'),
+ 'tmem-shared-auth' : ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
+
++ #
++ # pool commands
++ #
++ 'pool-create' : ('<ConfigFile> [vars]',
++ 'Create a CPU pool based on a ConfigFile.'),
++ 'pool-new' : ('<ConfigFile> [vars]',
++ 'Adds a CPU pool to Xend CPU pool management'),
++ 'pool-start' : ('<CPU Pool>', 'Starts a Xend CPU pool'),
++ 'pool-list' : ('[<CPU Pool>] [-l|--long] [-c|--cpus]', 'List CPU pools on host'),
++ 'pool-destroy' : ('<CPU Pool>', 'Deactivates a CPU pool'),
++ 'pool-delete' : ('<CPU Pool>',
++ 'Removes a CPU pool from Xend management'),
++ 'pool-cpu-add' : ('<CPU Pool> <CPU nr>', 'Adds a CPU to a CPU pool'),
++ 'pool-cpu-remove': ('<CPU Pool> <CPU nr>', 'Removes a CPU from a CPU pool'),
++ 'pool-migrate' : ('<Domain> <CPU Pool>',
++ 'Moves a domain into a CPU pool'),
++
+ # security
+
+ 'addlabel' : ('