65598582f5
OBS-URL: https://build.opensuse.org/package/show/systemsmanagement:saltstack/salt?expand=0&rev=179
From da22c9ee9bd3a2ca34d028e40ff3b476bb944933 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cbosdonnat@suse.com>
Date: Mon, 5 Oct 2020 15:50:44 +0200
Subject: [PATCH] Xen disk fixes (#264)

* virt: convert volumes to disks for xen

The libvirt xen driver does not handle disks of 'volume' type. We thus
need to convert them into their equivalent using the 'file' or 'block'
type (issue #58333).

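A rough sketch of that mapping (illustrative only; the patch implements it as branches in the new _disk_from_pool() helper):

    # Pool type of the backing storage pool -> libvirt disk type used
    # instead of <disk type='volume'> when the hypervisor is Xen.
    POOL_TYPE_TO_DISK_TYPE = {
        "dir": "file", "fs": "file", "netfs": "file",
        "logical": "block", "disk": "block", "iscsi": "block", "scsi": "block",
        "rbd": "network", "gluster": "network", "sheepdog": "network",
    }
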
* Add pool and volume names to virt._get_all_volumes_paths

In order to avoid code duplication, extend the _get_all_volumes_paths()
helper function to also provide the volume and pool names.

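The helper now maps each volume path to a small record rather than a bare list of backing store paths, for example (paths and names are illustrative):

    {
        "/var/lib/libvirt/images/vol01.qcow2": {
            "pool": "default",
            "name": "vol01.qcow2",
            "backing_stores": ["/var/lib/libvirt/images/base.qcow2"],
        },
    }
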
* virt.get_disk: show pools and volumes if possible

In some cases, like Xen, we have to change the volume disks into file or
block ones. Show pool/volume information in virt.get_disk if possible.

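When a disk path matches a known volume, the returned entry is keyed as pool/volume and carries the volume sizes, for instance (values are illustrative):

    "vda": {
        "type": "disk",
        "file": "default/srv01_system",
        "file format": "qcow2",
        "virtual size": 1234567,
        "disk size": 12345,
    }
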
* virt: use the pool path in case the volume doesn't exist

When computing the volume path to generate the XML of a domain, the
volume may not exist yet. This typically happens during a virt.update
when generating the new XML to compare.

In such cases, use the pool target path to compute the volume path.

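A minimal sketch of the fallback (pool path and volume name are illustrative):

    # "hello_system" is not defined yet: derive its future path from the
    # pool's <target><path> element instead of the volume XML.
    pool_path = "/path/to/images"
    volume_path = pool_path + "/" + "hello_system"  # -> /path/to/images/hello_system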
---
 changelog/58333.fixed                       |   1 +
 salt/modules/virt.py                        | 264 ++++++++++++--------
 salt/templates/virt/libvirt_domain.jinja    |  16 +-
 tests/pytests/unit/modules/virt/conftest.py |   4 +-
 tests/unit/modules/test_virt.py             | 180 +++++--------
 5 files changed, 232 insertions(+), 233 deletions(-)
 create mode 100644 changelog/58333.fixed

diff --git a/changelog/58333.fixed b/changelog/58333.fixed
new file mode 100644
index 0000000000..f958d40964
--- /dev/null
+++ b/changelog/58333.fixed
@@ -0,0 +1 @@
+Convert disks of volume type to file or block disks on Xen
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index c1a73fcb7f..e306bc0679 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -453,6 +453,8 @@ def _get_disks(conn, dom):
"""
disks = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
+ # Get the path, pool, volume name of each volume we can
+ all_volumes = _get_all_volumes_paths(conn)
for elem in doc.findall("devices/disk"):
source = elem.find("source")
if source is None:
@@ -465,13 +467,61 @@ def _get_disks(conn, dom):
extra_properties = None
if "dev" in target.attrib:
disk_type = elem.get("type")
+
+ def _get_disk_volume_data(pool_name, volume_name):
+ qemu_target = "{}/{}".format(pool_name, volume_name)
+ pool = conn.storagePoolLookupByName(pool_name)
+ vol = pool.storageVolLookupByName(volume_name)
+ vol_info = vol.info()
+ extra_properties = {
+ "virtual size": vol_info[1],
+ "disk size": vol_info[2],
+ }
+
+ backing_files = [
+ {
+ "file": node.find("source").get("file"),
+ "file format": node.find("format").get("type"),
+ }
+ for node in elem.findall(".//backingStore[source]")
+ ]
+
+ if backing_files:
+ # We had the backing files in a flat list, nest them again.
+ extra_properties["backing file"] = backing_files[0]
+ parent = extra_properties["backing file"]
+ for sub_backing_file in backing_files[1:]:
+ parent["backing file"] = sub_backing_file
+ parent = sub_backing_file
+
+ else:
+ # In some cases the backing chain is not displayed by the domain definition
+ # Try to see if we have some of it in the volume definition.
+ vol_desc = ElementTree.fromstring(vol.XMLDesc())
+ backing_path = vol_desc.find("./backingStore/path")
+ backing_format = vol_desc.find("./backingStore/format")
+ if backing_path is not None:
+ extra_properties["backing file"] = {"file": backing_path.text}
+ if backing_format is not None:
+ extra_properties["backing file"][
+ "file format"
+ ] = backing_format.get("type")
+ return (qemu_target, extra_properties)
+
if disk_type == "file":
qemu_target = source.get("file", "")
if qemu_target.startswith("/dev/zvol/"):
disks[target.get("dev")] = {"file": qemu_target, "zfs": True}
continue
- # Extract disk sizes, snapshots, backing files
- if elem.get("device", "disk") != "cdrom":
+
+ if qemu_target in all_volumes.keys():
+ # If the qemu_target is a known path, output a volume
+ volume = all_volumes[qemu_target]
+ qemu_target, extra_properties = _get_disk_volume_data(
+ volume["pool"], volume["name"]
+ )
+ elif elem.get("device", "disk") != "cdrom":
+ # Extract disk sizes, snapshots, backing files
try:
stdout = subprocess.Popen(
[
@@ -493,6 +543,12 @@ def _get_disks(conn, dom):
disk.update({"file": "Does not exist"})
elif disk_type == "block":
qemu_target = source.get("dev", "")
+ # If the qemu_target is a known path, output a volume
+ if qemu_target in all_volumes.keys():
+ volume = all_volumes[qemu_target]
+ qemu_target, extra_properties = _get_disk_volume_data(
+ volume["pool"], volume["name"]
+ )
elif disk_type == "network":
qemu_target = source.get("protocol")
source_name = source.get("name")
@@ -531,43 +587,9 @@ def _get_disks(conn, dom):
elif disk_type == "volume":
pool_name = source.get("pool")
volume_name = source.get("volume")
- qemu_target = "{}/{}".format(pool_name, volume_name)
- pool = conn.storagePoolLookupByName(pool_name)
- vol = pool.storageVolLookupByName(volume_name)
- vol_info = vol.info()
- extra_properties = {
- "virtual size": vol_info[1],
- "disk size": vol_info[2],
- }
-
- backing_files = [
- {
- "file": node.find("source").get("file"),
- "file format": node.find("format").get("type"),
- }
- for node in elem.findall(".//backingStore[source]")
- ]
-
- if backing_files:
- # We had the backing files in a flat list, nest them again.
- extra_properties["backing file"] = backing_files[0]
- parent = extra_properties["backing file"]
- for sub_backing_file in backing_files[1:]:
- parent["backing file"] = sub_backing_file
- parent = sub_backing_file
-
- else:
- # In some cases the backing chain is not displayed by the domain definition
- # Try to see if we have some of it in the volume definition.
- vol_desc = ElementTree.fromstring(vol.XMLDesc())
- backing_path = vol_desc.find("./backingStore/path")
- backing_format = vol_desc.find("./backingStore/format")
- if backing_path is not None:
- extra_properties["backing file"] = {"file": backing_path.text}
- if backing_format is not None:
- extra_properties["backing file"][
- "file format"
- ] = backing_format.get("type")
+ qemu_target, extra_properties = _get_disk_volume_data(
+ pool_name, volume_name
+ )

if not qemu_target:
continue
@@ -630,6 +652,73 @@ def _get_target(target, ssh):
return " {}://{}/{}".format(proto, target, "system")


+def _get_volume_path(pool, volume_name):
+ """
+ Get the path to a volume. If the volume doesn't exist, compute its path from the pool one.
+ """
+ if volume_name in pool.listVolumes():
+ volume = pool.storageVolLookupByName(volume_name)
+ volume_xml = ElementTree.fromstring(volume.XMLDesc())
+ return volume_xml.find("./target/path").text
+
+ # Get the path from the pool if the volume doesn't exist yet
+ pool_xml = ElementTree.fromstring(pool.XMLDesc())
+ pool_path = pool_xml.find("./target/path").text
+ return pool_path + "/" + volume_name
+
+
+def _disk_from_pool(conn, pool, pool_xml, volume_name):
+ """
+ Create a disk definition out of the pool XML and volume name.
+ The aim of this function is to replace the volume-based definition when not handled by libvirt.
+ It returns the disk Jinja context to be used when creating the VM
+ """
+ pool_type = pool_xml.get("type")
+ disk_context = {}
+
+ # handle dir, fs and netfs
+ if pool_type in ["dir", "netfs", "fs"]:
+ disk_context["type"] = "file"
+ disk_context["source_file"] = _get_volume_path(pool, volume_name)
+
+ elif pool_type in ["logical", "disk", "iscsi", "scsi"]:
+ disk_context["type"] = "block"
+ disk_context["format"] = "raw"
+ disk_context["source_file"] = _get_volume_path(pool, volume_name)
+
+ elif pool_type in ["rbd", "gluster", "sheepdog"]:
+ # libvirt can't handle rbd, gluster and sheepdog as volumes
+ disk_context["type"] = "network"
+ disk_context["protocol"] = pool_type
+ # Copy the hosts from the pool definition
+ disk_context["hosts"] = [
+ {"name": host.get("name"), "port": host.get("port")}
+ for host in pool_xml.findall(".//host")
+ ]
+ dir_node = pool_xml.find("./source/dir")
+ # Gluster and RBD need pool/volume name
+ name_node = pool_xml.find("./source/name")
+ if name_node is not None:
+ disk_context["volume"] = "{}/{}".format(name_node.text, volume_name)
+ # Copy the authentication if any for RBD
+ auth_node = pool_xml.find("./source/auth")
+ if auth_node is not None:
+ username = auth_node.get("username")
+ secret_node = auth_node.find("./secret")
+ usage = secret_node.get("usage")
+ if not usage:
+ # Get the usage from the UUID
+ uuid = secret_node.get("uuid")
+ usage = conn.secretLookupByUUIDString(uuid).usageID()
+ disk_context["auth"] = {
+ "type": "ceph",
+ "username": username,
+ "usage": usage,
+ }
+
+ return disk_context
+
+
def _gen_xml(
conn,
name,
@@ -735,41 +824,16 @@ def _gen_xml(
elif disk.get("pool"):
disk_context["volume"] = disk["filename"]
# If we had no source_file, then we want a volume
- pool_xml = ElementTree.fromstring(
- conn.storagePoolLookupByName(disk["pool"]).XMLDesc()
- )
+ pool = conn.storagePoolLookupByName(disk["pool"])
+ pool_xml = ElementTree.fromstring(pool.XMLDesc())
pool_type = pool_xml.get("type")
- if pool_type in ["rbd", "gluster", "sheepdog"]:
- # libvirt can't handle rbd, gluster and sheepdog as volumes
- disk_context["type"] = "network"
- disk_context["protocol"] = pool_type
- # Copy the hosts from the pool definition
- disk_context["hosts"] = [
- {"name": host.get("name"), "port": host.get("port")}
- for host in pool_xml.findall(".//host")
- ]
- dir_node = pool_xml.find("./source/dir")
- # Gluster and RBD need pool/volume name
- name_node = pool_xml.find("./source/name")
- if name_node is not None:
- disk_context["volume"] = "{}/{}".format(
- name_node.text, disk_context["volume"]
- )
- # Copy the authentication if any for RBD
- auth_node = pool_xml.find("./source/auth")
- if auth_node is not None:
- username = auth_node.get("username")
- secret_node = auth_node.find("./secret")
- usage = secret_node.get("usage")
- if not usage:
- # Get the usage from the UUID
- uuid = secret_node.get("uuid")
- usage = conn.secretLookupByUUIDString(uuid).usageID()
- disk_context["auth"] = {
- "type": "ceph",
- "username": username,
- "usage": usage,
- }
+
+ # For Xen VMs convert all pool types (issue #58333)
+ if hypervisor == "xen" or pool_type in ["rbd", "gluster", "sheepdog"]:
+ disk_context.update(
+ _disk_from_pool(conn, pool, pool_xml, disk_context["volume"])
+ )
+
else:
if pool_type in ["disk", "logical"]:
# The volume format for these types doesn't match the driver format in the VM
@@ -2441,9 +2505,9 @@ def update(
data = {k: v for k, v in locals().items() if bool(v)}
if boot_dev:
data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())}
- need_update = salt.utils.xmlutil.change_xml(
- desc, data, params_mapping
- ) or need_update
+ need_update = (
+ salt.utils.xmlutil.change_xml(desc, data, params_mapping) or need_update
+ )

# Update the XML definition with the new disks and diff changes
devices_node = desc.find("devices")
@@ -4092,7 +4156,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
- matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"])
+ matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"],)
if matcher:
pool_name = matcher.group("pool")
pool = None
@@ -6676,29 +6740,33 @@ def _is_valid_volume(vol):

def _get_all_volumes_paths(conn):
"""
- Extract the path and backing stores path of all volumes.
+ Extract the path, name, pool name and backing stores path of all volumes.

:param conn: libvirt connection to use
"""
- volumes = [
- vol
- for l in [
- obj.listAllVolumes()
- for obj in conn.listAllStoragePools()
- if obj.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING
- ]
- for vol in l
+ pools = [
+ pool
+ for pool in conn.listAllStoragePools()
+ if pool.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING
]
- return {
- vol.path(): [
- path.text
- for path in ElementTree.fromstring(vol.XMLDesc()).findall(
- ".//backingStore/path"
- )
- ]
- for vol in volumes
- if _is_valid_volume(vol)
- }
+ volumes = {}
+ for pool in pools:
+ pool_volumes = {
+ volume.path(): {
+ "pool": pool.name(),
+ "name": volume.name(),
+ "backing_stores": [
+ path.text
+ for path in ElementTree.fromstring(volume.XMLDesc()).findall(
+ ".//backingStore/path"
+ )
+ ],
+ }
+ for volume in pool.listAllVolumes()
+ if _is_valid_volume(volume)
+ }
+ volumes.update(pool_volumes)
+ return volumes


def volume_infos(pool=None, volume=None, **kwargs):
@@ -6769,8 +6837,8 @@ def volume_infos(pool=None, volume=None, **kwargs):
if vol.path():
as_backing_store = {
path
- for (path, all_paths) in backing_stores.items()
- if vol.path() in all_paths
+ for (path, volume) in backing_stores.items()
+ if vol.path() in volume.get("backing_stores")
}
used_by = [
vm_name
diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
index 2a2f5e4141..18728a75b5 100644
--- a/salt/templates/virt/libvirt_domain.jinja
+++ b/salt/templates/virt/libvirt_domain.jinja
@@ -33,21 +33,13 @@
{% if disk.type == 'file' and 'source_file' in disk -%}
<source file='{{ disk.source_file }}' />
{% endif %}
+ {% if disk.type == 'block' -%}
+ <source dev='{{ disk.source_file }}' />
+ {% endif %}
{% if disk.type == 'volume' and 'pool' in disk -%}
<source pool='{{ disk.pool }}' volume='{{ disk.volume }}' />
{% endif %}
- {%- if disk.type == 'network' %}
- <source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
- {%- for host in disk.get('hosts') %}
- <host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
- {%- endfor %}
- {%- if disk.get("auth") %}
- <auth username='{{ disk.auth.username }}'>
- <secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
- </auth>
- {%- endif %}
- </source>
- {%- endif %}
+ {%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
<target dev='{{ disk.target_dev }}' bus='{{ disk.disk_bus }}' />
{% if disk.address -%}
<address type='drive' controller='0' bus='0' target='0' unit='{{ disk.index }}' />
diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py
index d70c2abc9e..1c32ae12eb 100644
--- a/tests/pytests/unit/modules/virt/conftest.py
+++ b/tests/pytests/unit/modules/virt/conftest.py
@@ -48,7 +48,7 @@ class MappedResultMock(MagicMock):


@pytest.fixture(autouse=True)
-def setup_loader():
+def setup_loader(request):
# Create libvirt mock and connection mock
mock_libvirt = LibvirtMock()
mock_conn = MagicMock()
@@ -62,7 +62,7 @@ def setup_loader():
},
config: {},
}
- with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
+ with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
yield loader_mock


diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
index 4a4c0395a7..e214e406e2 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
@@ -1138,6 +1138,65 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual("vdb2", source.attrib["volume"])
self.assertEqual("raw", disk.find("driver").get("type"))

+ def test_get_xml_volume_xen_dir(self):
+ """
+ Test virt._gen_xml generating disks for a Xen hypervisor
+ """
+ self.mock_conn.listStoragePools.return_value = ["default"]
+ pool_mock = MagicMock()
+ pool_mock.XMLDesc.return_value = (
+ "<pool type='dir'><target><path>/path/to/images</path></target></pool>"
+ )
+ volume_xml = "<volume><target><path>/path/to/images/hello_system</path></target></volume>"
+ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
+ self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+ diskp = virt._disk_profile(
+ self.mock_conn,
+ None,
+ "xen",
+ [{"name": "system", "pool": "default"}],
+ "hello",
+ )
+ xml_data = virt._gen_xml(
+ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64",
+ )
+ root = ET.fromstring(xml_data)
+ disk = root.findall(".//disk")[0]
+ self.assertEqual(disk.attrib["type"], "file")
+ self.assertEqual(
+ "/path/to/images/hello_system", disk.find("source").attrib["file"]
+ )
+
+ def test_get_xml_volume_xen_block(self):
+ """
+ Test virt._gen_xml generating disks for a Xen hypervisor
+ """
+ self.mock_conn.listStoragePools.return_value = ["default"]
+ pool_mock = MagicMock()
+ pool_mock.listVolumes.return_value = ["vol01"]
+ volume_xml = "<volume><target><path>/dev/to/vol01</path></target></volume>"
+ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
+ self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+
+ for pool_type in ["logical", "disk", "iscsi", "scsi"]:
+ pool_mock.XMLDesc.return_value = "<pool type='{}'><source><device path='/dev/sda'/></source></pool>".format(
+ pool_type
+ )
+ diskp = virt._disk_profile(
+ self.mock_conn,
+ None,
+ "xen",
+ [{"name": "system", "pool": "default", "source_file": "vol01"}],
+ "hello",
+ )
+ xml_data = virt._gen_xml(
+ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64",
+ )
+ root = ET.fromstring(xml_data)
+ disk = root.findall(".//disk")[0]
+ self.assertEqual(disk.attrib["type"], "block")
+ self.assertEqual("/dev/to/vol01", disk.find("source").attrib["dev"])
+
def test_gen_xml_cdrom(self):
"""
Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
@@ -5499,124 +5558,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"vol1.qcow2",
"/path/to/file",
)
-
- def test_get_disks(self):
- """
- Test the virt.get_disks function
- """
- # test with volumes
- vm_def = """<domain type='kvm' id='3'>
- <name>srv01</name>
- <devices>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='srv01_system'/>
- <backingStore/>
- <target dev='vda' bus='virtio'/>
- <alias name='virtio-disk0'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
- </disk>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='srv01_data'/>
- <backingStore type='file' index='1'>
- <format type='qcow2'/>
- <source file='/var/lib/libvirt/images/vol01'/>
- <backingStore/>
- </backingStore>
- <target dev='vdb' bus='virtio'/>
- <alias name='virtio-disk1'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
- </disk>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='vm05_system'/>
- <backingStore type='file' index='1'>
- <format type='qcow2'/>
- <source file='/var/lib/libvirt/images/vm04_system.qcow2'/>
- <backingStore type='file' index='2'>
- <format type='raw'/>
- <source file='/var/testsuite-data/disk-image-template.raw'/>
- <backingStore/>
- </backingStore>
- </backingStore>
- <target dev='vdc' bus='virtio'/>
- <alias name='virtio-disk0'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
- </disk>
- <disk type='network' device='cdrom'>
- <driver name='qemu' type='raw' cache='none' io='native'/>
- <source protocol='http' name='/pub/iso/myimage.iso' query='foo=bar&baz=flurb' index='1'>
- <host name='dev-srv.tf.local' port='80'/>
- </source>
- <target dev='hda' bus='ide'/>
- <readonly/>
- <alias name='ide0-0-0'/>
- <address type='drive' controller='0' bus='0' target='0' unit='0'/>
- </disk>
- </devices>
- </domain>
- """
- self.set_mock_vm("srv01", vm_def)
-
- pool_mock = MagicMock()
- pool_mock.storageVolLookupByName.return_value.info.return_value = [
- 0,
- 1234567,
- 12345,
- ]
- pool_mock.storageVolLookupByName.return_value.XMLDesc.side_effect = [
- "<volume />",
- """
- <volume>
- <backingStore>
- <path>/var/lib/libvirt/images/vol01</path>
- <format type="qcow2"/>
- </backingStore>
- </volume>""",
- ]
- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
-
- self.assertDictEqual(
- virt.get_disks("srv01"),
- {
- "vda": {
- "type": "disk",
- "file": "default/srv01_system",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- },
- "vdb": {
- "type": "disk",
- "file": "default/srv01_data",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- "backing file": {
- "file": "/var/lib/libvirt/images/vol01",
- "file format": "qcow2",
- },
- },
- "vdc": {
- "type": "disk",
- "file": "default/vm05_system",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- "backing file": {
- "file": "/var/lib/libvirt/images/vm04_system.qcow2",
- "file format": "qcow2",
- "backing file": {
- "file": "/var/testsuite-data/disk-image-template.raw",
- "file format": "raw",
- },
- },
- },
- "hda": {
- "type": "cdrom",
- "file format": "raw",
- "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb",
- },
- },
- )
--
2.29.2