From d260c5984d64fc8448a6adf8d5bf07ebb08e4126 Mon Sep 17 00:00:00 2001
From: Cedric Bosdonnat <cbosdonnat@suse.com>
Date: Mon, 5 Oct 2020 15:50:44 +0200
Subject: [PATCH] Xen disk fixes (#264)

* virt: convert volumes to disks for xen

The libvirt xen driver does not handle disks of 'volume' type. We thus
need to convert them into their equivalent using the 'file' or 'block'
type (issue #58333).
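
For illustration only (using the hypothetical pool layout of the tests
added below, where a 'dir' pool targets /path/to/default and an 'iscsi'
pool targets /path/to/my-iscsi), the disk context generated for a Xen VM
now roughly becomes:

    # volume on a 'dir' pool -> plain file disk
    {"type": "file", "source_file": "/path/to/default/my_vm_system"}
    # volume on an 'iscsi' pool -> block disk
    {"type": "block", "format": "raw", "source_file": "/path/to/my-iscsi/unit:0:0:1"}

instead of a <source pool='...' volume='...'/> element that the Xen
libvirt driver rejects.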

* Add pool and volume names to virt._get_all_volumes_paths

In order to avoid code duplication, extend the _get_all_volumes_paths()
helper function to also provide the volume and pool names.
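
As a sketch of the new return value (paths and names here are only
examples taken from the test fixtures), the helper now maps each volume
path to its pool, name and backing chain:

    {
        "/path/to/default/srv01_data": {
            "pool": "default",
            "name": "srv01_data",
            "backing_stores": ["/var/lib/libvirt/images/vol01"],
        },
    }

so that _get_disks() and volume_infos() can tell whether a disk path
corresponds to a storage volume and in which pool it lives.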

* virt.get_disks: show pools and volumes if possible

In some cases like Xen we have to change the volume disks into file or
block ones. Show pool/volume information in the virt.get_disks output if
possible.

* virt: use the pool path in case the volume doesn't exist

When computing the volume path to generate the XML of a domain, the
volume may not exist yet. This happens typically during a virt.update
when generating the new XML to compare.

In such cases, use the pool target path to compute the volume path.
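
A minimal sketch of that fallback (the pool path below is only an
example): for a volume that does not exist yet in a pool whose target is
/path/to/default, the generated XML simply uses

    pool_path = pool_xml.find("./target/path").text
    volume_path = pool_path + "/" + volume_name  # e.g. /path/to/default/my_vm_file-data

which matches the path the volume will get once it is actually created.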
---
changelog/58333.fixed | 1 +
salt/modules/virt.py | 258 +++++++++++-------
salt/templates/virt/libvirt_disks.jinja | 12 +
salt/templates/virt/libvirt_domain.jinja | 17 +-
tests/pytests/unit/modules/virt/__init__.py | 0
tests/pytests/unit/modules/virt/conftest.py | 191 +++++++++++++
.../pytests/unit/modules/virt/test_domain.py | 256 +++++++++++++++++
.../pytests/unit/modules/virt/test_helpers.py | 11 +
tests/unit/modules/test_virt.py | 180 ++++--------
9 files changed, 698 insertions(+), 228 deletions(-)
create mode 100644 changelog/58333.fixed
create mode 100644 salt/templates/virt/libvirt_disks.jinja
create mode 100644 tests/pytests/unit/modules/virt/__init__.py
create mode 100644 tests/pytests/unit/modules/virt/conftest.py
create mode 100644 tests/pytests/unit/modules/virt/test_domain.py
create mode 100644 tests/pytests/unit/modules/virt/test_helpers.py

diff --git a/changelog/58333.fixed b/changelog/58333.fixed
new file mode 100644
index 0000000000..f958d40964
--- /dev/null
+++ b/changelog/58333.fixed
@@ -0,0 +1 @@
+Convert disks of volume type to file or block disks on Xen
diff --git a/salt/modules/virt.py b/salt/modules/virt.py
index 4a8a55ced6..34643787f9 100644
--- a/salt/modules/virt.py
+++ b/salt/modules/virt.py
@@ -459,6 +459,8 @@ def _get_disks(conn, dom):
"""
disks = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
+ # Get the path, pool, volume name of each volume we can
+ all_volumes = _get_all_volumes_paths(conn)
for elem in doc.findall("devices/disk"):
source = elem.find("source")
if source is None:
@@ -471,13 +473,61 @@ def _get_disks(conn, dom):
extra_properties = None
if "dev" in target.attrib:
disk_type = elem.get("type")
+
+ def _get_disk_volume_data(pool_name, volume_name):
+ qemu_target = "{}/{}".format(pool_name, volume_name)
+ pool = conn.storagePoolLookupByName(pool_name)
+ vol = pool.storageVolLookupByName(volume_name)
+ vol_info = vol.info()
+ extra_properties = {
+ "virtual size": vol_info[1],
+ "disk size": vol_info[2],
+ }
+
+ backing_files = [
+ {
+ "file": node.find("source").get("file"),
+ "file format": node.find("format").get("type"),
+ }
+ for node in elem.findall(".//backingStore[source]")
+ ]
+
+ if backing_files:
+ # We had the backing files in a flat list, nest them again.
+ extra_properties["backing file"] = backing_files[0]
+ parent = extra_properties["backing file"]
+ for sub_backing_file in backing_files[1:]:
+ parent["backing file"] = sub_backing_file
+ parent = sub_backing_file
+
+ else:
+ # In some cases the backing chain is not displayed by the domain definition
+ # Try to see if we have some of it in the volume definition.
+ vol_desc = ElementTree.fromstring(vol.XMLDesc())
+ backing_path = vol_desc.find("./backingStore/path")
+ backing_format = vol_desc.find("./backingStore/format")
+ if backing_path is not None:
+ extra_properties["backing file"] = {"file": backing_path.text}
+ if backing_format is not None:
+ extra_properties["backing file"][
+ "file format"
+ ] = backing_format.get("type")
+ return (qemu_target, extra_properties)
+
if disk_type == "file":
qemu_target = source.get("file", "")
if qemu_target.startswith("/dev/zvol/"):
disks[target.get("dev")] = {"file": qemu_target, "zfs": True}
continue
- # Extract disk sizes, snapshots, backing files
- if elem.get("device", "disk") != "cdrom":
+
+ if qemu_target in all_volumes.keys():
+ # If the qemu_target is a known path, output a volume
+ volume = all_volumes[qemu_target]
+ qemu_target, extra_properties = _get_disk_volume_data(
+ volume["pool"], volume["name"]
+ )
+ elif elem.get("device", "disk") != "cdrom":
+ # Extract disk sizes, snapshots, backing files
try:
stdout = subprocess.Popen(
[
@@ -499,6 +549,12 @@ def _get_disks(conn, dom):
disk.update({"file": "Does not exist"})
elif disk_type == "block":
qemu_target = source.get("dev", "")
+ # If the qemu_target is a known path, output a volume
+ if qemu_target in all_volumes.keys():
+ volume = all_volumes[qemu_target]
+ qemu_target, extra_properties = _get_disk_volume_data(
+ volume["pool"], volume["name"]
+ )
elif disk_type == "network":
qemu_target = source.get("protocol")
source_name = source.get("name")
@@ -537,43 +593,9 @@ def _get_disks(conn, dom):
elif disk_type == "volume":
pool_name = source.get("pool")
volume_name = source.get("volume")
- qemu_target = "{}/{}".format(pool_name, volume_name)
- pool = conn.storagePoolLookupByName(pool_name)
- vol = pool.storageVolLookupByName(volume_name)
- vol_info = vol.info()
- extra_properties = {
- "virtual size": vol_info[1],
- "disk size": vol_info[2],
- }
-
- backing_files = [
- {
- "file": node.find("source").get("file"),
- "file format": node.find("format").get("type"),
- }
- for node in elem.findall(".//backingStore[source]")
- ]
-
- if backing_files:
- # We had the backing files in a flat list, nest them again.
- extra_properties["backing file"] = backing_files[0]
- parent = extra_properties["backing file"]
- for sub_backing_file in backing_files[1:]:
- parent["backing file"] = sub_backing_file
- parent = sub_backing_file
-
- else:
- # In some cases the backing chain is not displayed by the domain definition
- # Try to see if we have some of it in the volume definition.
- vol_desc = ElementTree.fromstring(vol.XMLDesc())
- backing_path = vol_desc.find("./backingStore/path")
- backing_format = vol_desc.find("./backingStore/format")
- if backing_path is not None:
- extra_properties["backing file"] = {"file": backing_path.text}
- if backing_format is not None:
- extra_properties["backing file"][
- "file format"
- ] = backing_format.get("type")
+ qemu_target, extra_properties = _get_disk_volume_data(
+ pool_name, volume_name
+ )

if not qemu_target:
continue
@@ -636,6 +658,73 @@ def _get_target(target, ssh):
return " {}://{}/{}".format(proto, target, "system")


+def _get_volume_path(pool, volume_name):
+ """
+ Get the path to a volume. If the volume doesn't exist, compute its path from the pool one.
+ """
+ if volume_name in pool.listVolumes():
+ volume = pool.storageVolLookupByName(volume_name)
+ volume_xml = ElementTree.fromstring(volume.XMLDesc())
+ return volume_xml.find("./target/path").text
+
+ # Get the path from the pool if the volume doesn't exist yet
+ pool_xml = ElementTree.fromstring(pool.XMLDesc())
+ pool_path = pool_xml.find("./target/path").text
+ return pool_path + "/" + volume_name
+
+
+def _disk_from_pool(conn, pool, pool_xml, volume_name):
+ """
+ Create a disk definition out of the pool XML and volume name.
+ The aim of this function is to replace the volume-based definition when not handled by libvirt.
+ It returns the disk Jinja context to be used when creating the VM
+ """
+ pool_type = pool_xml.get("type")
+ disk_context = {}
+
+ # handle dir, fs and netfs
+ if pool_type in ["dir", "netfs", "fs"]:
+ disk_context["type"] = "file"
+ disk_context["source_file"] = _get_volume_path(pool, volume_name)
+
+ elif pool_type in ["logical", "disk", "iscsi", "scsi"]:
+ disk_context["type"] = "block"
+ disk_context["format"] = "raw"
+ disk_context["source_file"] = _get_volume_path(pool, volume_name)
+
+ elif pool_type in ["rbd", "gluster", "sheepdog"]:
+ # libvirt can't handle rbd, gluster and sheepdog as volumes
+ disk_context["type"] = "network"
+ disk_context["protocol"] = pool_type
+ # Copy the hosts from the pool definition
+ disk_context["hosts"] = [
+ {"name": host.get("name"), "port": host.get("port")}
+ for host in pool_xml.findall(".//host")
+ ]
+ dir_node = pool_xml.find("./source/dir")
+ # Gluster and RBD need pool/volume name
+ name_node = pool_xml.find("./source/name")
+ if name_node is not None:
+ disk_context["volume"] = "{}/{}".format(name_node.text, volume_name)
+ # Copy the authentication if any for RBD
+ auth_node = pool_xml.find("./source/auth")
+ if auth_node is not None:
+ username = auth_node.get("username")
+ secret_node = auth_node.find("./secret")
+ usage = secret_node.get("usage")
+ if not usage:
+ # Get the usage from the UUID
+ uuid = secret_node.get("uuid")
+ usage = conn.secretLookupByUUIDString(uuid).usageID()
+ disk_context["auth"] = {
+ "type": "ceph",
+ "username": username,
+ "usage": usage,
+ }
+
+ return disk_context
+
+
def _gen_xml(
conn,
name,
@@ -741,41 +830,16 @@ def _gen_xml(
elif disk.get("pool"):
disk_context["volume"] = disk["filename"]
# If we had no source_file, then we want a volume
- pool_xml = ElementTree.fromstring(
- conn.storagePoolLookupByName(disk["pool"]).XMLDesc()
- )
+ pool = conn.storagePoolLookupByName(disk["pool"])
+ pool_xml = ElementTree.fromstring(pool.XMLDesc())
pool_type = pool_xml.get("type")
- if pool_type in ["rbd", "gluster", "sheepdog"]:
- # libvirt can't handle rbd, gluster and sheepdog as volumes
- disk_context["type"] = "network"
- disk_context["protocol"] = pool_type
- # Copy the hosts from the pool definition
- disk_context["hosts"] = [
- {"name": host.get("name"), "port": host.get("port")}
- for host in pool_xml.findall(".//host")
- ]
- dir_node = pool_xml.find("./source/dir")
- # Gluster and RBD need pool/volume name
- name_node = pool_xml.find("./source/name")
- if name_node is not None:
- disk_context["volume"] = "{}/{}".format(
- name_node.text, disk_context["volume"]
- )
- # Copy the authentication if any for RBD
- auth_node = pool_xml.find("./source/auth")
- if auth_node is not None:
- username = auth_node.get("username")
- secret_node = auth_node.find("./secret")
- usage = secret_node.get("usage")
- if not usage:
- # Get the usage from the UUID
- uuid = secret_node.get("uuid")
- usage = conn.secretLookupByUUIDString(uuid).usageID()
- disk_context["auth"] = {
- "type": "ceph",
- "username": username,
- "usage": usage,
- }
+
+ # For Xen VMs convert all pool types (issue #58333)
+ if hypervisor == "xen" or pool_type in ["rbd", "gluster", "sheepdog"]:
+ disk_context.update(
+ _disk_from_pool(conn, pool, pool_xml, disk_context["volume"])
+ )
+
else:
if pool_type in ["disk", "logical"]:
# The volume format for these types doesn't match the driver format in the VM
@@ -3981,7 +4045,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
- matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"])
+ matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"],)
if matcher:
pool_name = matcher.group("pool")
pool = None
@@ -6499,29 +6563,33 @@ def _is_valid_volume(vol):

def _get_all_volumes_paths(conn):
"""
- Extract the path and backing stores path of all volumes.
+ Extract the path, name, pool name and backing stores path of all volumes.

:param conn: libvirt connection to use
"""
- volumes = [
- vol
- for l in [
- obj.listAllVolumes()
- for obj in conn.listAllStoragePools()
- if obj.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING
- ]
- for vol in l
+ pools = [
+ pool
+ for pool in conn.listAllStoragePools()
+ if pool.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING
]
- return {
- vol.path(): [
- path.text
- for path in ElementTree.fromstring(vol.XMLDesc()).findall(
- ".//backingStore/path"
- )
- ]
- for vol in volumes
- if _is_valid_volume(vol)
- }
+ volumes = {}
+ for pool in pools:
+ pool_volumes = {
+ volume.path(): {
+ "pool": pool.name(),
+ "name": volume.name(),
+ "backing_stores": [
+ path.text
+ for path in ElementTree.fromstring(volume.XMLDesc()).findall(
+ ".//backingStore/path"
+ )
+ ],
+ }
+ for volume in pool.listAllVolumes()
+ if _is_valid_volume(volume)
+ }
+ volumes.update(pool_volumes)
+ return volumes


def volume_infos(pool=None, volume=None, **kwargs):
@@ -6592,8 +6660,8 @@ def volume_infos(pool=None, volume=None, **kwargs):
if vol.path():
as_backing_store = {
path
- for (path, all_paths) in six.iteritems(backing_stores)
- if vol.path() in all_paths
+ for (path, volume) in six.iteritems(backing_stores)
+ if vol.path() in volume.get("backing_stores")
}
used_by = [
vm_name
diff --git a/salt/templates/virt/libvirt_disks.jinja b/salt/templates/virt/libvirt_disks.jinja
new file mode 100644
index 0000000000..38f836afbb
--- /dev/null
+++ b/salt/templates/virt/libvirt_disks.jinja
@@ -0,0 +1,12 @@
+{% macro network_source(disk) -%}
+ <source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
+ {%- for host in disk.get('hosts') %}
+ <host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
+ {%- endfor %}
+ {%- if disk.get("auth") %}
+ <auth username='{{ disk.auth.username }}'>
+ <secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
+ </auth>
+ {%- endif %}
+ </source>
+{%- endmacro %}
diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja
index 04a61ffa78..18728a75b5 100644
--- a/salt/templates/virt/libvirt_domain.jinja
+++ b/salt/templates/virt/libvirt_domain.jinja
@@ -1,3 +1,4 @@
+{%- import 'libvirt_disks.jinja' as libvirt_disks -%}
<domain type='{{ hypervisor }}'>
<name>{{ name }}</name>
<vcpu>{{ cpu }}</vcpu>
@@ -32,21 +33,13 @@
{% if disk.type == 'file' and 'source_file' in disk -%}
<source file='{{ disk.source_file }}' />
{% endif %}
+ {% if disk.type == 'block' -%}
+ <source dev='{{ disk.source_file }}' />
+ {% endif %}
{% if disk.type == 'volume' and 'pool' in disk -%}
<source pool='{{ disk.pool }}' volume='{{ disk.volume }}' />
{% endif %}
- {%- if disk.type == 'network' %}
- <source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
- {%- for host in disk.get('hosts') %}
- <host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
- {%- endfor %}
- {%- if disk.get("auth") %}
- <auth username='{{ disk.auth.username }}'>
- <secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
- </auth>
- {%- endif %}
- </source>
- {%- endif %}
+ {%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
<target dev='{{ disk.target_dev }}' bus='{{ disk.disk_bus }}' />
{% if disk.address -%}
<address type='drive' controller='0' bus='0' target='0' unit='{{ disk.index }}' />
diff --git a/tests/pytests/unit/modules/virt/__init__.py b/tests/pytests/unit/modules/virt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py
new file mode 100644
index 0000000000..1c32ae12eb
--- /dev/null
+++ b/tests/pytests/unit/modules/virt/conftest.py
@@ -0,0 +1,191 @@
+import pytest
+import salt.modules.config as config
+import salt.modules.virt as virt
+from salt._compat import ElementTree as ET
+from tests.support.mock import MagicMock
+
+
+class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors
+ """
+ Libvirt library mock
+ """
+
+ class virDomain(MagicMock):
+ """
+ virDomain mock
+ """
+
+ class libvirtError(Exception):
+ """
+ libvirtError mock
+ """
+
+ def __init__(self, msg):
+ super().__init__(msg)
+ self.msg = msg
+
+ def get_error_message(self):
+ return self.msg
+
+
+class MappedResultMock(MagicMock):
+ """
+ Mock class consistently return the same mock object based on the first argument.
+ """
+
+ _instances = {}
+
+ def __init__(self):
+ def mapped_results(*args, **kwargs):
+ if args[0] not in self._instances.keys():
+ raise virt.libvirt.libvirtError("Not found: {}".format(args[0]))
+ return self._instances[args[0]]
+
+ super().__init__(side_effect=mapped_results)
+
+ def add(self, name):
+ self._instances[name] = MagicMock()
+
+
+@pytest.fixture(autouse=True)
+def setup_loader(request):
+ # Create libvirt mock and connection mock
+ mock_libvirt = LibvirtMock()
+ mock_conn = MagicMock()
+ mock_conn.getStoragePoolCapabilities.return_value = "<storagepoolCapabilities/>"
+
+ mock_libvirt.openAuth.return_value = mock_conn
+ setup_loader_modules = {
+ virt: {
+ "libvirt": mock_libvirt,
+ "__salt__": {"config.get": config.get, "config.option": config.option},
+ },
+ config: {},
+ }
+ with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
+ yield loader_mock
+
+
+@pytest.fixture
+def make_mock_vm():
+ def _make_mock_vm(xml_def):
+ mocked_conn = virt.libvirt.openAuth.return_value
+
+ doc = ET.fromstring(xml_def)
+ name = doc.find("name").text
+ os_type = "hvm"
+ os_type_node = doc.find("os/type")
+ if os_type_node is not None:
+ os_type = os_type_node.text
+
+ mocked_conn.listDefinedDomains.return_value = [name]
+
+ # Configure the mocked domain
+ domain_mock = virt.libvirt.virDomain()
+ if not isinstance(mocked_conn.lookupByName, MappedResultMock):
+ mocked_conn.lookupByName = MappedResultMock()
+ mocked_conn.lookupByName.add(name)
+ domain_mock = mocked_conn.lookupByName(name)
+ domain_mock.XMLDesc.return_value = xml_def
+ domain_mock.OSType.return_value = os_type
+
+ # Return state as shutdown
+ domain_mock.info.return_value = [
+ 4,
+ 2048 * 1024,
+ 1024 * 1024,
+ 2,
+ 1234,
+ ]
+ domain_mock.ID.return_value = 1
+ domain_mock.name.return_value = name
+
+ domain_mock.attachDevice.return_value = 0
+ domain_mock.detachDevice.return_value = 0
+
+ return domain_mock
+
+ return _make_mock_vm
+
+
+@pytest.fixture
+def make_mock_storage_pool():
+ def _make_mock_storage_pool(name, type, volumes):
+ mocked_conn = virt.libvirt.openAuth.return_value
+
+ # Append the pool name to the list of known mocked pools
+ all_pools = mocked_conn.listStoragePools.return_value
+ if not isinstance(all_pools, list):
+ all_pools = []
+ all_pools.append(name)
+ mocked_conn.listStoragePools.return_value = all_pools
+
+ # Ensure we have mapped results for the pools
+ if not isinstance(mocked_conn.storagePoolLookupByName, MappedResultMock):
+ mocked_conn.storagePoolLookupByName = MappedResultMock()
+
+ # Configure the pool
+ mocked_conn.storagePoolLookupByName.add(name)
+ mocked_pool = mocked_conn.storagePoolLookupByName(name)
+ source = ""
+ if type == "disk":
+ source = "<device path='/dev/{}'/>".format(name)
+ pool_path = "/path/to/{}".format(name)
+ mocked_pool.XMLDesc.return_value = """
+ <pool type='{}'>
+ <source>
+ {}
+ </source>
+ <target>
+ <path>{}</path>
+ </target>
+ </pool>
+ """.format(
+ type, source, pool_path
+ )
+ mocked_pool.name.return_value = name
+ mocked_pool.info.return_value = [
+ virt.libvirt.VIR_STORAGE_POOL_RUNNING,
+ ]
+
+ # Append the pool to the listAllStoragePools list
+ all_pools_obj = mocked_conn.listAllStoragePools.return_value
+ if not isinstance(all_pools_obj, list):
+ all_pools_obj = []
+ all_pools_obj.append(mocked_pool)
+ mocked_conn.listAllStoragePools.return_value = all_pools_obj
+
+ # Configure the volumes
+ if not isinstance(mocked_pool.storageVolLookupByName, MappedResultMock):
+ mocked_pool.storageVolLookupByName = MappedResultMock()
+ mocked_pool.listVolumes.return_value = volumes
+
+ all_volumes = []
+ for volume in volumes:
+ mocked_pool.storageVolLookupByName.add(volume)
+ mocked_vol = mocked_pool.storageVolLookupByName(volume)
+ vol_path = "{}/{}".format(pool_path, volume)
+ mocked_vol.XMLDesc.return_value = """
+ <volume>
+ <target>
+ <path>{}</path>
+ </target>
+ </volume>
+ """.format(
+ vol_path,
+ )
+ mocked_vol.path.return_value = vol_path
+ mocked_vol.name.return_value = volume
+
+ mocked_vol.info.return_value = [
+ 0,
+ 1234567,
+ 12345,
+ ]
+ all_volumes.append(mocked_vol)
+
+ # Set the listAllVolumes return_value
+ mocked_pool.listAllVolumes.return_value = all_volumes
+ return mocked_pool
+
+ return _make_mock_storage_pool
diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py
new file mode 100644
index 0000000000..5f9b45ec9a
--- /dev/null
+++ b/tests/pytests/unit/modules/virt/test_domain.py
@@ -0,0 +1,256 @@
+import salt.modules.virt as virt
+from salt._compat import ElementTree as ET
+from tests.support.mock import MagicMock, patch
+
+from .test_helpers import append_to_XMLDesc
+
+
+def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool):
+ xml_def = """
+ <domain type='xen'>
+ <name>my_vm</name>
+ <memory unit='KiB'>524288</memory>
+ <currentMemory unit='KiB'>524288</currentMemory>
+ <vcpu placement='static'>1</vcpu>
+ <os>
+ <type arch='x86_64'>linux</type>
+ <kernel>/usr/lib/grub2/x86_64-xen/grub.xen</kernel>
+ </os>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source file='/path/to/default/my_vm_system'/>
+ <target dev='xvda' bus='xen'/>
+ </disk>
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw' cache='none' io='native'/>
+ <source dev='/path/to/my-iscsi/unit:0:0:1'/>
+ <target dev='xvdb' bus='xen'/>
+ </disk>
+ <controller type='xenbus' index='0'/>
+ </devices>
+ </domain>"""
+ domain_mock = make_mock_vm(xml_def)
+ make_mock_storage_pool("default", "dir", ["my_vm_system"])
+ make_mock_storage_pool("my-iscsi", "iscsi", ["unit:0:0:1"])
+ make_mock_storage_pool("vdb", "disk", ["vdb1"])
+
+ ret = virt.update(
+ "my_vm",
+ disks=[
+ {"name": "system", "pool": "default"},
+ {"name": "iscsi-data", "pool": "my-iscsi", "source_file": "unit:0:0:1"},
+ {"name": "vdb-data", "pool": "vdb", "source_file": "vdb1"},
+ {"name": "file-data", "pool": "default", "size": "10240"},
+ ],
+ )
+
+ assert ret["definition"]
+ define_mock = virt.libvirt.openAuth().defineXML
+ setxml = ET.fromstring(define_mock.call_args[0][0])
+ assert "block" == setxml.find(".//disk[3]").get("type")
+ assert "/path/to/vdb/vdb1" == setxml.find(".//disk[3]/source").get("dev")
+
+ # Note that my_vm-file-data was not an existing volume before the update
+ assert "file" == setxml.find(".//disk[4]").get("type")
+ assert "/path/to/default/my_vm_file-data" == setxml.find(".//disk[4]/source").get(
+ "file"
+ )
+
+
+def test_get_disks(make_mock_vm, make_mock_storage_pool):
+ # test with volumes
+ vm_def = """<domain type='kvm' id='3'>
+ <name>srv01</name>
+ <devices>
+ <disk type='volume' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source pool='default' volume='srv01_system'/>
+ <backingStore/>
+ <target dev='vda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </disk>
+ <disk type='volume' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source pool='default' volume='srv01_data'/>
+ <backingStore type='file' index='1'>
+ <format type='qcow2'/>
+ <source file='/var/lib/libvirt/images/vol01'/>
+ <backingStore/>
+ </backingStore>
+ <target dev='vdb' bus='virtio'/>
+ <alias name='virtio-disk1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </disk>
+ <disk type='volume' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source pool='default' volume='vm05_system'/>
+ <backingStore type='file' index='1'>
+ <format type='qcow2'/>
+ <source file='/var/lib/libvirt/images/vm04_system.qcow2'/>
+ <backingStore type='file' index='2'>
+ <format type='raw'/>
+ <source file='/var/testsuite-data/disk-image-template.raw'/>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ <target dev='vdc' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </disk>
+ <disk type='network' device='cdrom'>
+ <driver name='qemu' type='raw' cache='none' io='native'/>
+ <source protocol='http' name='/pub/iso/myimage.iso' query='foo=bar&baz=flurb' index='1'>
+ <host name='dev-srv.tf.local' port='80'/>
+ </source>
+ <target dev='hda' bus='ide'/>
+ <readonly/>
+ <alias name='ide0-0-0'/>
+ <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ domain_mock = make_mock_vm(vm_def)
+
+ pool_mock = make_mock_storage_pool(
+ "default", "dir", ["srv01_system", "srv01_data", "vm05_system"]
+ )
+
+ # Append backing store to srv01_data volume XML description
+ srv1data_mock = pool_mock.storageVolLookupByName("srv01_data")
+ append_to_XMLDesc(
+ srv1data_mock,
+ """
+ <backingStore>
+ <path>/var/lib/libvirt/images/vol01</path>
+ <format type="qcow2"/>
+ </backingStore>""",
+ )
+
+ assert virt.get_disks("srv01") == {
+ "vda": {
+ "type": "disk",
+ "file": "default/srv01_system",
+ "file format": "qcow2",
+ "disk size": 12345,
+ "virtual size": 1234567,
+ },
+ "vdb": {
+ "type": "disk",
+ "file": "default/srv01_data",
+ "file format": "qcow2",
+ "disk size": 12345,
+ "virtual size": 1234567,
+ "backing file": {
+ "file": "/var/lib/libvirt/images/vol01",
+ "file format": "qcow2",
+ },
+ },
+ "vdc": {
+ "type": "disk",
+ "file": "default/vm05_system",
+ "file format": "qcow2",
+ "disk size": 12345,
+ "virtual size": 1234567,
+ "backing file": {
+ "file": "/var/lib/libvirt/images/vm04_system.qcow2",
+ "file format": "qcow2",
+ "backing file": {
+ "file": "/var/testsuite-data/disk-image-template.raw",
+ "file format": "raw",
+ },
+ },
+ },
+ "hda": {
+ "type": "cdrom",
+ "file format": "raw",
+ "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb",
+ },
+ }
+
+
+def test_get_disk_convert_volumes(make_mock_vm, make_mock_storage_pool):
+ vm_def = """<domain type='kvm' id='3'>
+ <name>srv01</name>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source file='/path/to/default/srv01_system'/>
+ <target dev='vda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+ </disk>
+ <disk type='block' device='disk'>
+ <driver name='qemu' type='raw'/>
+ <source dev='/path/to/default/srv01_data'/>
+ <target dev='vdb' bus='virtio'/>
+ <alias name='virtio-disk1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </disk>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none' io='native'/>
+ <source file='/path/to/srv01_extra'/>
+ <target dev='vdc' bus='virtio'/>
+ <alias name='virtio-disk1'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ domain_mock = make_mock_vm(vm_def)
+
+ pool_mock = make_mock_storage_pool("default", "dir", ["srv01_system", "srv01_data"])
+
+ subprocess_mock = MagicMock()
+ popen_mock = MagicMock(spec=virt.subprocess.Popen)
+ popen_mock.return_value.communicate.return_value = [
+ """[
+ {
+ "virtual-size": 214748364800,
+ "filename": "/path/to/srv01_extra",
+ "cluster-size": 65536,
+ "format": "qcow2",
+ "actual-size": 340525056,
+ "format-specific": {
+ "type": "qcow2",
+ "data": {
+ "compat": "1.1",
+ "lazy-refcounts": false,
+ "refcount-bits": 16,
+ "corrupt": false
+ }
+ },
+ "dirty-flag": false
+ }
+ ]
+ """
+ ]
+ subprocess_mock.Popen = popen_mock
+
+ with patch.dict(virt.__dict__, {"subprocess": subprocess_mock}):
+ assert {
+ "vda": {
+ "type": "disk",
+ "file": "default/srv01_system",
+ "file format": "qcow2",
+ "disk size": 12345,
+ "virtual size": 1234567,
+ },
+ "vdb": {
+ "type": "disk",
+ "file": "default/srv01_data",
+ "file format": "raw",
+ "disk size": 12345,
+ "virtual size": 1234567,
+ },
+ "vdc": {
+ "type": "disk",
+ "file": "/path/to/srv01_extra",
+ "file format": "qcow2",
+ "cluster size": 65536,
+ "disk size": 340525056,
+ "virtual size": 214748364800,
+ },
+ } == virt.get_disks("srv01")
diff --git a/tests/pytests/unit/modules/virt/test_helpers.py b/tests/pytests/unit/modules/virt/test_helpers.py
new file mode 100644
index 0000000000..f64aee2821
--- /dev/null
+++ b/tests/pytests/unit/modules/virt/test_helpers.py
@@ -0,0 +1,11 @@
+from salt._compat import ElementTree as ET
+
+
+def append_to_XMLDesc(mocked, fragment):
+ """
+ Append an XML fragment at the end of the mocked XMLDesc return_value of mocked.
+ """
+ xml_doc = ET.fromstring(mocked.XMLDesc())
+ xml_fragment = ET.fromstring(fragment)
+ xml_doc.append(xml_fragment)
+ mocked.XMLDesc.return_value = ET.tostring(xml_doc)
diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py
index 27c4b9d1b0..6e61544a1f 100644
--- a/tests/unit/modules/test_virt.py
+++ b/tests/unit/modules/test_virt.py
@@ -1141,6 +1141,65 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
self.assertEqual("vdb2", source.attrib["volume"])
self.assertEqual("raw", disk.find("driver").get("type"))

+ def test_get_xml_volume_xen_dir(self):
+ """
+ Test virt._gen_xml generating disks for a Xen hypervisor
+ """
+ self.mock_conn.listStoragePools.return_value = ["default"]
+ pool_mock = MagicMock()
+ pool_mock.XMLDesc.return_value = (
+ "<pool type='dir'><target><path>/path/to/images</path></target></pool>"
+ )
+ volume_xml = "<volume><target><path>/path/to/images/hello_system</path></target></volume>"
+ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
+ self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+ diskp = virt._disk_profile(
+ self.mock_conn,
+ None,
+ "xen",
+ [{"name": "system", "pool": "default"}],
+ "hello",
+ )
+ xml_data = virt._gen_xml(
+ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64",
+ )
+ root = ET.fromstring(xml_data)
+ disk = root.findall(".//disk")[0]
+ self.assertEqual(disk.attrib["type"], "file")
+ self.assertEqual(
+ "/path/to/images/hello_system", disk.find("source").attrib["file"]
+ )
+
+ def test_get_xml_volume_xen_block(self):
+ """
+ Test virt._gen_xml generating disks for a Xen hypervisor
+ """
+ self.mock_conn.listStoragePools.return_value = ["default"]
+ pool_mock = MagicMock()
+ pool_mock.listVolumes.return_value = ["vol01"]
+ volume_xml = "<volume><target><path>/dev/to/vol01</path></target></volume>"
+ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml
+ self.mock_conn.storagePoolLookupByName.return_value = pool_mock
+
+ for pool_type in ["logical", "disk", "iscsi", "scsi"]:
+ pool_mock.XMLDesc.return_value = "<pool type='{}'><source><device path='/dev/sda'/></source></pool>".format(
+ pool_type
+ )
+ diskp = virt._disk_profile(
+ self.mock_conn,
+ None,
+ "xen",
+ [{"name": "system", "pool": "default", "source_file": "vol01"}],
+ "hello",
+ )
+ xml_data = virt._gen_xml(
+ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64",
+ )
+ root = ET.fromstring(xml_data)
+ disk = root.findall(".//disk")[0]
+ self.assertEqual(disk.attrib["type"], "block")
+ self.assertEqual("/dev/to/vol01", disk.find("source").attrib["dev"])
+
def test_gen_xml_cdrom(self):
"""
Test virt._gen_xml(), generating a cdrom device (different disk type, no source)
@@ -5503,124 +5562,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin):
"vol1.qcow2",
"/path/to/file",
)
-
- def test_get_disks(self):
- """
- Test the virt.get_disks function
- """
- # test with volumes
- vm_def = """<domain type='kvm' id='3'>
- <name>srv01</name>
- <devices>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='srv01_system'/>
- <backingStore/>
- <target dev='vda' bus='virtio'/>
- <alias name='virtio-disk0'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
- </disk>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='srv01_data'/>
- <backingStore type='file' index='1'>
- <format type='qcow2'/>
- <source file='/var/lib/libvirt/images/vol01'/>
- <backingStore/>
- </backingStore>
- <target dev='vdb' bus='virtio'/>
- <alias name='virtio-disk1'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
- </disk>
- <disk type='volume' device='disk'>
- <driver name='qemu' type='qcow2' cache='none' io='native'/>
- <source pool='default' volume='vm05_system'/>
- <backingStore type='file' index='1'>
- <format type='qcow2'/>
- <source file='/var/lib/libvirt/images/vm04_system.qcow2'/>
- <backingStore type='file' index='2'>
- <format type='raw'/>
- <source file='/var/testsuite-data/disk-image-template.raw'/>
- <backingStore/>
- </backingStore>
- </backingStore>
- <target dev='vdc' bus='virtio'/>
- <alias name='virtio-disk0'/>
- <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
- </disk>
- <disk type='network' device='cdrom'>
- <driver name='qemu' type='raw' cache='none' io='native'/>
- <source protocol='http' name='/pub/iso/myimage.iso' query='foo=bar&baz=flurb' index='1'>
- <host name='dev-srv.tf.local' port='80'/>
- </source>
- <target dev='hda' bus='ide'/>
- <readonly/>
- <alias name='ide0-0-0'/>
- <address type='drive' controller='0' bus='0' target='0' unit='0'/>
- </disk>
- </devices>
- </domain>
- """
- self.set_mock_vm("srv01", vm_def)
-
- pool_mock = MagicMock()
- pool_mock.storageVolLookupByName.return_value.info.return_value = [
- 0,
- 1234567,
- 12345,
- ]
- pool_mock.storageVolLookupByName.return_value.XMLDesc.side_effect = [
- "<volume />",
- """
- <volume>
- <backingStore>
- <path>/var/lib/libvirt/images/vol01</path>
- <format type="qcow2"/>
- </backingStore>
- </volume>""",
- ]
- self.mock_conn.storagePoolLookupByName.return_value = pool_mock
-
- self.assertDictEqual(
- virt.get_disks("srv01"),
- {
- "vda": {
- "type": "disk",
- "file": "default/srv01_system",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- },
- "vdb": {
- "type": "disk",
- "file": "default/srv01_data",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- "backing file": {
- "file": "/var/lib/libvirt/images/vol01",
- "file format": "qcow2",
- },
- },
- "vdc": {
- "type": "disk",
- "file": "default/vm05_system",
- "file format": "qcow2",
- "disk size": 12345,
- "virtual size": 1234567,
- "backing file": {
- "file": "/var/lib/libvirt/images/vm04_system.qcow2",
- "file format": "qcow2",
- "backing file": {
- "file": "/var/testsuite-data/disk-image-template.raw",
- "file format": "raw",
- },
- },
- },
- "hda": {
- "type": "cdrom",
- "file format": "raw",
- "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb",
- },
- },
- )
--
2.28.0