diff --git a/_lastrevision b/_lastrevision index 392034d..b22e101 100644 --- a/_lastrevision +++ b/_lastrevision @@ -1 +1 @@ -8afd80f388d6e97b882f8564a8afa1acab63e014 \ No newline at end of file +3ce95a1b386927b6f8cb27d1a6421018bebccd9a \ No newline at end of file diff --git a/backport-a-few-virt-prs-272.patch b/backport-a-few-virt-prs-272.patch new file mode 100644 index 0000000..b29492c --- /dev/null +++ b/backport-a-few-virt-prs-272.patch @@ -0,0 +1,1272 @@ +From f45df684fe68a93a7003aca2189479b0d0240305 Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Mon, 5 Oct 2020 16:49:59 +0200 +Subject: [PATCH] Backport a few virt PRs (#272) + +* Fix virt update when cpu and memory are changed + +If CPU is changed, the memory change would be short circuited. This is a +regression introduced by PR #58332 + +* virt: add VM memory tunning support + +* avoid comparing string with integer + +* fix pre-commit failure + +* Properly fix memory setting regression in virt.update + +The 'mem' property in the virt.update value should indicate the result +of a live memory setting. The value should be an int in KiB. Fixing the +code and tests for this. + +* virt: add stop_on_reboot parameter in guest states and definition + +It can be needed to force a VM to stop instead of rebooting. A typical +example of this is when creating a VM using a install CDROM ISO or when +using an autoinstallation profile. Forcing a shutdown allows libvirt to +pick up another XML definition for the new start to remove the +firstboot-only options. + +* virt: expose live parameter in virt.defined state + +Allow updating the definition of a VM without touching the live +instance. This can be helpful since live update may change the device +names in the guest. 
+ +Co-authored-by: firefly +Co-authored-by: gqlo +--- + changelog/57639.added | 1 + + changelog/58589.added | 1 + + salt/modules/virt.py | 303 +++++++++++++++++++-- + salt/states/virt.py | 73 +++++- + salt/templates/virt/libvirt_domain.jinja | 30 ++- + salt/utils/xmlutil.py | 4 +- + tests/unit/modules/test_virt.py | 320 ++++++++++++++++++++++- + tests/unit/states/test_virt.py | 19 +- + 8 files changed, 704 insertions(+), 47 deletions(-) + create mode 100644 changelog/57639.added + create mode 100644 changelog/58589.added + +diff --git a/changelog/57639.added b/changelog/57639.added +new file mode 100644 +index 0000000000..c0281e9319 +--- /dev/null ++++ b/changelog/57639.added +@@ -0,0 +1 @@ ++Memory Tuning Support which allows much greater control of memory allocation +diff --git a/changelog/58589.added b/changelog/58589.added +new file mode 100644 +index 0000000000..5960555ec6 +--- /dev/null ++++ b/changelog/58589.added +@@ -0,0 +1 @@ ++Allow handling special first boot definition on virtual machine +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index 34643787f9..87ab7ca12d 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -71,13 +71,57 @@ The calls not using the libvirt connection setup are: + - `libvirt URI format `_ + - `libvirt authentication configuration `_ + ++Units ++========== ++.. _virt-units: ++.. rubric:: Units specification ++.. versionadded:: Magnesium ++ ++The string should contain a number optionally followed ++by a unit. The number may have a decimal fraction. If ++the unit is not given then MiB are set by default. ++Units can optionally be given in IEC style (such as MiB), ++although the standard single letter style (such as M) is ++more convenient. 
++ ++Valid units include: ++ ++========== ===== ========== ========== ====== ++Standard IEC Standard IEC ++ Unit Unit Name Name Factor ++========== ===== ========== ========== ====== ++ B Bytes 1 ++ K KiB Kilobytes Kibibytes 2**10 ++ M MiB Megabytes Mebibytes 2**20 ++ G GiB Gigabytes Gibibytes 2**30 ++ T TiB Terabytes Tebibytes 2**40 ++ P PiB Petabytes Pebibytes 2**50 ++ E EiB Exabytes Exbibytes 2**60 ++ Z ZiB Zettabytes Zebibytes 2**70 ++ Y YiB Yottabytes Yobibytes 2**80 ++========== ===== ========== ========== ====== ++ ++Additional decimal based units: ++ ++====== ======= ++Unit Factor ++====== ======= ++KB 10**3 ++MB 10**6 ++GB 10**9 ++TB 10**12 ++PB 10**15 ++EB 10**18 ++ZB 10**21 ++YB 10**24 ++====== ======= + """ + # Special Thanks to Michael Dehann, many of the concepts, and a few structures + # of his in the virt func module have been used + +-# Import python libs + + import base64 ++import collections + import copy + import datetime + import logging +@@ -89,10 +133,8 @@ import subprocess + import sys + import time + +-# Import third party libs + import jinja2.exceptions + +-# Import salt libs + import salt.utils.data + import salt.utils.files + import salt.utils.json +@@ -725,6 +767,39 @@ def _disk_from_pool(conn, pool, pool_xml, volume_name): + return disk_context + + ++def _handle_unit(s, def_unit="m"): ++ """ ++ Handle the unit conversion, return the value in bytes ++ """ ++ m = re.match(r"(?P[0-9.]*)\s*(?P.*)$", six.text_type(s).strip()) ++ value = m.group("value") ++ # default unit ++ unit = m.group("unit").lower() or def_unit ++ try: ++ value = int(value) ++ except ValueError: ++ try: ++ value = float(value) ++ except ValueError: ++ raise SaltInvocationError("invalid number") ++ # flag for base ten ++ dec = False ++ if re.match(r"[kmgtpezy]b$", unit): ++ dec = True ++ elif not re.match(r"(b|[kmgtpezy](ib)?)$", unit): ++ raise SaltInvocationError("invalid units") ++ p = "bkmgtpezy".index(unit[0]) ++ value *= 10 ** (p * 3) if dec else 2 ** (p * 10) ++ 
return int(value) ++ ++ ++def nesthash(): ++ """ ++ create default dict that allows arbitrary level of nesting ++ """ ++ return collections.defaultdict(nesthash) ++ ++ + def _gen_xml( + conn, + name, +@@ -738,18 +813,32 @@ def _gen_xml( + graphics=None, + boot=None, + boot_dev=None, ++ stop_on_reboot=False, + **kwargs + ): + """ + Generate the XML string to define a libvirt VM + """ +- mem = int(mem) * 1024 # MB + context = { + "hypervisor": hypervisor, + "name": name, + "cpu": six.text_type(cpu), +- "mem": six.text_type(mem), ++ "on_reboot": "destroy" if stop_on_reboot else "restart", + } ++ ++ context["mem"] = nesthash() ++ if isinstance(mem, int): ++ mem = int(mem) * 1024 # MB ++ context["mem"]["boot"] = six.text_type(mem) ++ context["mem"]["current"] = six.text_type(mem) ++ elif isinstance(mem, dict): ++ for tag, val in six.iteritems(mem): ++ if val: ++ if tag == "slots": ++ context["mem"]["slots"] = "{}='{}'".format(tag, val) ++ else: ++ context["mem"][tag] = six.text_type(int(_handle_unit(val) / 1024)) ++ + if hypervisor in ["qemu", "kvm"]: + context["controller_model"] = False + elif hypervisor == "vmware": +@@ -869,7 +958,6 @@ def _gen_xml( + except jinja2.exceptions.TemplateNotFound: + log.error("Could not load template %s", fn_) + return "" +- + return template.render(**context) + + +@@ -1668,6 +1756,7 @@ def init( + arch=None, + boot=None, + boot_dev=None, ++ stop_on_reboot=False, + **kwargs + ): + """ +@@ -1675,7 +1764,28 @@ def init( + + :param name: name of the virtual machine to create + :param cpu: Number of virtual CPUs to assign to the virtual machine +- :param mem: Amount of memory to allocate to the virtual machine in MiB. ++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to ++ contain detailed configuration which support memory allocation or tuning. 
Supported parameters are ``boot``, ++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The ++ structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. ++ Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be ++ an integer. ++ ++ .. code-block:: python ++ ++ { ++ 'boot': 1g, ++ 'current': 1g, ++ 'max': 1g, ++ 'slots': 10, ++ 'hard_limit': '1024' ++ 'soft_limit': '512m' ++ 'swap_hard_limit': '1g' ++ 'min_guarantee': '512mib' ++ } ++ ++ .. versionchanged:: Magnesium ++ + :param nic: NIC profile to use (Default: ``'default'``). + The profile interfaces can be customized / extended with the interfaces parameter. + If set to ``None``, no profile will be used. +@@ -1732,6 +1842,15 @@ def init( + :param password: password to connect with, overriding defaults + + .. versionadded:: 2019.2.0 ++ ++ :param stop_on_reboot: ++ If set to ``True`` the guest will stop instead of rebooting. ++ This is specially useful when creating a virtual machine with an installation cdrom or ++ an autoinstallation needing a special first boot configuration. ++ Defaults to ``False`` ++ ++ .. versionadded:: Aluminium ++ + :param boot: + Specifies kernel, initial ramdisk and kernel command line parameters for the virtual machine. + This is an optional parameter, all of the keys are optional within the dictionary. The structure of +@@ -1788,6 +1907,36 @@ def init( + + .. versionadded:: sodium + ++ .. _init-mem-def: ++ ++ .. 
rubric:: Memory parameter definition ++ ++ Memory parameter can contain the following properties: ++ ++ boot ++ The maximum allocation of memory for the guest at boot time ++ ++ current ++ The actual allocation of memory for the guest ++ ++ max ++ The run time maximum memory allocation of the guest ++ ++ slots ++ specifies the number of slots available for adding memory to the guest ++ ++ hard_limit ++ the maximum memory the guest can use ++ ++ soft_limit ++ memory limit to enforce during memory contention ++ ++ swap_hard_limit ++ the maximum memory plus swap the guest can use ++ ++ min_guarantee ++ the guaranteed minimum memory allocation for the guest ++ + .. _init-nic-def: + + .. rubric:: Network Interfaces Definitions +@@ -2082,6 +2231,7 @@ def init( + graphics, + boot, + boot_dev, ++ stop_on_reboot, + **kwargs + ) + log.debug("New virtual machine definition: %s", vm_xml) +@@ -2311,6 +2461,7 @@ def update( + boot=None, + test=False, + boot_dev=None, ++ stop_on_reboot=False, + **kwargs + ): + """ +@@ -2318,7 +2469,24 @@ def update( + + :param name: Name of the domain to update + :param cpu: Number of virtual CPUs to assign to the virtual machine +- :param mem: Amount of memory to allocate to the virtual machine in MiB. ++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to ++ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, ++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The ++ structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. ++ Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be ++ an integer. ++ ++ To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. 
Please note that ``None`` ++ is mapped to ``null`` in sls file, pass ``null`` in sls file instead. ++ ++ .. code-block:: yaml ++ ++ - mem: ++ hard_limit: null ++ soft_limit: null ++ ++ .. versionchanged:: Magnesium ++ + :param disk_profile: disk profile to use + :param disks: + Disk definitions as documented in the :func:`init` function. +@@ -2375,6 +2543,14 @@ def update( + + .. versionadded:: Magnesium + ++ :param stop_on_reboot: ++ If set to ``True`` the guest will stop instead of rebooting. ++ This is specially useful when creating a virtual machine with an installation cdrom or ++ an autoinstallation needing a special first boot configuration. ++ Defaults to ``False`` ++ ++ .. versionadded:: Aluminium ++ + :param test: run in dry-run mode if set to True + + .. versionadded:: sodium +@@ -2438,6 +2614,8 @@ def update( + desc.find(".//os/type").get("arch"), + graphics, + boot, ++ boot_dev, ++ stop_on_reboot, + **kwargs + ) + ) +@@ -2458,12 +2636,26 @@ def update( + def _set_nvram(node, value): + node.set("template", value) + +- def _set_with_mib_unit(node, value): ++ def _set_with_byte_unit(node, value): + node.text = six.text_type(value) +- node.set("unit", "MiB") ++ node.set("unit", "bytes") ++ ++ def _get_with_unit(node): ++ unit = node.get("unit", "KiB") ++ # _handle_unit treats bytes as invalid unit for the purpose of consistency ++ unit = unit if unit != "bytes" else "b" ++ value = node.get("memory") or node.text ++ return _handle_unit("{}{}".format(value, unit)) if value else None ++ ++ old_mem = int(_get_with_unit(desc.find("memory")) / 1024) + + # Update the kernel boot parameters + params_mapping = [ ++ { ++ "path": "stop_on_reboot", ++ "xpath": "on_reboot", ++ "convert": lambda v: "destroy" if v else "restart", ++ }, + {"path": "boot:kernel", "xpath": "os/kernel"}, + {"path": "boot:initrd", "xpath": "os/initrd"}, + {"path": "boot:cmdline", "xpath": "os/cmdline"}, +@@ -2473,14 +2665,72 @@ def update( + { + "path": "mem", + "xpath": "memory", +- "get": 
lambda n: int(n.text) / 1024, +- "set": _set_with_mib_unit, ++ "convert": _handle_unit, ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, + }, + { + "path": "mem", + "xpath": "currentMemory", +- "get": lambda n: int(n.text) / 1024, +- "set": _set_with_mib_unit, ++ "convert": _handle_unit, ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:max", ++ "convert": _handle_unit, ++ "xpath": "maxMemory", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:boot", ++ "convert": _handle_unit, ++ "xpath": "memory", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:current", ++ "convert": _handle_unit, ++ "xpath": "currentMemory", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:slots", ++ "xpath": "maxMemory", ++ "get": lambda n: n.get("slots"), ++ "set": lambda n, v: n.set("slots", str(v)), ++ "del": salt.utils.xmlutil.del_attribute("slots", ["unit"]), ++ }, ++ { ++ "path": "mem:hard_limit", ++ "convert": _handle_unit, ++ "xpath": "memtune/hard_limit", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:soft_limit", ++ "convert": _handle_unit, ++ "xpath": "memtune/soft_limit", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:swap_hard_limit", ++ "convert": _handle_unit, ++ "xpath": "memtune/swap_hard_limit", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, ++ }, ++ { ++ "path": "mem:min_guarantee", ++ "convert": _handle_unit, ++ "xpath": "memtune/min_guarantee", ++ "get": _get_with_unit, ++ "set": _set_with_byte_unit, + }, + { + "path": "boot_dev:{dev}", +@@ -2566,13 +2816,24 @@ def update( + } + ) + if mem: +- commands.append( +- { +- "device": "mem", +- "cmd": "setMemoryFlags", +- "args": [mem * 1024, libvirt.VIR_DOMAIN_AFFECT_LIVE], +- } +- ) ++ if isinstance(mem, dict): ++ # setMemoryFlags takes memory amount in KiB ++ new_mem = ( ++ 
int(_handle_unit(mem.get("current")) / 1024) ++ if "current" in mem ++ else None ++ ) ++ elif isinstance(mem, int): ++ new_mem = int(mem * 1024) ++ ++ if old_mem != new_mem and new_mem is not None: ++ commands.append( ++ { ++ "device": "mem", ++ "cmd": "setMemoryFlags", ++ "args": [new_mem, libvirt.VIR_DOMAIN_AFFECT_LIVE], ++ } ++ ) + + # Look for removable device source changes + new_disks = [] +diff --git a/salt/states/virt.py b/salt/states/virt.py +index 1a0c889d58..740f6c5746 100644 +--- a/salt/states/virt.py ++++ b/salt/states/virt.py +@@ -11,13 +11,11 @@ for the generation and signing of certificates for systems running libvirt: + virt.keys + """ + +-# Import Python libs + + import fnmatch + import logging + import os + +-# Import Salt libs + import salt.utils.args + import salt.utils.files + import salt.utils.stringutils +@@ -290,6 +288,8 @@ def defined( + boot=None, + update=True, + boot_dev=None, ++ stop_on_reboot=False, ++ live=True, + ): + """ + Starts an existing guest, or defines and starts a new VM with specified arguments. +@@ -298,7 +298,28 @@ def defined( + + :param name: name of the virtual machine to run + :param cpu: number of CPUs for the virtual machine to create +- :param mem: amount of memory in MiB for the new virtual machine ++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to ++ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, ++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The ++ structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. ++ Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be ++ an integer. ++ ++ .. 
code-block:: python ++ ++ { ++ 'boot': 1g, ++ 'current': 1g, ++ 'max': 1g, ++ 'slots': 10, ++ 'hard_limit': '1024' ++ 'soft_limit': '512m' ++ 'swap_hard_limit': '1g' ++ 'min_guarantee': '512mib' ++ } ++ ++ .. versionchanged:: Magnesium ++ + :param vm_type: force virtual machine type for the new VM. The default value is taken from + the host capabilities. This could be useful for example to use ``'qemu'`` type instead + of the ``'kvm'`` one. +@@ -358,6 +379,20 @@ def defined( + + .. versionadded:: Magnesium + ++ :param stop_on_reboot: ++ If set to ``True`` the guest will stop instead of rebooting. ++ This is specially useful when creating a virtual machine with an installation cdrom or ++ an autoinstallation needing a special first boot configuration. ++ Defaults to ``False`` ++ ++ .. versionadded:: Aluminium ++ ++ :param live: ++ If set to ``False`` the changes will not be applied live to the running instance, but will ++ only apply at the next start. Note that reboot will not take those changes. ++ ++ .. versionadded:: Aluminium ++ + .. rubric:: Example States + + Make sure a virtual machine called ``domain_name`` is defined: +@@ -415,13 +450,14 @@ def defined( + nic_profile=nic_profile, + interfaces=interfaces, + graphics=graphics, +- live=True, ++ live=live, + connection=connection, + username=username, + password=password, + boot=boot, + test=__opts__["test"], + boot_dev=boot_dev, ++ stop_on_reboot=stop_on_reboot, + ) + ret["changes"][name] = status + if not status.get("definition"): +@@ -457,6 +493,7 @@ def defined( + boot=boot, + start=False, + boot_dev=boot_dev, ++ stop_on_reboot=stop_on_reboot, + ) + ret["changes"][name] = {"definition": True} + ret["comment"] = "Domain {} defined".format(name) +@@ -490,6 +527,7 @@ def running( + arch=None, + boot=None, + boot_dev=None, ++ stop_on_reboot=False, + ): + """ + Starts an existing guest, or defines and starts a new VM with specified arguments. 
+@@ -498,7 +536,23 @@ def running( + + :param name: name of the virtual machine to run + :param cpu: number of CPUs for the virtual machine to create +- :param mem: amount of memory in MiB for the new virtual machine ++ :param mem: Amount of memory to allocate to the virtual machine in MiB. Since Magnesium, a dictionary can be used to ++ contain detailed configuration which support memory allocation or tuning. Supported parameters are ``boot``, ++ ``current``, ``max``, ``slots``, ``hard_limit``, ``soft_limit``, ``swap_hard_limit`` and ``min_guarantee``. The ++ structure of the dictionary is documented in :ref:`init-mem-def`. Both decimal and binary base are supported. ++ Detail unit specification is documented in :ref:`virt-units`. Please note that the value for ``slots`` must be ++ an integer. ++ ++ To remove any parameters, pass a None object, for instance: 'soft_limit': ``None``. Please note that ``None`` ++ is mapped to ``null`` in sls file, pass ``null`` in sls file instead. ++ ++ .. code-block:: yaml ++ ++ - mem: ++ hard_limit: null ++ soft_limit: null ++ ++ .. versionchanged:: Magnesium + :param vm_type: force virtual machine type for the new VM. The default value is taken from + the host capabilities. This could be useful for example to use ``'qemu'`` type instead + of the ``'kvm'`` one. +@@ -609,6 +663,14 @@ def running( + + .. versionadded:: Magnesium + ++ :param stop_on_reboot: ++ If set to ``True`` the guest will stop instead of rebooting. ++ This is specially useful when creating a virtual machine with an installation cdrom or ++ an autoinstallation needing a special first boot configuration. ++ Defaults to ``False`` ++ ++ .. versionadded:: Aluminium ++ + .. 
rubric:: Example States + + Make sure an already-defined virtual machine called ``domain_name`` is running: +@@ -677,6 +739,7 @@ def running( + boot=boot, + update=update, + boot_dev=boot_dev, ++ stop_on_reboot=stop_on_reboot, + connection=connection, + username=username, + password=password, +diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja +index 18728a75b5..fb4c9f40d0 100644 +--- a/salt/templates/virt/libvirt_domain.jinja ++++ b/salt/templates/virt/libvirt_domain.jinja +@@ -2,9 +2,32 @@ + + {{ name }} + {{ cpu }} +- {{ mem }} +- {{ mem }} +- ++ {%- if mem.max %} ++ {{ mem.max }} ++ {%- endif %} ++ {%- if mem.boot %} ++ {{ mem.boot }} ++ {%- endif %} ++ {%- if mem.current %} ++ {{ mem.current }} ++ {%- endif %} ++ {%- if mem %} ++ ++ {%- if 'hard_limit' in mem and mem.hard_limit %} ++ {{ mem.hard_limit }} ++ {%- endif %} ++ {%- if 'soft_limit' in mem and mem.soft_limit %} ++ {{ mem.soft_limit }} ++ {%- endif %} ++ {%- if 'swap_hard_limit' in mem and mem.swap_hard_limit %} ++ {{ mem.swap_hard_limit }} ++ {%- endif %} ++ {%- if 'min_guarantee' in mem and mem.min_guarantee %} ++ {{ mem.min_guarantee }} ++ {%- endif %} ++ ++ {%- endif %} ++ + {{ os_type }} + {% if boot %} + {% if 'kernel' in boot %} +@@ -27,6 +50,7 @@ + + {% endfor %} + ++ {{ on_reboot }} + + {% for disk in disks %} + +diff --git a/salt/utils/xmlutil.py b/salt/utils/xmlutil.py +index 68191bc528..6c32f22ad4 100644 +--- a/salt/utils/xmlutil.py ++++ b/salt/utils/xmlutil.py +@@ -2,12 +2,10 @@ + Various XML utilities + """ + +-# Import Python libs + import re + import string # pylint: disable=deprecated-module + from xml.etree import ElementTree + +-# Import salt libs + import salt.utils.data + from salt.ext import six + +@@ -301,7 +299,7 @@ def change_xml(doc, data, mapping): + if convert_fn: + new_value = convert_fn(new_value) + +- if current_value != new_value: ++ if six.text_type(current_value) != six.text_type(new_value): + set_fn(node, new_value) + 
need_update = True + else: +diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py +index 6e61544a1f..ca5e80d2d2 100644 +--- a/tests/unit/modules/test_virt.py ++++ b/tests/unit/modules/test_virt.py +@@ -4,7 +4,6 @@ virt execution module unit tests + + # pylint: disable=3rd-party-module-not-gated + +-# Import python libs + + import datetime + import os +@@ -16,7 +15,6 @@ import salt.modules.config as config + import salt.modules.virt as virt + import salt.syspaths + +-# Import salt libs + import salt.utils.yaml + from salt._compat import ElementTree as ET + from salt.exceptions import CommandExecutionError, SaltInvocationError +@@ -24,7 +22,6 @@ from salt.exceptions import CommandExecutionError, SaltInvocationError + # pylint: disable=import-error + from salt.ext.six.moves import range # pylint: disable=redefined-builtin + +-# Import Salt Testing libs + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, patch + from tests.support.unit import TestCase +@@ -1859,6 +1856,25 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + virt.update("my_vm"), + ) + ++ # mem + cpu case ++ define_mock.reset_mock() ++ domain_mock.setMemoryFlags.return_value = 0 ++ domain_mock.setVcpusFlags.return_value = 0 ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ "mem": True, ++ "cpu": True, ++ }, ++ virt.update("my_vm", mem=2048, cpu=2), ++ ) ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual("2", setxml.find("vcpu").text) ++ self.assertEqual("2147483648", setxml.find("memory").text) ++ self.assertEqual(2048 * 1024, domain_mock.setMemoryFlags.call_args[0][0]) ++ + # Same parameters passed than in default virt.defined state case + self.assertEqual( + { +@@ -2004,6 +2020,50 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + with self.assertRaises(SaltInvocationError): + 
virt.update("my_vm", boot={"efi": "Not a boolean value"}) + ++ # Update memtune parameter case ++ memtune = { ++ "soft_limit": "0.5g", ++ "hard_limit": "1024", ++ "swap_hard_limit": "2048m", ++ "min_guarantee": "1 g", ++ } ++ ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("my_vm", mem=memtune), ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual( ++ setxml.find("memtune").find("soft_limit").text, str(int(0.5 * 1024 ** 3)) ++ ) ++ self.assertEqual(setxml.find("memtune").find("soft_limit").get("unit"), "bytes") ++ self.assertEqual( ++ setxml.find("memtune").find("hard_limit").text, str(1024 * 1024 ** 2) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("swap_hard_limit").text, str(2048 * 1024 ** 2) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3) ++ ) ++ ++ invalid_unit = {"soft_limit": "2HB"} ++ ++ with self.assertRaises(SaltInvocationError): ++ virt.update("my_vm", mem=invalid_unit) ++ ++ invalid_number = { ++ "soft_limit": "3.4.MB", ++ } ++ ++ with self.assertRaises(SaltInvocationError): ++ virt.update("my_vm", mem=invalid_number) ++ + # Update memory case + setmem_mock = MagicMock(return_value=0) + domain_mock.setMemoryFlags = setmem_mock +@@ -2018,10 +2078,43 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + virt.update("my_vm", mem=2048), + ) + setxml = ET.fromstring(define_mock.call_args[0][0]) +- self.assertEqual(setxml.find("memory").text, "2048") +- self.assertEqual(setxml.find("memory").get("unit"), "MiB") ++ self.assertEqual(setxml.find("memory").text, str(2048 * 1024 ** 2)) ++ self.assertEqual(setxml.find("memory").get("unit"), "bytes") + self.assertEqual(setmem_mock.call_args[0][0], 2048 * 1024) + ++ mem_dict = {"boot": "0.5g", "current": "2g", "max": "1g", "slots": 12} ++ self.assertEqual( ++ { ++ "definition": True, ++ "mem": 
True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("my_vm", mem=mem_dict), ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual(setxml.find("memory").get("unit"), "bytes") ++ self.assertEqual(setxml.find("memory").text, str(int(0.5 * 1024 ** 3))) ++ self.assertEqual(setxml.find("maxMemory").text, str(1 * 1024 ** 3)) ++ self.assertEqual(setxml.find("currentMemory").text, str(2 * 1024 ** 3)) ++ ++ max_slot_reverse = { ++ "slots": "10", ++ "max": "3096m", ++ } ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("my_vm", mem=max_slot_reverse), ++ ) ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2)) ++ self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10") ++ + # Update disks case + devattach_mock = MagicMock(return_value=0) + devdetach_mock = MagicMock(return_value=0) +@@ -2536,7 +2629,6 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + """ + Test virt.update() with existing boot parameters. + """ +- root_dir = os.path.join(salt.syspaths.ROOT_DIR, "srv", "salt-images") + xml_boot = """ + + vm_with_boot_param +@@ -2594,9 +2686,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + + + +- """.format( +- root_dir, os.sep +- ) ++ """ + domain_mock_boot = self.set_mock_vm("vm_with_boot_param", xml_boot) + domain_mock_boot.OSType = MagicMock(return_value="hvm") + define_mock_boot = MagicMock(return_value=True) +@@ -2697,6 +2787,218 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + self.assertEqual(setxml.find("os").find("loader"), None) + self.assertEqual(setxml.find("os").find("nvram"), None) + ++ def test_update_memtune_params(self): ++ """ ++ Test virt.update() with memory tuning parameters. 
++ """ ++ xml_with_memtune_params = """ ++ ++ vm_with_boot_param ++ 1048576 ++ 1048576 ++ 1048576 ++ 1 ++ ++ 1048576 ++ 2097152 ++ 2621440 ++ 671088 ++ ++ ++ hvm ++ ++ ++ """ ++ domain_mock = self.set_mock_vm("vm_with_memtune_param", xml_with_memtune_params) ++ domain_mock.OSType = MagicMock(return_value="hvm") ++ define_mock = MagicMock(return_value=True) ++ self.mock_conn.defineXML = define_mock ++ ++ memtune_new_val = { ++ "boot": "0.7g", ++ "current": "2.5g", ++ "max": "3096m", ++ "slots": "10", ++ "soft_limit": "2048m", ++ "hard_limit": "1024", ++ "swap_hard_limit": "2.5g", ++ "min_guarantee": "1 g", ++ } ++ ++ domain_mock.setMemoryFlags.return_value = 0 ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ "mem": True, ++ }, ++ virt.update("vm_with_memtune_param", mem=memtune_new_val), ++ ) ++ self.assertEqual( ++ domain_mock.setMemoryFlags.call_args[0][0], int(2.5 * 1024 ** 2) ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual( ++ setxml.find("memtune").find("soft_limit").text, str(2048 * 1024) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("hard_limit").text, str(1024 * 1024) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("swap_hard_limit").text, ++ str(int(2.5 * 1024 ** 2)), ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("swap_hard_limit").get("unit"), "KiB", ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("min_guarantee").attrib.get("unit"), "bytes" ++ ) ++ self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2)) ++ self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10") ++ self.assertEqual(setxml.find("currentMemory").text, str(int(2.5 * 1024 ** 3))) ++ self.assertEqual(setxml.find("memory").text, str(int(0.7 * 1024 ** 3))) ++ ++ max_slot_reverse = { ++ 
"slots": "10", ++ "max": "3096m", ++ } ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("vm_with_memtune_param", mem=max_slot_reverse), ++ ) ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual(setxml.find("maxMemory").text, str(3096 * 1024 ** 2)) ++ self.assertEqual(setxml.find("maxMemory").get("unit"), "bytes") ++ self.assertEqual(setxml.find("maxMemory").attrib.get("slots"), "10") ++ ++ max_swap_none = { ++ "boot": "0.7g", ++ "current": "2.5g", ++ "max": None, ++ "slots": "10", ++ "soft_limit": "2048m", ++ "hard_limit": "1024", ++ "swap_hard_limit": None, ++ "min_guarantee": "1 g", ++ } ++ ++ domain_mock.setMemoryFlags.reset_mock() ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ "mem": True, ++ }, ++ virt.update("vm_with_memtune_param", mem=max_swap_none), ++ ) ++ self.assertEqual( ++ domain_mock.setMemoryFlags.call_args[0][0], int(2.5 * 1024 ** 2) ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual( ++ setxml.find("memtune").find("soft_limit").text, str(2048 * 1024) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("hard_limit").text, str(1024 * 1024) ++ ) ++ self.assertEqual(setxml.find("memtune").find("swap_hard_limit"), None) ++ self.assertEqual( ++ setxml.find("memtune").find("min_guarantee").text, str(1 * 1024 ** 3) ++ ) ++ self.assertEqual( ++ setxml.find("memtune").find("min_guarantee").attrib.get("unit"), "bytes" ++ ) ++ self.assertEqual(setxml.find("maxMemory").text, None) ++ self.assertEqual(setxml.find("currentMemory").text, str(int(2.5 * 1024 ** 3))) ++ self.assertEqual(setxml.find("memory").text, str(int(0.7 * 1024 ** 3))) ++ ++ memtune_none = { ++ "soft_limit": None, ++ "hard_limit": None, ++ "swap_hard_limit": None, ++ "min_guarantee": None, ++ } 
++ ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("vm_with_memtune_param", mem=memtune_none), ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual(setxml.find("memtune").find("soft_limit"), None) ++ self.assertEqual(setxml.find("memtune").find("hard_limit"), None) ++ self.assertEqual(setxml.find("memtune").find("swap_hard_limit"), None) ++ self.assertEqual(setxml.find("memtune").find("min_guarantee"), None) ++ ++ max_none = { ++ "max": None, ++ } ++ ++ self.assertEqual( ++ { ++ "definition": True, ++ "disk": {"attached": [], "detached": [], "updated": []}, ++ "interface": {"attached": [], "detached": []}, ++ }, ++ virt.update("vm_with_memtune_param", mem=max_none), ++ ) ++ ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ self.assertEqual(setxml.find("maxMemory"), None) ++ self.assertEqual(setxml.find("currentMemory").text, str(int(1 * 1024 ** 2))) ++ self.assertEqual(setxml.find("memory").text, str(int(1 * 1024 ** 2))) ++ ++ def test_handle_unit(self): ++ """ ++ Test regex function for handling units ++ """ ++ valid_case = [ ++ ("2", 2097152), ++ ("42", 44040192), ++ ("5b", 5), ++ ("2.3Kib", 2355), ++ ("5.8Kb", 5800), ++ ("16MiB", 16777216), ++ ("20 GB", 20000000000), ++ ("16KB", 16000), ++ (".5k", 512), ++ ("2.k", 2048), ++ ] ++ ++ for key, val in valid_case: ++ self.assertEqual(virt._handle_unit(key), val) ++ ++ invalid_case = [ ++ ("9ib", "invalid units"), ++ ("8byte", "invalid units"), ++ ("512bytes", "invalid units"), ++ ("4 Kbytes", "invalid units"), ++ ("3.4.MB", "invalid number"), ++ ("", "invalid number"), ++ ("bytes", "invalid number"), ++ ("2HB", "invalid units"), ++ ] ++ ++ for key, val in invalid_case: ++ with self.assertRaises(SaltInvocationError): ++ virt._handle_unit(key) ++ + def test_mixed_dict_and_list_as_profile_objects(self): + """ + Test virt._nic_profile with mixed 
dictionaries and lists as input. +diff --git a/tests/unit/states/test_virt.py b/tests/unit/states/test_virt.py +index f03159334b..1923ae5c0f 100644 +--- a/tests/unit/states/test_virt.py ++++ b/tests/unit/states/test_virt.py +@@ -1,21 +1,15 @@ + """ + :codeauthor: Jayesh Kariya + """ +-# Import Python libs + + import shutil + import tempfile + +-# Import Salt Libs + import salt.states.virt as virt + import salt.utils.files + from salt.exceptions import CommandExecutionError, SaltInvocationError +- +-# Import 3rd-party libs + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.mock import MagicMock, mock_open, patch +- +-# Import Salt Testing Libs + from tests.support.runtests import RUNTIME_VARS + from tests.support.unit import TestCase + +@@ -351,6 +345,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + install=False, + pub_key="/path/to/key.pub", + priv_key="/path/to/key", ++ stop_on_reboot=True, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -376,6 +371,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + start=False, + pub_key="/path/to/key.pub", + priv_key="/path/to/key", ++ stop_on_reboot=True, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -489,6 +485,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + password=None, + boot=None, + test=False, ++ stop_on_reboot=False, + ) + + # Failed definition update case +@@ -559,6 +556,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + install=False, + pub_key="/path/to/key.pub", + priv_key="/path/to/key", ++ stop_on_reboot=False, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -601,6 +599,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + boot=None, + test=True, + boot_dev=None, ++ stop_on_reboot=False, + ) + + # No changes case +@@ -636,6 +635,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + boot=None, + 
test=True, + boot_dev=None, ++ stop_on_reboot=False, + ) + + def test_running(self): +@@ -713,6 +713,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + pub_key=None, + priv_key=None, + boot_dev=None, ++ stop_on_reboot=False, + connection=None, + username=None, + password=None, +@@ -775,6 +776,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + pub_key="/path/to/key.pub", + priv_key="/path/to/key", + boot_dev="network hd", ++ stop_on_reboot=True, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -800,6 +802,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + pub_key="/path/to/key.pub", + priv_key="/path/to/key", + boot_dev="network hd", ++ stop_on_reboot=True, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -945,6 +948,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + boot=None, + test=False, + boot_dev=None, ++ stop_on_reboot=False, + ) + + # Failed definition update case +@@ -1018,6 +1022,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + install=False, + pub_key="/path/to/key.pub", + priv_key="/path/to/key", ++ stop_on_reboot=True, + connection="someconnection", + username="libvirtuser", + password="supersecret", +@@ -1064,6 +1069,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + boot=None, + test=True, + boot_dev=None, ++ stop_on_reboot=False, + ) + start_mock.assert_not_called() + +@@ -1101,6 +1107,7 @@ class LibvirtTestCase(TestCase, LoaderModuleMockMixin): + boot=None, + test=True, + boot_dev=None, ++ stop_on_reboot=False, + ) + + def test_stopped(self): +-- +2.28.0 + + diff --git a/bsc-1176024-fix-file-directory-user-and-group-owners.patch b/bsc-1176024-fix-file-directory-user-and-group-owners.patch new file mode 100644 index 0000000..d4c5bda --- /dev/null +++ b/bsc-1176024-fix-file-directory-user-and-group-owners.patch @@ -0,0 +1,112 @@ +From 8973063f6ad24fd5b3788292aa8cc341221d7fb5 Mon Sep 17 00:00:00 2001 
+From: Victor Zhestkov <35733135+vzhestkov@users.noreply.github.com> +Date: Tue, 6 Oct 2020 12:36:41 +0300 +Subject: [PATCH] bsc#1176024: Fix file/directory user and group + ownership containing UTF-8 characters (#275) + +* Fix check_perm typos of file module + +* Fix UTF8 support for user/group ownership operations with file module and state + +* Fix UTF8 support for user/group ownership operations with file module and state + +Co-authored-by: Victor Zhestkov +--- + salt/modules/file.py | 18 +++++++++--------- + salt/states/file.py | 4 ++-- + 2 files changed, 11 insertions(+), 11 deletions(-) + +diff --git a/salt/modules/file.py b/salt/modules/file.py +index b5b70e2d4c..0b516aff05 100644 +--- a/salt/modules/file.py ++++ b/salt/modules/file.py +@@ -256,7 +256,7 @@ def group_to_gid(group): + try: + if isinstance(group, int): + return group +- return grp.getgrnam(group).gr_gid ++ return grp.getgrnam(salt.utils.stringutils.to_str(group)).gr_gid + except KeyError: + return '' + +@@ -344,7 +344,7 @@ def user_to_uid(user): + try: + if isinstance(user, int): + return user +- return pwd.getpwnam(user).pw_uid ++ return pwd.getpwnam(salt.utils.stringutils.to_str(user)).pw_uid + except KeyError: + return '' + +@@ -4574,7 +4574,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) + if (salt.utils.platform.is_windows() and + user_to_uid(user) != user_to_uid(perms['luser']) + ) or ( +- not salt.utils.platform.is_windows() and user != perms['luser'] ++ not salt.utils.platform.is_windows() and salt.utils.stringutils.to_str(user) != perms['luser'] + ): + perms['cuser'] = user + +@@ -4584,7 +4584,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) + if (salt.utils.platform.is_windows() and + group_to_gid(group) != group_to_gid(perms['lgroup']) + ) or ( +- not salt.utils.platform.is_windows() and group != perms['lgroup'] ++ not salt.utils.platform.is_windows() and salt.utils.stringutils.to_str(group) != 
perms['lgroup'] + ): + perms['cgroup'] = group + +@@ -4615,7 +4615,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) + user != '' + ) or ( + not salt.utils.platform.is_windows() and +- user != get_user(name, follow_symlinks=follow_symlinks) and ++ salt.utils.stringutils.to_str(user) != get_user(name, follow_symlinks=follow_symlinks) and + user != '' + ): + if __opts__['test'] is True: +@@ -4633,10 +4633,10 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) + if (salt.utils.platform.is_windows() and + group_to_gid(group) != group_to_gid( + get_group(name, follow_symlinks=follow_symlinks)) and +- user != '') or ( ++ group != '') or ( + not salt.utils.platform.is_windows() and +- group != get_group(name, follow_symlinks=follow_symlinks) and +- user != '' ++ salt.utils.stringutils.to_str(group) != get_group(name, follow_symlinks=follow_symlinks) and ++ group != '' + ): + if __opts__['test'] is True: + ret['changes']['group'] = group +@@ -4644,7 +4644,7 @@ def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False) + ret['result'] = False + ret['comment'].append('Failed to change group to {0}' + .format(group)) +- elif 'cgroup' in perms and user != '': ++ elif 'cgroup' in perms and group != '': + ret['changes']['group'] = group + + # Mode changes if needed +diff --git a/salt/states/file.py b/salt/states/file.py +index 0e925bb2ed..f21e0d12fc 100644 +--- a/salt/states/file.py ++++ b/salt/states/file.py +@@ -960,11 +960,11 @@ def _check_dir_meta(name, + changes['directory'] = 'new' + return changes + if (user is not None +- and user != stats['user'] ++ and salt.utils.stringutils.to_str(user) != stats['user'] + and user != stats.get('uid')): + changes['user'] = user + if (group is not None +- and group != stats['group'] ++ and salt.utils.stringutils.to_str(group) != stats['group'] + and group != stats.get('gid')): + changes['group'] = group + # Normalize the dir mode +-- +2.28.0 + + 
diff --git a/drop-wrong-mock-from-chroot-unit-test.patch b/drop-wrong-mock-from-chroot-unit-test.patch new file mode 100644 index 0000000..7f56f14 --- /dev/null +++ b/drop-wrong-mock-from-chroot-unit-test.patch @@ -0,0 +1,26 @@ +From e2c3b1cb72b796fe12f94af64baa2e64cbe5db0b Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= + +Date: Tue, 13 Oct 2020 12:02:00 +0100 +Subject: [PATCH] Drop wrong mock from chroot unit test + +--- + tests/unit/modules/test_chroot.py | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py +index 62808ed680..045d56c5b0 100644 +--- a/tests/unit/modules/test_chroot.py ++++ b/tests/unit/modules/test_chroot.py +@@ -83,7 +83,6 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin): + self.assertTrue(chroot.create('/chroot')) + makedirs.assert_called() + +- @patch("salt.modules.chroot.exist") + @patch("salt.utils.files.fopen") + def test_in_chroot(self, fopen): + """ +-- +2.28.0 + + diff --git a/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch new file mode 100644 index 0000000..000cd9d --- /dev/null +++ b/ensure-virt.update-stop_on_reboot-is-updated-with-it.patch @@ -0,0 +1,51 @@ +From 173444cecc1e7b4867570f1f8764db1b7f82061e Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Wed, 14 Oct 2020 12:39:16 +0200 +Subject: [PATCH] Ensure virt.update stop_on_reboot is updated with its + default value (#280) + +While all virt.update properties default values should not be used when +updating the XML definition, the stop_on_reboot default value (False) +needs to be passed still or the user will never be able to update with +this value. 
+--- + salt/modules/virt.py | 1 + + tests/unit/modules/test_virt.py | 2 ++ + 2 files changed, 3 insertions(+) + +diff --git a/salt/modules/virt.py b/salt/modules/virt.py +index 87ab7ca12d..9bc7bc6093 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -2742,6 +2742,7 @@ def update( + ] + + data = {k: v for k, v in six.iteritems(locals()) if bool(v)} ++ data["stop_on_reboot"] = stop_on_reboot + if boot_dev: + data["boot_dev"] = {i + 1: dev for i, dev in enumerate(boot_dev.split())} + need_update = salt.utils.xmlutil.change_xml( +diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py +index ca5e80d2d2..fbc03cf7a6 100644 +--- a/tests/unit/modules/test_virt.py ++++ b/tests/unit/modules/test_virt.py +@@ -1778,6 +1778,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + 1048576 + 1048576 + 1 ++ restart + + hvm + +@@ -2350,6 +2351,7 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + 1048576 + 1048576 + 1 ++ restart + + hvm + +-- +2.28.0 + + diff --git a/fix-grains.test_core-unit-test-277.patch b/fix-grains.test_core-unit-test-277.patch new file mode 100644 index 0000000..a65482a --- /dev/null +++ b/fix-grains.test_core-unit-test-277.patch @@ -0,0 +1,46 @@ +From 4998996a08db72a1b925b2c3f725c4fba4fe9622 Mon Sep 17 00:00:00 2001 +From: Dominik Gedon +Date: Tue, 6 Oct 2020 14:00:55 +0200 +Subject: [PATCH] Fix grains.test_core unit test (#277) + +This reverts 63b94ae and fixes the grains test_core unit test. The +changes are aligned with upstream. 
+--- + tests/unit/grains/test_core.py | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) + +diff --git a/tests/unit/grains/test_core.py b/tests/unit/grains/test_core.py +index 36aa49f232..d3b6515d00 100644 +--- a/tests/unit/grains/test_core.py ++++ b/tests/unit/grains/test_core.py +@@ -69,10 +69,11 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + def test_parse_etc_os_release(self, path_isfile_mock): + path_isfile_mock.side_effect = lambda x: x == "/usr/lib/os-release" + with salt.utils.files.fopen(os.path.join(OS_RELEASE_DIR, "ubuntu-17.10")) as os_release_file: +- os_release_content = os_release_file.readlines() +- with patch("salt.utils.files.fopen", mock_open()) as os_release_file: +- os_release_file.return_value.__iter__.return_value = os_release_content +- os_release = core._parse_os_release(["/etc/os-release", "/usr/lib/os-release"]) ++ os_release_content = os_release_file.read() ++ with patch("salt.utils.files.fopen", mock_open(read_data=os_release_content)): ++ os_release = core._parse_os_release( ++ "/etc/os-release", "/usr/lib/os-release" ++ ) + self.assertEqual(os_release, { + "NAME": "Ubuntu", + "VERSION": "17.10 (Artful Aardvark)", +@@ -134,7 +135,9 @@ class CoreGrainsTestCase(TestCase, LoaderModuleMockMixin): + + def test_missing_os_release(self): + with patch('salt.utils.files.fopen', mock_open(read_data={})): +- os_release = core._parse_os_release(['/etc/os-release', '/usr/lib/os-release']) ++ os_release = core._parse_os_release( ++ "/etc/os-release", "/usr/lib/os-release" ++ ) + self.assertEqual(os_release, {}) + + @skipIf(not salt.utils.platform.is_windows(), 'System is not Windows') +-- +2.28.0 + + diff --git a/invalidate-file-list-cache-when-cache-file-modified-.patch b/invalidate-file-list-cache-when-cache-file-modified-.patch new file mode 100644 index 0000000..a4826a3 --- /dev/null +++ b/invalidate-file-list-cache-when-cache-file-modified-.patch @@ -0,0 +1,95 @@ +From 1ca1bb7c01b1e589147c32b16eda719537ab5b62 
Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Pablo=20Su=C3=A1rez=20Hern=C3=A1ndez?= + +Date: Tue, 22 Sep 2020 15:15:51 +0100 +Subject: [PATCH] Invalidate file list cache when cache file modified + time is in the future (bsc#1176397) + +Add test_future_file_list_cache_file_ignored unit test +--- + salt/fileserver/__init__.py | 2 +- + tests/unit/test_fileserver.py | 47 +++++++++++++++++++++++++++++++++-- + 2 files changed, 46 insertions(+), 3 deletions(-) + +diff --git a/salt/fileserver/__init__.py b/salt/fileserver/__init__.py +index 919987e2fc..1b8de51bdc 100644 +--- a/salt/fileserver/__init__.py ++++ b/salt/fileserver/__init__.py +@@ -142,7 +142,7 @@ def check_file_list_cache(opts, form, list_cache, w_lock): + 'file=%s mtime=%s current_time=%s', + list_cache, current_time, file_mtime + ) +- age = 0 ++ age = -1 + else: + age = current_time - file_mtime + else: +diff --git a/tests/unit/test_fileserver.py b/tests/unit/test_fileserver.py +index d38e22c8e1..b92b32947b 100644 +--- a/tests/unit/test_fileserver.py ++++ b/tests/unit/test_fileserver.py +@@ -6,11 +6,17 @@ + # Import Python libs + from __future__ import absolute_import, print_function, unicode_literals + +-# Import Salt Testing libs +-from tests.support.unit import TestCase ++import datetime ++import os ++import time + ++import salt.utils.files + from salt import fileserver + ++# Import Salt Testing libs ++from tests.support.helpers import with_tempdir ++from tests.support.unit import TestCase ++ + + class MapDiffTestCase(TestCase): + def test_diff_with_diffent_keys(self): +@@ -28,3 +34,40 @@ class MapDiffTestCase(TestCase): + map1 = {'file1': 12345} + map2 = {'file1': 1234} + assert fileserver.diff_mtime_map(map1, map2) is True ++ ++ ++class VCSBackendWhitelistCase(TestCase): ++ def setup_loader_modules(self): ++ return {fileserver: {}} ++ ++ @with_tempdir() ++ def test_future_file_list_cache_file_ignored(self, cachedir): ++ opts = { ++ "fileserver_backend": ["roots"], ++ "cachedir": cachedir, ++ 
"extension_modules": "", ++ } ++ ++ back_cachedir = os.path.join(cachedir, "file_lists/roots") ++ os.makedirs(os.path.join(back_cachedir)) ++ ++ # Touch a couple files ++ for filename in ("base.p", "foo.txt"): ++ with salt.utils.files.fopen( ++ os.path.join(back_cachedir, filename), "wb" ++ ) as _f: ++ if filename == "base.p": ++ _f.write(b"\x80") ++ ++ # Set modification time to file list cache file to 1 year in the future ++ now = datetime.datetime.utcnow() ++ future = now + datetime.timedelta(days=365) ++ mod_time = time.mktime(future.timetuple()) ++ os.utime(os.path.join(back_cachedir, "base.p"), (mod_time, mod_time)) ++ ++ list_cache = os.path.join(back_cachedir, "base.p") ++ w_lock = os.path.join(back_cachedir, ".base.w") ++ ret = fileserver.check_file_list_cache(opts, "files", list_cache, w_lock) ++ assert ( ++ ret[1] is True ++ ), "Cache file list cache file is not refreshed when future modification time" +-- +2.28.0 + + diff --git a/salt.changes b/salt.changes index aabb69b..3dff44d 100644 --- a/salt.changes +++ b/salt.changes @@ -1,3 +1,61 @@ +------------------------------------------------------------------- +Wed Oct 14 10:49:33 UTC 2020 - Pablo Suárez Hernández + +- Ensure virt.update stop_on_reboot is updated with its default value + +- Added: + * ensure-virt.update-stop_on_reboot-is-updated-with-it.patch + +------------------------------------------------------------------- +Tue Oct 13 15:26:05 UTC 2020 - Pablo Suárez Hernández + +- Do not break package building for systemd OSes + +------------------------------------------------------------------- +Tue Oct 13 11:10:06 UTC 2020 - Pablo Suárez Hernández + +- Drop wrong mock from chroot unit test + +- Added: + * drop-wrong-mock-from-chroot-unit-test.patch + +------------------------------------------------------------------- +Wed Oct 7 12:19:05 UTC 2020 - Jochen Breuer + +- Support systemd versions with dot (bsc#1176294) + +------------------------------------------------------------------- +Tue Oct 6 
12:52:51 UTC 2020 - Jochen Breuer + +- Fix for grains.test_core unit test +- Fix file/directory user and group ownership containing UTF-8 + characters (bsc#1176024) +- Several changes to virtualization: +- - Fix virt update when cpu and memory are changed +- - Memory Tuning GSoC +- - Properly fix memory setting regression in virt.update +- - Expose libvirt on_reboot in virt states +- Support transactional systems (MicroOS) +- zypperpkg module ignores retcode 104 for search() (bsc#1159670) +- Xen disk fixes. No longer generates volumes for Xen disks, but the + corresponding file or block disk (bsc#1175987) + +- Added: + * fix-grains.test_core-unit-test-277.patch + * support-transactional-systems-microos-271.patch + * backport-a-few-virt-prs-272.patch + * xen-disk-fixes-264.patch + * zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch + * bsc-1176024-fix-file-directory-user-and-group-owners.patch + +------------------------------------------------------------------- +Wed Sep 23 14:48:41 UTC 2020 - Pablo Suárez Hernández + +- Invalidate file list cache when cache file modified time is in the future (bsc#1176397) + +- Added: + * invalidate-file-list-cache-when-cache-file-modified-.patch + ------------------------------------------------------------------- Wed Sep 16 11:52:33 UTC 2020 - Pablo Suárez Hernández diff --git a/salt.spec b/salt.spec index 6435ab8..a5ac381 100644 --- a/salt.spec +++ b/salt.spec @@ -345,6 +345,24 @@ Patch129: fix-virt.update-with-cpu-defined-263.patch Patch130: remove-msgpack-1.0.0-requirement-in-the-installed-me.patch # PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/bc20f38d0fa492af70321fef7fe2530937dfc86a Patch131: prevent-import-errors-when-running-test_btrfs-unit-t.patch +# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58529 +Patch132: invalidate-file-list-cache-when-cache-file-modified-.patch +# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58400 +Patch133: xen-disk-fixes-264.patch +# 
PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58552 +Patch134: zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch +# PATCH-FIX_UPSTREAM: https://github.com/saltstack/salt/pull/58520 +Patch135: support-transactional-systems-microos-271.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/272 +Patch136: backport-a-few-virt-prs-272.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/275 +Patch137: bsc-1176024-fix-file-directory-user-and-group-owners.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/277 +Patch138: fix-grains.test_core-unit-test-277.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/commit/e2c3b1cb72b796fe12f94af64baa2e64cbe5db0b +Patch139: drop-wrong-mock-from-chroot-unit-test.patch +# PATCH-FIX_OPENSUSE: https://github.com/openSUSE/salt/pull/280 +Patch140: ensure-virt.update-stop_on_reboot-is-updated-with-it.patch BuildRoot: %{_tmppath}/%{name}-%{version}-build BuildRequires: logrotate @@ -690,6 +708,7 @@ Requires: pmtools %endif %if %{with systemd} %{?systemd_requires} +BuildRequires: systemd %else %if 0%{?suse_version} Requires(pre): %insserv_prereq @@ -982,6 +1001,15 @@ cp %{S:5} ./.travis.yml %patch129 -p1 %patch130 -p1 %patch131 -p1 +%patch132 -p1 +%patch133 -p1 +%patch134 -p1 +%patch135 -p1 +%patch136 -p1 +%patch137 -p1 +%patch138 -p1 +%patch139 -p1 +%patch140 -p1 %build # Putting /usr/bin at the front of $PATH is needed for RHEL/RES 7. Without this @@ -1357,7 +1385,8 @@ if [ $1 -eq 2 ] ; then true fi %if %{with systemd} -if [ `rpm -q systemd --queryformat="%%{VERSION}"` -lt 228 ]; then +systemd_ver=$(rpm -q systemd --queryformat="%%{VERSION}") +if [ "${systemd_ver%%.*}" -lt 228 ]; then # On systemd < 228 the 'TasksTask' attribute is not available. 
# Removing TasksMax from salt-master.service on SLE12SP1 LTSS (bsc#985112) sed -i '/TasksMax=infinity/d' %{_unitdir}/salt-master.service diff --git a/support-transactional-systems-microos-271.patch b/support-transactional-systems-microos-271.patch new file mode 100644 index 0000000..5f7cdbd --- /dev/null +++ b/support-transactional-systems-microos-271.patch @@ -0,0 +1,3145 @@ +From 479ec4e978d81da75e45e2ead3193ca96e075753 Mon Sep 17 00:00:00 2001 +From: Alberto Planas +Date: Mon, 5 Oct 2020 16:32:44 +0200 +Subject: [PATCH] Support transactional systems (MicroOS) (#271) + +* Add rebootmgr module + +* Add transactional_update module + +* chroot: add chroot detector + +* systemd: add offline mode detector + +* transactional_update: add pending_transaction detector + +* extra: add EFI and transactional grains + +* transactional_update: add call, apply_, sls & highstate + +* transactional_update: add documentation + +* transactional_update: add executor + +* Add changelog entry 58519.added + +Closes #58519 + +* transactional_update: update the cleanups family + +* transactional_update: add activate_transaction param + +* transactional_update: skip tests on Windows +--- + changelog/58519.added | 1 + + doc/ref/executors/all/index.rst | 1 + + .../salt.executors.transactional_update.rst | 6 + + doc/ref/modules/all/index.rst | 2 + + .../modules/all/salt.modules.rebootmgr.rst | 5 + + .../all/salt.modules.transactional_update.rst | 5 + + salt/executors/transactional_update.py | 126 ++ + salt/grains/extra.py | 29 + + salt/modules/chroot.py | 39 +- + salt/modules/rebootmgr.py | 357 +++++ + salt/modules/systemd_service.py | 22 +- + salt/modules/transactional_update.py | 1270 +++++++++++++++++ + salt/utils/systemd.py | 22 + + tests/unit/modules/test_chroot.py | 15 + + tests/unit/modules/test_rebootmgr.py | 304 ++++ + .../unit/modules/test_transactional_update.py | 683 +++++++++ + 16 files changed, 2882 insertions(+), 5 deletions(-) + create mode 100644 changelog/58519.added + 
create mode 100644 doc/ref/executors/all/salt.executors.transactional_update.rst + create mode 100644 doc/ref/modules/all/salt.modules.rebootmgr.rst + create mode 100644 doc/ref/modules/all/salt.modules.transactional_update.rst + create mode 100644 salt/executors/transactional_update.py + create mode 100644 salt/modules/rebootmgr.py + create mode 100644 salt/modules/transactional_update.py + create mode 100644 tests/unit/modules/test_rebootmgr.py + create mode 100644 tests/unit/modules/test_transactional_update.py + +diff --git a/changelog/58519.added b/changelog/58519.added +new file mode 100644 +index 0000000000..1cc8d7dc74 +--- /dev/null ++++ b/changelog/58519.added +@@ -0,0 +1 @@ ++Add support for transactional systems, like openSUSE MicroOS +\ No newline at end of file +diff --git a/doc/ref/executors/all/index.rst b/doc/ref/executors/all/index.rst +index 1f26a86fc3..4cd430d8e3 100644 +--- a/doc/ref/executors/all/index.rst ++++ b/doc/ref/executors/all/index.rst +@@ -14,3 +14,4 @@ executors modules + docker + splay + sudo ++ transactional_update +diff --git a/doc/ref/executors/all/salt.executors.transactional_update.rst b/doc/ref/executors/all/salt.executors.transactional_update.rst +new file mode 100644 +index 0000000000..17f00b2d27 +--- /dev/null ++++ b/doc/ref/executors/all/salt.executors.transactional_update.rst +@@ -0,0 +1,6 @@ ++salt.executors.transactional_update module ++========================================== ++ ++.. 
automodule:: salt.executors.transactional_update ++ :members: ++ +diff --git a/doc/ref/modules/all/index.rst b/doc/ref/modules/all/index.rst +index 8e1bf2ecf1..ec5f4b9cd9 100644 +--- a/doc/ref/modules/all/index.rst ++++ b/doc/ref/modules/all/index.rst +@@ -371,6 +371,7 @@ execution modules + rbac_solaris + rbenv + rdp ++ rebootmgr + redismod + reg + rest_pkg +@@ -457,6 +458,7 @@ execution modules + tls + tomcat + trafficserver ++ transactional_update + travisci + tuned + twilio_notify +diff --git a/doc/ref/modules/all/salt.modules.rebootmgr.rst b/doc/ref/modules/all/salt.modules.rebootmgr.rst +new file mode 100644 +index 0000000000..22240080b0 +--- /dev/null ++++ b/doc/ref/modules/all/salt.modules.rebootmgr.rst +@@ -0,0 +1,5 @@ ++salt.modules.rebootmgr module ++============================= ++ ++.. automodule:: salt.modules.rebootmgr ++ :members: +diff --git a/doc/ref/modules/all/salt.modules.transactional_update.rst b/doc/ref/modules/all/salt.modules.transactional_update.rst +new file mode 100644 +index 0000000000..2f15b95ad4 +--- /dev/null ++++ b/doc/ref/modules/all/salt.modules.transactional_update.rst +@@ -0,0 +1,5 @@ ++salt.modules.transactional_update module ++======================================== ++ ++.. automodule:: salt.modules.transactional_update ++ :members: +diff --git a/salt/executors/transactional_update.py b/salt/executors/transactional_update.py +new file mode 100644 +index 0000000000..ef7d92bc05 +--- /dev/null ++++ b/salt/executors/transactional_update.py +@@ -0,0 +1,126 @@ ++""" ++Transactional executor module ++ ++.. 
versionadded:: TBD ++ ++""" ++ ++import salt.utils.path ++ ++# Functions that are mapped into an equivalent one in ++# transactional_update module ++DELEGATION_MAP = { ++ "state.single": "transactional_update.single", ++ "state.sls": "transactional_update.sls", ++ "state.apply": "transactional_update.apply", ++ "state.highstate": "transactional_update.highstate", ++} ++ ++# By default, all modules and functions are executed outside the ++# transaction. The next two sets will enumerate the exceptions that ++# will be routed to transactional_update.call() ++DEFAULT_DELEGATED_MODULES = [ ++ "ansible", ++ "cabal", ++ "chef", ++ "cmd", ++ "composer", ++ "cp", ++ "cpan", ++ "cyg", ++ "file", ++ "freeze", ++ "nix", ++ "npm", ++ "pip", ++ "pkg", ++ "puppet", ++ "pyenv", ++ "rbenv", ++ "scp", ++] ++DEFAULT_DELEGATED_FUNCTIONS = [] ++ ++ ++def __virtual__(): ++ if salt.utils.path.which("transactional-update"): ++ return True ++ else: ++ return (False, "transactional_update executor requires a transactional system") ++ ++ ++def execute(opts, data, func, args, kwargs): ++ """Delegate into transactional_update module ++ ++ The ``transactional_update`` module support the execution of ++ functions inside a transaction, as support apply a state (via ++ ``apply``, ``sls``, ``single`` or ``highstate``). ++ ++ This execution module can be used to route some Salt modules and ++ functions to be executed inside the transaction snapshot. ++ ++ Add this executor in the minion configuration file: ++ ++ .. code-block:: yaml ++ ++ module_executors: ++ - transactional_update ++ - direct_call ++ ++ Or use the command line parameter: ++ ++ .. code-block:: bash ++ ++ salt-call --module-executors='[transactional_update, direct_call]' test.version ++ ++ You can also schedule a reboot if needed: ++ ++ .. code-block:: bash ++ ++ salt-call --module-executors='[transactional_update]' state.sls stuff activate_transaction=True ++ ++ There are some configuration parameters supported: ++ ++ .. 
code-block:: yaml ++ ++ # Replace the list of default modules that all the functions ++ # are delegated to `transactional_update.call()` ++ delegated_modules: [cmd, pkg] ++ ++ # Replace the list of default functions that are delegated to ++ # `transactional_update.call()` ++ delegated_functions: [pip.install] ++ ++ # Expand the default list of modules ++ add_delegated_modules: [ansible] ++ ++ # Expand the default list of functions ++ add_delegated_functions: [file.copy] ++ ++ """ ++ fun = data["fun"] ++ module, _ = fun.split(".") ++ ++ delegated_modules = set(opts.get("delegated_modules", DEFAULT_DELEGATED_MODULES)) ++ delegated_functions = set( ++ opts.get("delegated_functions", DEFAULT_DELEGATED_FUNCTIONS) ++ ) ++ if "executor_opts" in data: ++ delegated_modules |= set(data["executor_opts"].get("add_delegated_modules", [])) ++ delegated_functions |= set( ++ data["executor_opts"].get("add_delegated_functions", []) ++ ) ++ else: ++ delegated_modules |= set(opts.get("add_delegated_modules", [])) ++ delegated_functions |= set(opts.get("add_delegated_functions", [])) ++ ++ if fun in DELEGATION_MAP: ++ result = __executors__["direct_call.execute"]( ++ opts, data, __salt__[DELEGATION_MAP[fun]], args, kwargs ++ ) ++ elif module in delegated_modules or fun in delegated_functions: ++ result = __salt__["transactional_update.call"](fun, *args, **kwargs) ++ else: ++ result = __executors__["direct_call.execute"](opts, data, func, args, kwargs) ++ ++ return result +diff --git a/salt/grains/extra.py b/salt/grains/extra.py +index b30ab0091f..6a26aece77 100644 +--- a/salt/grains/extra.py ++++ b/salt/grains/extra.py +@@ -3,14 +3,18 @@ + from __future__ import absolute_import, print_function, unicode_literals + + # Import python libs ++import glob ++import logging + import os + + # Import third party libs + import logging + + # Import salt libs ++import salt.utils + import salt.utils.data + import salt.utils.files ++import salt.utils.path + import salt.utils.platform + import 
salt.utils.yaml + +@@ -83,3 +87,28 @@ def suse_backported_capabilities(): + '__suse_reserved_pkg_patches_support': True, + '__suse_reserved_saltutil_states_support': True + } ++ ++ ++def __secure_boot(): ++ """Detect if secure-boot is enabled.""" ++ enabled = False ++ sboot = glob.glob("/sys/firmware/efi/vars/SecureBoot-*/data") ++ if len(sboot) == 1: ++ with salt.utils.files.fopen(sboot[0], "rb") as fd: ++ enabled = fd.read()[-1:] == b"\x01" ++ return enabled ++ ++ ++def uefi(): ++ """Populate UEFI grains.""" ++ grains = { ++ "efi": os.path.exists("/sys/firmware/efi/systab"), ++ "efi-secure-boot": __secure_boot(), ++ } ++ ++ return grains ++ ++ ++def transactional(): ++ """Determine if the system in transactional.""" ++ return {"transactional": bool(salt.utils.path.which("transactional-update"))} +diff --git a/salt/modules/chroot.py b/salt/modules/chroot.py +index bc089ebf18..5e890b5c35 100644 +--- a/salt/modules/chroot.py ++++ b/salt/modules/chroot.py +@@ -21,6 +21,7 @@ import salt.defaults.exitcodes + import salt.exceptions + import salt.ext.six as six + import salt.utils.args ++import salt.utils.files + + + __func_alias__ = { +@@ -82,6 +83,38 @@ def create(root): + return True + + ++def in_chroot(): ++ """ ++ Return True if the process is inside a chroot jail ++ ++ .. versionadded:: TBD ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt myminion chroot.in_chroot ++ ++ """ ++ result = False ++ ++ try: ++ # We cannot assume that we are "root", so we cannot read ++ # '/proc/1/root', that is required for the usual way of ++ # detecting that we are in a chroot jail. We use the debian ++ # ischroot method. 
++ with salt.utils.files.fopen( ++ "/proc/1/mountinfo" ++ ) as root_fd, salt.utils.files.fopen("/proc/self/mountinfo") as self_fd: ++ root_mountinfo = root_fd.read() ++ self_mountinfo = self_fd.read() ++ result = root_mountinfo != self_mountinfo ++ except OSError: ++ pass ++ ++ return result ++ ++ + def call(root, function, *args, **kwargs): + ''' + Executes a Salt function inside a chroot environment. +@@ -121,7 +154,7 @@ def call(root, function, *args, **kwargs): + so_mods=__salt__['config.option']('thin_so_mods', '') + ) + # Some bug in Salt is preventing us to use `archive.tar` here. A +- # AsyncZeroMQReqChannel is not closed at the end os the salt-call, ++ # AsyncZeroMQReqChannel is not closed at the end of the salt-call, + # and makes the client never exit. + # + # stdout = __salt__['archive.tar']('xzf', thin_path, dest=thin_dest_path) +@@ -198,7 +231,7 @@ def apply_(root, mods=None, **kwargs): + + def _create_and_execute_salt_state(root, chunks, file_refs, test, hash_type): + ''' +- Create the salt_stage tarball, and execute in the chroot ++ Create the salt_state tarball, and execute in the chroot + ''' + # Create the tar containing the state pkg and relevant files. 
+ salt.client.ssh.wrapper.state._cleanup_slsmod_low_data(chunks) +@@ -210,7 +243,7 @@ def _create_and_execute_salt_state(root, chunks, file_refs, test, hash_type): + ret = None + + # Create a temporary directory inside the chroot where we can move +- # the salt_stage.tgz ++ # the salt_state.tgz + salt_state_path = tempfile.mkdtemp(dir=root) + salt_state_path = os.path.join(salt_state_path, 'salt_state.tgz') + salt_state_path_in_chroot = salt_state_path.replace(root, '', 1) +diff --git a/salt/modules/rebootmgr.py b/salt/modules/rebootmgr.py +new file mode 100644 +index 0000000000..96133c754b +--- /dev/null ++++ b/salt/modules/rebootmgr.py +@@ -0,0 +1,357 @@ ++""" ++:maintainer: Alberto Planas ++:maturity: new ++:depends: None ++:platform: Linux ++""" ++ ++import logging ++import re ++ ++import salt.exceptions ++ ++log = logging.getLogger(__name__) ++ ++ ++def __virtual__(): ++ """rebootmgrctl command is required.""" ++ if __utils__["path.which"]("rebootmgrctl") is not None: ++ return True ++ else: ++ return (False, "Module rebootmgt requires the command rebootmgrctl") ++ ++ ++def _cmd(cmd, retcode=False): ++ """Utility function to run commands.""" ++ result = __salt__["cmd.run_all"](cmd) ++ if retcode: ++ return result["retcode"] ++ ++ if result["retcode"]: ++ raise salt.exceptions.CommandExecutionError(result["stderr"]) ++ ++ return result["stdout"] ++ ++ ++def version(): ++ """Return the version of rebootmgrd ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr version ++ ++ """ ++ cmd = ["rebootmgrctl", "--version"] ++ ++ return _cmd(cmd).split()[-1] ++ ++ ++def is_active(): ++ """Check if the rebootmgrd is running and active or not. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr is_active ++ ++ """ ++ cmd = ["rebootmgrctl", "is_active", "--quiet"] ++ ++ return _cmd(cmd, retcode=True) == 0 ++ ++ ++def reboot(order=None): ++ """Tells rebootmgr to schedule a reboot. 
++ ++ With the [now] option, a forced reboot is done, no lock from etcd ++ is requested and a set maintenance window is ignored. With the ++ [fast] option, a lock from etcd is requested if needed, but a ++ defined maintenance window is ignored. ++ ++ order ++ If specified, can be "now" or "fast" ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr reboot ++ salt microos rebootmgr reboot order=now ++ ++ """ ++ if order and order not in ("now", "fast"): ++ raise salt.exceptions.CommandExecutionError( ++ "Order parameter, if specified, must be 'now' or 'fast'" ++ ) ++ ++ cmd = ["rebootmgrctl", "reboot"] ++ if order: ++ cmd.append(order) ++ ++ return _cmd(cmd) ++ ++ ++def cancel(): ++ """Cancels an already running reboot. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr cancel ++ ++ """ ++ cmd = ["rebootmgrctl", "cancel"] ++ ++ return _cmd(cmd) ++ ++ ++def status(): ++ """Returns the current status of rebootmgrd. ++ ++ Valid returned values are: ++ 0 - No reboot requested ++ 1 - Reboot requested ++ 2 - Reboot requested, waiting for maintenance window ++ 3 - Reboot requested, waiting for etcd lock. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr status ++ ++ """ ++ cmd = ["rebootmgrctl", "status", "--quiet"] ++ ++ return _cmd(cmd, retcode=True) ++ ++ ++def set_strategy(strategy=None): ++ """A new strategy to reboot the machine is set and written into ++ /etc/rebootmgr.conf. ++ ++ strategy ++ If specified, must be one of those options: ++ ++ best-effort - This is the default strategy. If etcd is ++ running, etcd-lock is used. If no etcd is running, but a ++ maintenance window is specified, the strategy will be ++ maint-window. If no maintenance window is specified, the ++ machine is immediately rebooted (instantly). ++ ++ etcd-lock - A lock at etcd for the specified lock-group will ++ be acquired before reboot. If a maintenance window is ++ specified, the lock is only acquired during this window.
++ ++ maint-window - Reboot does happen only during a specified ++ maintenance window. If no window is specified, the ++ instantly strategy is followed. ++ ++ instantly - Other services will be informed that a reboot will ++ happen. Reboot will be done without getting any locks or ++ waiting for a maintenance window. ++ ++ off - Reboot requests are temporarily ++ ignored. /etc/rebootmgr.conf is not modified. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr set_strategy strategy=off ++ ++ """ ++ if strategy and strategy not in ( ++ "best-effort", ++ "etcd-lock", ++ "maint-window", ++ "instantly", ++ "off", ++ ): ++ raise salt.exceptions.CommandExecutionError("Strategy parameter not valid") ++ ++ cmd = ["rebootmgrctl", "set-strategy"] ++ if strategy: ++ cmd.append(strategy) ++ ++ return _cmd(cmd) ++ ++ ++def get_strategy(): ++ """The currently used reboot strategy of rebootmgrd will be printed. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr get_strategy ++ ++ """ ++ cmd = ["rebootmgrctl", "get-strategy"] ++ ++ return _cmd(cmd).split(":")[-1].strip() ++ ++ ++def set_window(time, duration): ++ """Sets the maintenance window. ++ ++ time ++ The format of time is the same as described in ++ systemd.time(7). ++ ++ duration ++ The format of duration is "[XXh][YYm]". ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr set_window time="Thu,Fri 2020-*-1,5 11:12:13" duration=1h ++ ++ """ ++ cmd = ["rebootmgrctl", "set-window", time, duration] ++ ++ return _cmd(cmd) ++ ++ ++def get_window(): ++ """The currently set maintenance window will be printed. ++ ++ CLI Example: ++ ++ ..
code-block:: bash ++ ++ salt microos rebootmgr get_window ++ ++ """ ++ cmd = ["rebootmgrctl", "get-window"] ++ window = _cmd(cmd) ++ ++ return dict( ++ zip( ++ ("time", "duration"), ++ re.search( ++ r"Maintenance window is set to (.*), lasting (.*).", window ++ ).groups(), ++ ) ++ ) ++ ++ ++def set_group(group): ++ """Set the group, to which this machine belongs to get a reboot lock ++ from etcd. ++ ++ group ++ Group name ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr set_group group=group_1 ++ ++ """ ++ cmd = ["rebootmgrctl", "set-group", group] ++ ++ return _cmd(cmd) ++ ++ ++def get_group(): ++ """The currently set lock group for etcd. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr get_group ++ ++ """ ++ cmd = ["rebootmgrctl", "get-group"] ++ group = _cmd(cmd) ++ ++ return re.search(r"Etcd lock group is set to (.*)", group).groups()[0] ++ ++ ++def set_max(max_locks, group=None): ++ """Set the maximal number of hosts in a group, which are allowed to ++ reboot at the same time. ++ ++ number ++ Maximal number of hosts in a group ++ ++ group ++ Group name ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr set_max 4 ++ ++ """ ++ cmd = ["rebootmgrctl", "set-max"] ++ if group: ++ cmd.extend(["--group", group]) ++ cmd.append(max_locks) ++ ++ return _cmd(cmd) ++ ++ ++def lock(machine_id=None, group=None): ++ """Lock a machine. If no group is specified, the local default group ++ will be used. If no machine-id is specified, the local machine ++ will be locked. ++ ++ machine_id ++ The machine-id is a network wide, unique ID. Per default the ++ ID from /etc/machine-id is used. ++ ++ group ++ Group name ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos rebootmgr lock group=group1 ++ ++ """ ++ cmd = ["rebootmgrctl", "lock"] ++ if group: ++ cmd.extend(["--group", group]) ++ if machine_id: ++ cmd.append(machine_id) ++ ++ return _cmd(cmd) ++ ++ ++def unlock(machine_id=None, group=None): ++ """Unlock a machine. If no group is specified, the local default group ++ will be used. If no machine-id is specified, the local machine ++ will be locked. ++ ++ machine_id ++ The machine-id is a network wide, unique ID. Per default the ++ ID from /etc/machine-id is used. ++ ++ group ++ Group name ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos rebootmgr unlock group=group1 ++ ++ """ ++ cmd = ["rebootmgrctl", "unlock"] ++ if group: ++ cmd.extend(["--group", group]) ++ if machine_id: ++ cmd.append(machine_id) ++ ++ return _cmd(cmd) +diff --git a/salt/modules/systemd_service.py b/salt/modules/systemd_service.py +index e39962f9ac..a684ec0778 100644 +--- a/salt/modules/systemd_service.py ++++ b/salt/modules/systemd_service.py +@@ -56,8 +56,10 @@ def __virtual__(): + ''' + Only work on systems that have been booted with systemd + ''' +- if __grains__['kernel'] == 'Linux' \ +- and salt.utils.systemd.booted(__context__): ++ is_linux = __grains__.get("kernel") == "Linux" ++ is_booted = salt.utils.systemd.booted(__context__) ++ is_offline = salt.utils.systemd.offline(__context__) ++ if is_linux and (is_booted or is_offline): + return __virtualname__ + return ( + False, +@@ -1419,3 +1421,19 @@ def firstboot(locale=None, locale_message=None, keymap=None, + 'systemd-firstboot error: {}'.format(out['stderr'])) + + return True ++ ++ ++def offline(): ++ """ ++ .. versionadded:: TBD ++ ++ Check if systemd is working in offline mode, where is not possible ++ to talk with PID 1. 
++ ++ CLI Example: ++ ++ salt '*' service.offline ++ ++ """ ++ ++ return salt.utils.systemd.offline(__context__) +diff --git a/salt/modules/transactional_update.py b/salt/modules/transactional_update.py +new file mode 100644 +index 0000000000..9b14557e07 +--- /dev/null ++++ b/salt/modules/transactional_update.py +@@ -0,0 +1,1270 @@ ++"""Transactional update ++==================== ++ ++.. versionadded: TBD ++ ++A transactional system, like `MicroOS`_, can present some challenges ++when the user decided to manage it via Salt. ++ ++MicroOS provide a read-only rootfs and a tool, ++``transactional-update``, that takes care of the management of the ++system (updating, upgrading, installation or reboot, among others) in ++an atomic way. ++ ++Atomicity is the main feature of MicroOS, and to guarantee this ++property, this model leverages ``snapper``, ``zypper``, ``btrfs`` and ++``overlayfs`` to create snapshots that will be updated independently ++of the currently running system, and that are activated after the ++reboot. This implies, for example, that some changes made on the ++system are not visible until the next reboot, as those changes are ++living in a different snapshot of the file system. ++ ++This model present a lot of problems with the traditional Salt model, ++where the inspections (like 'is this package installed?') are executed ++in order to determine if a subsequent action is required (like ++'install this package'). ++ ++Lets consider this use case, to see how it works on a traditional ++system, and in a transactional system: ++ ++1) Check if ``apache`` is installed ++ ++2) If it is not installed, install it ++ ++3) Check that a ``vhost`` is configured for ``apache`` ++ ++4) Make sure that ``apache2.service`` is enabled ++ ++5) If the configuration changes, restart ``apache2.service`` ++ ++In the traditional system everything will work as expected. 
The ++system can see if the package is present or not, install it if it ++isn't, and a re-check will shows that is already present. The same ++will happen to the configuration file in ``/etc/apache2``, that will ++be available as soon the package gets installed. Salt can inspect the ++current form of this file, and add the missing bits if required. Salt ++can annotate that a change is present, and restart the service. ++ ++In a transactional system we will have multiple issues. The first one ++is that Salt can only see the content of the snapshot where the system ++booted from. Later snapshots may contain different content, including ++the presence of ``apache``. If Salt decides to install ``apache`` ++calling ``zypper``, it will fail, as this will try to write into the ++read-only rootfs. Even if Salt would call ``transactional-update pkg ++install``, the package would only be present in the new transaction ++(snapshot), and will not be found in the currently running system when ++later Salt tries to validate the presence of the package in the ++current one. ++ ++Any change in ``/etc`` alone will have also problems, as the changes ++will be alive in a different overlay, only visible after the reboot. ++And, finally, the service can only be enabled and restarted if the ++service file is already present in the current ``/etc``. ++ ++ ++General strategy ++---------------- ++ ++``transactional-update`` is the reference tool used for the ++administration of transactional systems. Newer versions of this tool ++support the execution of random commands in the new transaction, the ++continuation of a transaction, the automatic detection of changes in ++new transactions and the merge of ``/etc`` overlays. ++ ++Continue a transaction ++...................... ++ ++One prerequisite already present is the support for branching from a ++different snapshot than the current one in snapper. 
++ ++With this feature we can represent in ``transactional-update`` the ++action of creating a transaction snapshot based on one that is planned ++to be the active one after the reboot. This feature removes a lot of ++user complains (like, for example, loosing changes that are stored in ++a transaction not yet activated), but also provide a more simple model ++to work with. ++ ++So, for example, if the user have this scenario:: ++ ++ +-----+ *=====* +--V--+ ++ --| T.1 |--| T.2 |--| T.3 | ++ +-----+ *=====* +--A--+ ++ ++where T.2 is the current active one, and T.3 is an snapshot generated ++from T.2 with a new package (``apache2``), and is marked to be the ++active after the reboot. ++ ++Previously, if the user (that is still on T.2) created a new ++transaction, maybe for adding a new package (``tomcat``, for example), ++the new T.4 will be based on the content of T.2 again, and not T.3, so ++the new T.4 will have lost the changes of T.3 (i.e. `apache2` will not ++be present in T.4). ++ ++With the ``--continue`` parameter, ``transactional-update`` will ++create T.4 based on T.3, and nothing will be lost. ++ ++Command execution inside a new transaction ++.......................................... ++ ++With ``transactional-update run`` we will create a new transaction ++based on the current one (T.2), where we can send interactive commands ++that can modify the new transaction, and as commented, with ++``transactional-update --continue run``, we will create a new ++transaction based on the last created (T.3) ++ ++The ``run`` command can execute any application inside the new ++transaction namespace. This module uses this feature to execute the ++different Salt execution modules, via ``call()``. Or even the full ++``salt-thin`` or ``salt-call`` via ``sls()``, ``apply()``, ++``single()`` or ``highstate``. ++ ++``transactional-update`` will drop empty snapshots ++.................................................. 
++ ++ The option ``--drop-if-no-change`` is used to detect whether there is ++ any change in the file system on the read-only subvolume of the new ++ transaction will be added. If a change is present, the new ++ transaction will remain, if not it will be discarded. ++ ++ For example:: ++ ++ transactional-update --continue --drop-if-no-change run zypper in apache2 ++ ++ If we are in the scenario described before, ``apache2`` is already ++ present in T.3. In this case a new transaction, T.4, will be created ++ based on T.3, ``zypper`` will detect that the package is already ++ present and no change will be produced on T.4. At the end of the ++ execution, ``transactional-update`` will validate that T.3 and T.4 are ++ equivalent and T.4 will be discarded. ++ ++ If the command is:: ++ ++ transactional-update --continue --drop-if-no-change run zypper in tomcat ++ ++ the new T.4 will be indeed different from T.3, and will remain after ++ the transaction is closed. ++ ++ With this feature, every time that we call any function of this ++ execution module, we will minimize the amount of transaction, while ++ maintaining the idempotence of some operations. ++ ++ Report for pending transaction ++ .............................. ++ ++ A change in the system will create a new transaction, that needs to be ++ activated via a reboot. With ``pending_transaction()`` we can check ++ if a reboot is needed. We can execute the reboot using the ++ ``reboot()`` function, that will follow the plan established by the ++ functions of the ``rebootmgr`` execution module. ++ ++ ``/etc`` overlay merge when no new transaction is created ++ ......................................................... ++ ++ In a transactional model, ``/etc`` is an overlay file system. Changes ++ done during the update are only present in the new transaction, and so ++ will only be available after the reboot. Or worse, if the transaction ++ gets dropped, because there is no change in the ``rootfs``, the ++ changes in ``/etc`` will be dropped too!
This is designed like that ++in order to make the configuration files for the new package available ++only when new package is also available to the user. So, after the ++reboot. ++ ++This makes sense for the case when, for example, ``apache2`` is not ++present in the current transaction, but we installed it. The new ++snapshot contains the ``apache2`` service, and the configuration files ++in ``/etc`` will be accessible only after the reboot. ++ ++But this model presents an issue. If we use ``transactional-update ++--continue --drop-if-no-change run ``, where ```` ++does not make any change in the read-only subvolume, but only in ++``/etc`` (which is also read-write in the running system), the new ++overlay with the changes in ``/etc`` will be dropped together with the ++transaction. ++ ++To fix this, ``transactional-update`` will detect that when no change ++has been made on the read-only subvolume, but done in the overlay, the ++transaction will be dropped and the changes in the overlay will be ++merged back into ``/etc`` overlay of the current transaction. ++ ++ ++Using the execution module ++-------------------------- ++ ++With this module we can create states that leverage Salt into this ++kind of systems:: ++ ++ # Install apache (low-level API) ++ salt-call transactional_update.pkg_install apache2 ++ ++ # We can call any execution module ++ salt-call transactional_update.call pkg.install apache2 ++ ++ # Or via a state ++ salt-call transactional_update.single pkg.installed name=apache2 ++ ++ # We can also execute a zypper directly ++ salt-call transactional_update run "zypper in apache2" snapshot="continue" ++ ++ # We can reuse SLS states ++ salt-call transactional_update.apply install_and_configure_apache ++ ++ # Or apply the full highstate ++ salt-call transactional_update.highstate ++ ++ # Is there any change done in the system? 
++ salt-call transactional_update pending_transaction ++ ++ # If so, reboot via rebootmgr ++ salt-call transactional_update reboot ++ ++ # We can enable the service ++ salt-call service.enable apache2 ++ ++ # If apache2 is available, this will work too ++ salt-call service.restart apache2 ++ ++ ++Fixing some expectations ++------------------------ ++ ++This module alone is an improvement over the current state, but is ++easy to see some limitations and problems: ++ ++Is not a fully transparent approach ++................................... ++ ++The user needs to know if the system is transactional or not, as not ++everything can be expressed inside a transaction (for example, ++restarting a service inside transaction is not allowed). ++ ++Two step for service restart ++............................ ++ ++In the ``apache2` example from the beginning we can observe the ++biggest drawback. If the package ``apache2`` is missing, the new ++module will create a new transaction, will execute ``pkg.install`` ++inside the transaction (creating the salt-thin, moving it inside and ++delegating the execution to `transactional-update` CLI as part of the ++full state). Inside the transaction we can do too the required ++changes in ``/etc`` for adding the new ``vhost``, and we can enable the ++service via systemctl inside the same transaction. ++ ++At this point we will not merge the ``/etc`` overlay into the current ++one, and we expect from the user call the ``reboot`` function inside ++this module, in order to activate the new transaction and start the ++``apache2`` service. ++ ++In the case that the package is already there, but the configuration ++for the ``vhost`` is required, the new transaction will be dropped and ++the ``/etc`` overlay will be visible in the live system. Then from ++outside the transaction, via a different call to Salt, we can command ++a restart of the ``apache2`` service. 
++ ++We can see that in both cases we break the user expectation, where a ++change on the configuration will trigger automatically the restart of ++the associated service. In a transactional scenario we need two ++different steps: or a reboot, or a restart from outside of the ++transaction. ++ ++.. _MicroOS: https://microos.opensuse.org/ ++ ++:maintainer: Alberto Planas ++:maturity: new ++:depends: None ++:platform: Linux ++ ++""" ++ ++import copy ++import logging ++import os ++import sys ++import tempfile ++ ++import salt.client.ssh.state ++import salt.client.ssh.wrapper.state ++import salt.exceptions ++import salt.utils.args ++ ++__func_alias__ = {"apply_": "apply"} ++ ++log = logging.getLogger(__name__) ++ ++ ++def __virtual__(): ++ """ ++ transactional-update command is required. ++ """ ++ if __utils__["path.which"]("transactional-update"): ++ return True ++ else: ++ return (False, "Module transactional_update requires a transactional system") ++ ++ ++def _global_params(self_update, snapshot=None, quiet=False): ++ """Utility function to prepare common global parameters.""" ++ params = ["--non-interactive", "--drop-if-no-change"] ++ if self_update is False: ++ params.append("--no-selfupdate") ++ if snapshot and snapshot != "continue": ++ params.extend(["--continue", snapshot]) ++ elif snapshot: ++ params.append("--continue") ++ if quiet: ++ params.append("--quiet") ++ return params ++ ++ ++def _pkg_params(pkg, pkgs, args): ++ """Utility function to prepare common package parameters.""" ++ params = [] ++ ++ if not pkg and not pkgs: ++ raise salt.exceptions.CommandExecutionError("Provide pkg or pkgs parameters") ++ ++ if args and isinstance(args, str): ++ params.extend(args.split()) ++ elif args and isinstance(args, list): ++ params.extend(args) ++ ++ if pkg: ++ params.append(pkg) ++ ++ if pkgs and isinstance(pkgs, str): ++ params.extend(pkgs.split()) ++ elif pkgs and isinstance(pkgs, list): ++ params.extend(pkgs) ++ ++ return params ++ ++ ++def _cmd(cmd, 
retcode=False): ++ """Utility function to run commands.""" ++ result = __salt__["cmd.run_all"](cmd) ++ if retcode: ++ return result["retcode"] ++ ++ if result["retcode"]: ++ raise salt.exceptions.CommandExecutionError(result["stderr"]) ++ ++ return result["stdout"] ++ ++ ++def transactional(): ++ """Check if the system is a transactional system ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update transactional ++ ++ """ ++ return bool(__utils__["path.which"]("transactional-update")) ++ ++ ++def in_transaction(): ++ """Check if Salt is executing while in a transaction ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update in_transaction ++ ++ """ ++ return transactional() and __salt__["chroot.in_chroot"]() ++ ++ ++def cleanup(self_update=False): ++ """Run both cleanup-snapshots and cleanup-overlays. ++ ++ Identical to calling both cleanup-snapshots and cleanup-overlays. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update cleanup ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update)) ++ cmd.append("cleanup") ++ return _cmd(cmd) ++ ++ ++def cleanup_snapshots(self_update=False): ++ """Mark unused snapshots for snapper removal. ++ ++ If the current root filesystem is identical to the active root ++ filesystem (means after a reboot, before transactional-update ++ creates a new snapshot with updates), all old snapshots without a ++ cleanup algorithm get a cleanup algorithm set. This is to make ++ sure, that old snapshots will be deleted by snapper. See the ++ section about cleanup algorithms in snapper(8). ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update cleanup_snapshots ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update)) ++ cmd.append("cleanup-snapshots") ++ return _cmd(cmd) ++ ++ ++def cleanup_overlays(self_update=False): ++ """Remove unused overlay layers. ++ ++ Removes all unreferenced (and thus unused) /etc overlay ++ directories in /var/lib/overlay. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update cleanup_overlays ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update)) ++ cmd.append("cleanup-overlays") ++ return _cmd(cmd) ++ ++ ++def grub_cfg(self_update=False, snapshot=None): ++ """Regenerate grub.cfg ++ ++ grub2-mkconfig(8) is called to create a new /boot/grub2/grub.cfg ++ configuration file for the bootloader. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update grub_cfg snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("grub.cfg") ++ return _cmd(cmd) ++ ++ ++def bootloader(self_update=False, snapshot=None): ++ """Reinstall the bootloader ++ ++ Same as grub.cfg, but will also rewrite the bootloader itself. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update bootloader snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("bootloader") ++ return _cmd(cmd) ++ ++ ++def initrd(self_update=False, snapshot=None): ++ """Regenerate initrd ++ ++ A new initrd is created in a snapshot. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update initrd snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("initrd") ++ return _cmd(cmd) ++ ++ ++def kdump(self_update=False, snapshot=None): ++ """Regenerate kdump initrd ++ ++ A new initrd for kdump is created in a snapshot. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update kdump snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("kdump") ++ return _cmd(cmd) ++ ++ ++def run(command, self_update=False, snapshot=None): ++ """Run a command in a new snapshot ++ ++ Execute the command inside a new snapshot. By default this snapshot ++ will remain, but if --drop-if-no-change is set, the new snapshot ++ will be dropped if there is no change in the file system.
++ ++ command ++ Command with parameters that will be executed (as string or ++ array) ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update run "mkdir /tmp/dir" snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot, quiet=True)) ++ cmd.append("run") ++ if isinstance(command, str): ++ cmd.extend(command.split()) ++ elif isinstance(command, list): ++ cmd.extend(command) ++ else: ++ raise salt.exceptions.CommandExecutionError("Command parameter not recognized") ++ return _cmd(cmd) ++ ++ ++def reboot(self_update=False): ++ """Reboot after update ++ ++ Trigger a reboot after updating the system. ++ ++ Several different reboot methods are supported, configurable via ++ the REBOOT_METHOD configuration option in ++ transactional-update.conf(5). By default rebootmgrd(8) will be ++ used to reboot the system according to the configured policies if ++ the service is running, otherwise systemctl reboot will be called. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update reboot ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update)) ++ cmd.append("reboot") ++ return _cmd(cmd) ++ ++ ++def dup(self_update=False, snapshot=None): ++ """Call 'zypper dup' ++ ++ If new updates are available, a new snapshot is created and zypper ++ dup --no-allow-vendor-change is used to update the ++ snapshot. Afterwards, the snapshot is activated and will be used ++ as the new root filesystem during next boot. ++ ++ self_update ++ Check for newer transactional-update versions. 
++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update dup snapshot="continue" ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("dup") ++ return _cmd(cmd) ++ ++ ++def up(self_update=False, snapshot=None): ++ """Call 'zypper up' ++ ++ If new updates are available, a new snapshot is created and zypper ++ up is used to update the snapshot. Afterwards, the snapshot is ++ activated and will be used as the new root filesystem during next ++ boot. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update up snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("up") ++ return _cmd(cmd) ++ ++ ++def patch(self_update=False, snapshot=None): ++ """Call 'zypper patch' ++ ++ If new updates are available, a new snapshot is created and zypper ++ patch is used to update the snapshot. Afterwards, the snapshot is ++ activated and will be used as the new root filesystem during next ++ boot. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update patch snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("patch") ++ return _cmd(cmd) ++ ++ ++def migration(self_update=False, snapshot=None): ++ """Updates systems registered via SCC / SMT ++ ++ On systems which are registered against the SUSE Customer Center ++ (SCC) or SMT, a migration to a new version of the installed ++ products can be made with this option. ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update migration snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.append("migration") ++ return _cmd(cmd) ++ ++ ++def pkg_install(pkg=None, pkgs=None, args=None, self_update=False, snapshot=None): ++ """Install individual packages ++ ++ Installs additional software. See the install description in the ++ "Package Management Commands" section of zypper's man page for all ++ available arguments. ++ ++ pkg ++ Package name to install ++ ++ pkgs ++ List of packages names to install ++ ++ args ++ String or list of extra parameters for zypper ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update pkg_install pkg=emacs snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.extend(["pkg", "install"]) ++ cmd.extend(_pkg_params(pkg, pkgs, args)) ++ return _cmd(cmd) ++ ++ ++def pkg_remove(pkg=None, pkgs=None, args=None, self_update=False, snapshot=None): ++ """Remove individual packages ++ ++ Removes installed software. See the remove description in the ++ "Package Management Commands" section of zypper's man page for all ++ available arguments. ++ ++ pkg ++ Package name to install ++ ++ pkgs ++ List of packages names to install ++ ++ args ++ String or list of extra parameters for zypper ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update pkg_remove pkg=vim snapshot="continue" ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.extend(["pkg", "remove"]) ++ cmd.extend(_pkg_params(pkg, pkgs, args)) ++ return _cmd(cmd) ++ ++ ++def pkg_update(pkg=None, pkgs=None, args=None, self_update=False, snapshot=None): ++ """Updates individual packages ++ ++ Update selected software. See the update description in the ++ "Update Management Commands" section of zypper's man page for all ++ available arguments. ++ ++ pkg ++ Package name to install ++ ++ pkgs ++ List of packages names to install ++ ++ args ++ String or list of extra parameters for zypper ++ ++ self_update ++ Check for newer transactional-update versions. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. 
Use ++ "continue" to indicate the last snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update pkg_update pkg=emacs snapshot="continue" ++ ++ """ ++ cmd = ["transactional-update"] ++ cmd.extend(_global_params(self_update=self_update, snapshot=snapshot)) ++ cmd.extend(["pkg", "update"]) ++ cmd.extend(_pkg_params(pkg, pkgs, args)) ++ return _cmd(cmd) ++ ++ ++def rollback(snapshot=None): ++ """Set the current, given or last working snapshot as default snapshot ++ ++ Sets the default root file system. On a read-only system the root ++ file system is set directly using btrfs. On read-write systems ++ snapper(8) rollback is called. ++ ++ If no snapshot number is given, the current root file system is ++ set as the new default root file system. Otherwise number can ++ either be a snapshot number (as displayed by snapper list) or the ++ word last. last will try to reset to the latest working snapshot. ++ ++ snapshot ++ Use the given snapshot or, if no number is given, the current ++ default snapshot as a base for the next snapshot. Use ++ "last" to indicate the last working snapshot done. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update rollback ++ ++ """ ++ if ( ++ snapshot ++ and isinstance(snapshot, str) ++ and snapshot != "last" ++ and not snapshot.isnumeric() ++ ): ++ raise salt.exceptions.CommandExecutionError( ++ "snapshot should be a number or 'last'" ++ ) ++ cmd = ["transactional-update"] ++ cmd.append("rollback") ++ if snapshot: ++ cmd.append(snapshot) ++ return _cmd(cmd) ++ ++ ++def pending_transaction(): ++ """Check if there is a pending transaction ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update pending_transaction ++ ++ """ ++ # If we are running inside a transaction, we do not have a good ++ # way yet to detect a pending transaction ++ if in_transaction(): ++ raise salt.exceptions.CommandExecutionError( ++ "pending_transaction cannot be executed inside a transaction" ++ ) ++ ++ cmd = ["snapper", "--no-dbus", "list", "--columns", "number"] ++ snapshots = _cmd(cmd) ++ ++ return any(snapshot.endswith("+") for snapshot in snapshots) ++ ++ ++def call(function, *args, **kwargs): ++ """Executes a Salt function inside a transaction. ++ ++ The chroot does not need to have Salt installed, but Python is ++ required. ++ ++ function ++ Salt execution module function ++ ++ activate_transaction ++ If at the end of the transaction there is a pending activation ++ (i.e there is a new snaphot in the system), a new reboot will ++ be scheduled (default False) ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update.call test.ping ++ salt microos transactional_update.call ssh.set_auth_key user key=mykey ++ salt microos transactional_update.call pkg.install emacs activate_transaction=True ++ ++ """ ++ ++ if not function: ++ raise salt.exceptions.CommandExecutionError("Missing function parameter") ++ ++ activate_transaction = kwargs.pop("activate_transaction", False) ++ ++ # Generate the salt-thin and create a temporary directory in a ++ # place that the new transaction will have access to, and where we ++ # can untar salt-thin ++ thin_path = __utils__["thin.gen_thin"]( ++ __opts__["cachedir"], ++ extra_mods=__salt__["config.option"]("thin_extra_mods", ""), ++ so_mods=__salt__["config.option"]("thin_so_mods", ""), ++ ) ++ thin_dest_path = tempfile.mkdtemp(dir=__opts__["cachedir"]) ++ # Some bug in Salt is preventing us to use `archive.tar` here. A ++ # AsyncZeroMQReqChannel is not closed at the end of the salt-call, ++ # and makes the client never exit. 
++ # ++ # stdout = __salt__['archive.tar']('xzf', thin_path, dest=thin_dest_path) ++ # ++ stdout = __salt__["cmd.run"](["tar", "xzf", thin_path, "-C", thin_dest_path]) ++ if stdout: ++ __utils__["files.rm_rf"](thin_dest_path) ++ return {"result": False, "comment": stdout} ++ ++ try: ++ safe_kwargs = salt.utils.args.clean_kwargs(**kwargs) ++ salt_argv = ( ++ [ ++ "python{}".format(sys.version_info[0]), ++ os.path.join(thin_dest_path, "salt-call"), ++ "--metadata", ++ "--local", ++ "--log-file", ++ os.path.join(thin_dest_path, "log"), ++ "--cachedir", ++ os.path.join(thin_dest_path, "cache"), ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ function, ++ ] ++ + list(args) ++ + ["{}={}".format(k, v) for (k, v) in safe_kwargs.items()] ++ ) ++ try: ++ ret_stdout = run([str(x) for x in salt_argv], snapshot="continue") ++ except salt.exceptions.CommandExecutionError as e: ++ ret_stdout = e.message ++ ++ # Process "real" result in stdout ++ try: ++ data = __utils__["json.find_json"](ret_stdout) ++ local = data.get("local", data) ++ if isinstance(local, dict) and "retcode" in local: ++ __context__["retcode"] = local["retcode"] ++ return local.get("return", data) ++ except (KeyError, ValueError): ++ return {"result": False, "comment": ret_stdout} ++ finally: ++ __utils__["files.rm_rf"](thin_dest_path) ++ ++ # Check if reboot is needed ++ if activate_transaction and pending_transaction(): ++ reboot() ++ ++ ++def apply_(mods=None, **kwargs): ++ """Apply an state inside a transaction. ++ ++ This function will call `transactional_update.highstate` or ++ `transactional_update.sls` based on the arguments passed to this ++ function. It exists as a more intuitive way of applying states. ++ ++ For a formal description of the possible parameters accepted in ++ this function, check `state.apply_` documentation. 
++ ++ activate_transaction ++ If at the end of the transaction there is a pending activation ++ (i.e there is a new snaphot in the system), a new reboot will ++ be scheduled (default False) ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update.apply ++ salt microos transactional_update.apply stuff ++ salt microos transactional_update.apply stuff pillar='{"foo": "bar"}' ++ salt microos transactional_update.apply stuff activate_transaction=True ++ ++ """ ++ if mods: ++ return sls(mods, **kwargs) ++ return highstate(**kwargs) ++ ++ ++def _create_and_execute_salt_state( ++ chunks, file_refs, test, hash_type, activate_transaction ++): ++ """Create the salt_state tarball, and execute it in a transaction""" ++ ++ # Create the tar containing the state pkg and relevant files. ++ salt.client.ssh.wrapper.state._cleanup_slsmod_low_data(chunks) ++ trans_tar = salt.client.ssh.state.prep_trans_tar( ++ salt.fileclient.get_file_client(__opts__), chunks, file_refs, __pillar__ ++ ) ++ trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, hash_type) ++ ++ ret = None ++ ++ # Create a temporary directory accesible later by the transaction ++ # where we can move the salt_state.tgz ++ salt_state_path = tempfile.mkdtemp(dir=__opts__["cachedir"]) ++ salt_state_path = os.path.join(salt_state_path, "salt_state.tgz") ++ try: ++ salt.utils.files.copyfile(trans_tar, salt_state_path) ++ ret = call( ++ "state.pkg", ++ salt_state_path, ++ test=test, ++ pkg_sum=trans_tar_sum, ++ hash_type=hash_type, ++ activate_transaction=activate_transaction, ++ ) ++ finally: ++ __utils__["files.rm_rf"](salt_state_path) ++ ++ return ret ++ ++ ++def sls( ++ mods, saltenv="base", test=None, exclude=None, activate_transaction=False, **kwargs ++): ++ """Execute the states in one or more SLS files inside a transaction. 
++ ++ saltenv ++ Specify a salt fileserver environment to be used when applying ++ states ++ ++ mods ++ List of states to execute ++ ++ test ++ Run states in test-only (dry-run) mode ++ ++ exclude ++ Exclude specific states from execution. Accepts a list of sls ++ names, a comma-separated string of sls names, or a list of ++ dictionaries containing ``sls`` or ``id`` keys. Glob-patterns ++ may be used to match multiple states. ++ ++ activate_transaction ++ If at the end of the transaction there is a pending activation ++ (i.e there is a new snaphot in the system), a new reboot will ++ be scheduled (default False) ++ ++ For a formal description of the possible parameters accepted in ++ this function, check `state.sls` documentation. ++ ++ CLI Example: ++ ++ .. code-block:: bash ++ ++ salt microos transactional_update.sls stuff pillar='{"foo": "bar"}' ++ salt microos transactional_update.sls stuff activate_transaction=True ++ ++ """ ++ # Get a copy of the pillar data, to avoid overwriting the current ++ # pillar, instead the one delegated ++ pillar = copy.deepcopy(__pillar__) ++ pillar.update(kwargs.get("pillar", {})) ++ ++ # Clone the options data and apply some default values. 
May not be ++ # needed, as this module just delegate ++ opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) ++ st_ = salt.client.ssh.state.SSHHighState( ++ opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__) ++ ) ++ ++ if isinstance(mods, str): ++ mods = mods.split(",") ++ ++ high_data, errors = st_.render_highstate({saltenv: mods}) ++ if exclude: ++ if isinstance(exclude, str): ++ exclude = exclude.split(",") ++ if "__exclude__" in high_data: ++ high_data["__exclude__"].extend(exclude) ++ else: ++ high_data["__exclude__"] = exclude ++ ++ high_data, ext_errors = st_.state.reconcile_extend(high_data) ++ errors += ext_errors ++ errors += st_.state.verify_high(high_data) ++ if errors: ++ return errors ++ ++ high_data, req_in_errors = st_.state.requisite_in(high_data) ++ errors += req_in_errors ++ if errors: ++ return errors ++ ++ high_data = st_.state.apply_exclude(high_data) ++ ++ # Compile and verify the raw chunks ++ chunks = st_.state.compile_high_data(high_data) ++ file_refs = salt.client.ssh.state.lowstate_file_refs( ++ chunks, ++ salt.client.ssh.wrapper.state._merge_extra_filerefs( ++ kwargs.get("extra_filerefs", ""), opts.get("extra_filerefs", "") ++ ), ++ ) ++ ++ hash_type = opts["hash_type"] ++ return _create_and_execute_salt_state( ++ chunks, file_refs, test, hash_type, activate_transaction ++ ) ++ ++ ++def highstate(activate_transaction=False, **kwargs): ++ """Retrieve the state data from the salt master for this minion and ++ execute it inside a transaction. ++ ++ For a formal description of the possible parameters accepted in ++ this function, check `state.highstate` documentation. ++ ++ activate_transaction ++ If at the end of the transaction there is a pending activation ++ (i.e there is a new snaphot in the system), a new reboot will ++ be scheduled (default False) ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update.highstate ++ salt microos transactional_update.highstate pillar='{"foo": "bar"}' ++ salt microos transactional_update.highstate activate_transaction=True ++ ++ """ ++ # Get a copy of the pillar data, to avoid overwriting the current ++ # pillar, instead the one delegated ++ pillar = copy.deepcopy(__pillar__) ++ pillar.update(kwargs.get("pillar", {})) ++ ++ # Clone the options data and apply some default values. May not be ++ # needed, as this module just delegate ++ opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) ++ st_ = salt.client.ssh.state.SSHHighState( ++ opts, pillar, __salt__, salt.fileclient.get_file_client(__opts__) ++ ) ++ ++ # Compile and verify the raw chunks ++ chunks = st_.compile_low_chunks() ++ file_refs = salt.client.ssh.state.lowstate_file_refs( ++ chunks, ++ salt.client.ssh.wrapper.state._merge_extra_filerefs( ++ kwargs.get("extra_filerefs", ""), opts.get("extra_filerefs", "") ++ ), ++ ) ++ # Check for errors ++ for chunk in chunks: ++ if not isinstance(chunk, dict): ++ __context__["retcode"] = 1 ++ return chunks ++ ++ test = kwargs.pop("test", False) ++ hash_type = opts["hash_type"] ++ return _create_and_execute_salt_state( ++ chunks, file_refs, test, hash_type, activate_transaction ++ ) ++ ++ ++def single(fun, name, test=None, activate_transaction=False, **kwargs): ++ """Execute a single state function with the named kwargs, returns ++ False if insufficient data is sent to the command ++ ++ By default, the values of the kwargs will be parsed as YAML. So, ++ you can specify lists values, or lists of single entry key-value ++ maps, as you would in a YAML salt file. Alternatively, JSON format ++ of keyword values is also supported. ++ ++ activate_transaction ++ If at the end of the transaction there is a pending activation ++ (i.e there is a new snaphot in the system), a new reboot will ++ be scheduled (default False) ++ ++ CLI Example: ++ ++ .. 
code-block:: bash ++ ++ salt microos transactional_update.single pkg.installed name=emacs ++ salt microos transactional_update.single pkg.installed name=emacs activate_transaction=True ++ ++ """ ++ # Get a copy of the pillar data, to avoid overwriting the current ++ # pillar, instead the one delegated ++ pillar = copy.deepcopy(__pillar__) ++ pillar.update(kwargs.get("pillar", {})) ++ ++ # Clone the options data and apply some default values. May not be ++ # needed, as this module just delegate ++ opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) ++ st_ = salt.client.ssh.state.SSHState(opts, pillar) ++ ++ # state.fun -> [state, fun] ++ comps = fun.split(".") ++ if len(comps) < 2: ++ __context__["retcode"] = 1 ++ return "Invalid function passed" ++ ++ # Create the low chunk, using kwargs as a base ++ kwargs.update({"state": comps[0], "fun": comps[1], "__id__": name, "name": name}) ++ ++ # Verify the low chunk ++ err = st_.verify_data(kwargs) ++ if err: ++ __context__["retcode"] = 1 ++ return err ++ ++ # Must be a list of low-chunks ++ chunks = [kwargs] ++ ++ # Retrieve file refs for the state run, so we can copy relevant ++ # files down to the minion before executing the state ++ file_refs = salt.client.ssh.state.lowstate_file_refs( ++ chunks, ++ salt.client.ssh.wrapper.state._merge_extra_filerefs( ++ kwargs.get("extra_filerefs", ""), opts.get("extra_filerefs", "") ++ ), ++ ) ++ ++ hash_type = opts["hash_type"] ++ return _create_and_execute_salt_state( ++ chunks, file_refs, test, hash_type, activate_transaction ++ ) +diff --git a/salt/utils/systemd.py b/salt/utils/systemd.py +index 060bc1e3fb..674b6d419f 100644 +--- a/salt/utils/systemd.py ++++ b/salt/utils/systemd.py +@@ -11,6 +11,7 @@ import subprocess + + # Import Salt libs + from salt.exceptions import SaltInvocationError ++import salt.utils.path + import salt.utils.stringutils + + log = logging.getLogger(__name__) +@@ -47,6 +48,27 @@ def booted(context=None): + return ret + + ++def offline(context=None): 
++ """Return True is systemd is in offline mode""" ++ contextkey = "salt.utils.systemd.offline" ++ if isinstance(context, dict): ++ if contextkey in context: ++ return context[contextkey] ++ elif context is not None: ++ raise SaltInvocationError("context must be a dictionary if passed") ++ ++ # Note that there is a difference from SYSTEMD_OFFLINE=1. Here we ++ # assume that there is no PID 1 to talk with. ++ ret = not booted(context) and salt.utils.path.which("systemctl") ++ ++ try: ++ context[contextkey] = ret ++ except TypeError: ++ pass ++ ++ return ret ++ ++ + def version(context=None): + ''' + Attempts to run systemctl --version. Returns None if unable to determine +diff --git a/tests/unit/modules/test_chroot.py b/tests/unit/modules/test_chroot.py +index de3041e98f..62808ed680 100644 +--- a/tests/unit/modules/test_chroot.py ++++ b/tests/unit/modules/test_chroot.py +@@ -31,6 +31,9 @@ from __future__ import absolute_import, print_function, unicode_literals + import sys + + # Import Salt Testing Libs ++import salt.modules.chroot as chroot ++import salt.utils.platform ++from salt.exceptions import CommandExecutionError + from tests.support.mixins import LoaderModuleMockMixin + from tests.support.unit import skipIf, TestCase + from tests.support.mock import MagicMock, patch +@@ -80,6 +83,18 @@ class ChrootTestCase(TestCase, LoaderModuleMockMixin): + self.assertTrue(chroot.create('/chroot')) + makedirs.assert_called() + ++ @patch("salt.modules.chroot.exist") ++ @patch("salt.utils.files.fopen") ++ def test_in_chroot(self, fopen): ++ """ ++ Test the detection of chroot environment. 
++ """ ++ matrix = (("a", "b", True), ("a", "a", False)) ++ for root_mountinfo, self_mountinfo, result in matrix: ++ fopen.return_value.__enter__.return_value = fopen ++ fopen.read = MagicMock(side_effect=(root_mountinfo, self_mountinfo)) ++ self.assertEqual(chroot.in_chroot(), result) ++ + @patch('salt.modules.chroot.exist') + def test_call_fails_input_validation(self, exist): + ''' +diff --git a/tests/unit/modules/test_rebootmgr.py b/tests/unit/modules/test_rebootmgr.py +new file mode 100644 +index 0000000000..4cf573997c +--- /dev/null ++++ b/tests/unit/modules/test_rebootmgr.py +@@ -0,0 +1,304 @@ ++import pytest ++import salt.modules.rebootmgr as rebootmgr ++from salt.exceptions import CommandExecutionError ++ ++# Import Salt Testing Libs ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import MagicMock, patch ++from tests.support.unit import TestCase ++ ++ ++class RebootMgrTestCase(TestCase, LoaderModuleMockMixin): ++ """ ++ Test cases for salt.modules.rebootmgr ++ """ ++ ++ def setup_loader_modules(self): ++ return {rebootmgr: {"__salt__": {}, "__utils__": {}}} ++ ++ def test_version(self): ++ """ ++ Test rebootmgr.version without parameters ++ """ ++ version = "rebootmgrctl (rebootmgr) 1.3" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": version, "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.version() == "1.3" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "--version"]) ++ ++ def test_is_active(self): ++ """ ++ Test rebootmgr.is_active without parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": None, "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.is_active() ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "is_active", "--quiet"] ++ ) ++ ++ def test_reboot(self): ++ """ ++ Test rebootmgr.reboot without parameters ++ """ ++ salt_mock = { ++ 
"cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.reboot() == "output" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "reboot"]) ++ ++ def test_reboot_order(self): ++ """ ++ Test rebootmgr.reboot with order parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.reboot("now") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "reboot", "now"] ++ ) ++ ++ def test_reboot_invalid(self): ++ """ ++ Test rebootmgr.reboot with invalid parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ with pytest.raises(CommandExecutionError): ++ rebootmgr.reboot("invalid") ++ ++ def test_cancel(self): ++ """ ++ Test rebootmgr.cancel without parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.cancel() == "output" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "cancel"]) ++ ++ def test_status(self): ++ """ ++ Test rebootmgr.status without parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ # 0 - No reboot requested ++ assert rebootmgr.status() == 0 ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "status", "--quiet"] ++ ) ++ ++ def test_set_strategy_default(self): ++ """ ++ Test rebootmgr.set_strategy without parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_strategy() == 
"output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "set-strategy"] ++ ) ++ ++ def test_set_strategy(self): ++ """ ++ Test rebootmgr.set_strategy with strategy parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_strategy("best-effort") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "set-strategy", "best-effort"] ++ ) ++ ++ def test_set_strategy_invalid(self): ++ """ ++ Test rebootmgr.strategy with invalid parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ with pytest.raises(CommandExecutionError): ++ rebootmgr.set_strategy("invalid") ++ ++ def test_get_strategy(self): ++ """ ++ Test rebootmgr.get_strategy without parameters ++ """ ++ strategy = "Reboot strategy: best-effort" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": strategy, "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.get_strategy() == "best-effort" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "get-strategy"] ++ ) ++ ++ def test_set_window(self): ++ """ ++ Test rebootmgr.set_window with parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_window("Thu,Fri 2020-*-1,5 11:12:13", "1h") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "set-window", "Thu,Fri 2020-*-1,5 11:12:13", "1h"] ++ ) ++ ++ def test_get_window(self): ++ """ ++ Test rebootmgr.get_window without parameters ++ """ ++ window = "Maintenance window is set to *-*-* 03:30:00, lasting 01h30m." 
++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": window, "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.get_window() == { ++ "time": "*-*-* 03:30:00", ++ "duration": "01h30m", ++ } ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "get-window"]) ++ ++ def test_set_group(self): ++ """ ++ Test rebootmgr.set_group with parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_group("group1") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "set-group", "group1"] ++ ) ++ ++ def test_get_group(self): ++ """ ++ Test rebootmgr.get_group without parameters ++ """ ++ group = "Etcd lock group is set to group1" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": group, "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.get_group() == "group1" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "get-group"]) ++ ++ def test_set_max(self): ++ """ ++ Test rebootmgr.set_max with default parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_max(10) == "output" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "set-max", 10]) ++ ++ def test_set_max_group(self): ++ """ ++ Test rebootmgr.set_max with group parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.set_max(10, "group1") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "set-max", "--group", "group1", 10] ++ ) ++ ++ def test_lock(self): ++ """ ++ Test rebootmgr.lock without parameters ++ """ ++ salt_mock = { 
++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.lock() == "output" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "lock"]) ++ ++ def test_lock_machine_id(self): ++ """ ++ Test rebootmgr.lock with machine_id parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.lock("machine-id") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "lock", "machine-id"] ++ ) ++ ++ def test_lock_machine_id_group(self): ++ """ ++ Test rebootmgr.lock with machine_id and group parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.lock("machine-id", "group1") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "lock", "--group", "group1", "machine-id"] ++ ) ++ ++ def test_unlock(self): ++ """ ++ Test rebootmgr.unlock without parameters ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.unlock() == "output" ++ salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "unlock"]) ++ ++ def test_unlock_machine_id(self): ++ """ ++ Test rebootmgr.unlock with machine_id parameter ++ """ ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.unlock("machine-id") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "unlock", "machine-id"] ++ ) ++ ++ def test_unlock_machine_id_group(self): ++ """ ++ Test rebootmgr.unlock with machine_id and group parameters ++ """ ++ salt_mock = { ++ 
"cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(rebootmgr.__salt__, salt_mock): ++ assert rebootmgr.unlock("machine-id", "group1") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["rebootmgrctl", "unlock", "--group", "group1", "machine-id"] ++ ) +diff --git a/tests/unit/modules/test_transactional_update.py b/tests/unit/modules/test_transactional_update.py +new file mode 100644 +index 0000000000..b42734a53d +--- /dev/null ++++ b/tests/unit/modules/test_transactional_update.py +@@ -0,0 +1,683 @@ ++import sys ++ ++import pytest ++import salt.modules.transactional_update as tu ++import salt.utils.platform ++from salt.exceptions import CommandExecutionError ++ ++# Import Salt Testing Libs ++from tests.support.mixins import LoaderModuleMockMixin ++from tests.support.mock import MagicMock, patch ++from tests.support.unit import TestCase, skipIf ++ ++ ++@skipIf(salt.utils.platform.is_windows(), "Do not run these tests on Windows") ++class TransactionalUpdateTestCase(TestCase, LoaderModuleMockMixin): ++ """ ++ Test cases for salt.modules.transactional_update ++ """ ++ ++ def setup_loader_modules(self): ++ return {tu: {"__salt__": {}, "__utils__": {}}} ++ ++ def test__global_params_no_self_update(self): ++ """Test transactional_update._global_params without self_update""" ++ assert tu._global_params(self_update=False) == [ ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ ] ++ ++ def test__global_params_self_update(self): ++ """Test transactional_update._global_params with self_update""" ++ assert tu._global_params(self_update=True) == [ ++ "--non-interactive", ++ "--drop-if-no-change", ++ ] ++ ++ def test__global_params_no_self_update_snapshot(self): ++ """Test transactional_update._global_params without self_update and ++ snapshot ++ ++ """ ++ assert tu._global_params(self_update=False, snapshot=10) == [ ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ 
"--continue", ++ 10, ++ ] ++ ++ def test__global_params_no_self_update_continue(self): ++ """Test transactional_update._global_params without self_update and ++ snapshot conitue ++ ++ """ ++ assert tu._global_params(self_update=False, snapshot="continue") == [ ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ ] ++ ++ def test__pkg_params_no_packages(self): ++ """Test transactional_update._pkg_params without packages""" ++ with pytest.raises(CommandExecutionError): ++ tu._pkg_params(pkg=None, pkgs=None, args=None) ++ ++ def test__pkg_params_pkg(self): ++ """Test transactional_update._pkg_params with single package""" ++ assert tu._pkg_params(pkg="pkg1", pkgs=None, args=None) == ["pkg1"] ++ ++ def test__pkg_params_pkgs(self): ++ """Test transactional_update._pkg_params with packages""" ++ assert tu._pkg_params(pkg=None, pkgs="pkg1", args=None) == ["pkg1"] ++ assert tu._pkg_params(pkg=None, pkgs="pkg1 pkg2 ", args=None) == [ ++ "pkg1", ++ "pkg2", ++ ] ++ assert tu._pkg_params(pkg=None, pkgs=["pkg1", "pkg2"], args=None) == [ ++ "pkg1", ++ "pkg2", ++ ] ++ ++ def test__pkg_params_pkg_pkgs(self): ++ """Test transactional_update._pkg_params with packages""" ++ assert tu._pkg_params(pkg="pkg1", pkgs="pkg2", args=None) == [ ++ "pkg1", ++ "pkg2", ++ ] ++ ++ def test__pkg_params_args(self): ++ """Test transactional_update._pkg_params with argumens""" ++ assert tu._pkg_params(pkg="pkg1", pkgs=None, args="--arg1") == [ ++ "--arg1", ++ "pkg1", ++ ] ++ assert tu._pkg_params(pkg="pkg1", pkgs=None, args="--arg1 --arg2") == [ ++ "--arg1", ++ "--arg2", ++ "pkg1", ++ ] ++ assert tu._pkg_params(pkg="pkg1", pkgs=None, args=["--arg1", "--arg2"]) == [ ++ "--arg1", ++ "--arg2", ++ "pkg1", ++ ] ++ ++ def test_transactional_transactional(self): ++ """Test transactional_update.transactional""" ++ matrix = (("/usr/sbin/transactional-update", True), ("", False)) ++ ++ for path_which, result in matrix: ++ utils_mock = {"path.which": 
MagicMock(return_value=path_which)} ++ ++ with patch.dict(tu.__utils__, utils_mock): ++ assert tu.transactional() is result ++ utils_mock["path.which"].assert_called_with("transactional-update") ++ ++ def test_in_transaction(self): ++ """Test transactional_update.in_transaction""" ++ matrix = ( ++ ("/usr/sbin/transactional-update", True, True), ++ ("/usr/sbin/transactional-update", False, False), ++ ("", True, False), ++ ("", False, False), ++ ) ++ ++ for path_which, in_chroot, result in matrix: ++ utils_mock = {"path.which": MagicMock(return_value=path_which)} ++ salt_mock = {"chroot.in_chroot": MagicMock(return_value=in_chroot)} ++ ++ with patch.dict(tu.__utils__, utils_mock): ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.in_transaction() is result ++ ++ def test_commands_with_global_params(self): ++ """Test commands that only accept global params""" ++ for cmd in [ ++ "cleanup", ++ "cleanup_snapshots", ++ "cleanup_overlays", ++ "grub_cfg", ++ "bootloader", ++ "initrd", ++ "kdump", ++ "reboot", ++ "dup", ++ "up", ++ "patch", ++ "migration", ++ ]: ++ salt_mock = { ++ "cmd.run_all": MagicMock( ++ return_value={"stdout": "output", "retcode": 0} ++ ) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert getattr(tu, cmd)() == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ cmd.replace("_", ".") ++ if cmd.startswith("grub") ++ else cmd.replace("_", "-"), ++ ] ++ ) ++ ++ def test_run_error(self): ++ """Test transactional_update.run with missing command""" ++ with pytest.raises(CommandExecutionError): ++ tu.run(None) ++ ++ def test_run_string(self): ++ """Test transactional_update.run with command as string""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.run("cmd --flag p1 p2") == "output" ++ 
salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--quiet", ++ "run", ++ "cmd", ++ "--flag", ++ "p1", ++ "p2", ++ ] ++ ) ++ ++ def test_run_array(self): ++ """Test transactional_update.run with command as array""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.run(["cmd", "--flag", "p1", "p2"]) == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--quiet", ++ "run", ++ "cmd", ++ "--flag", ++ "p1", ++ "p2", ++ ] ++ ) ++ ++ def test_pkg_commands(self): ++ """Test transactional_update.pkg_* commands""" ++ for cmd in ["pkg_install", "pkg_remove", "pkg_update"]: ++ salt_mock = { ++ "cmd.run_all": MagicMock( ++ return_value={"stdout": "output", "retcode": 0} ++ ) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert getattr(tu, cmd)("pkg1", "pkg2 pkg3", "--arg") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "pkg", ++ cmd.replace("pkg_", ""), ++ "--arg", ++ "pkg1", ++ "pkg2", ++ "pkg3", ++ ] ++ ) ++ ++ def test_rollback_error(self): ++ """Test transactional_update.rollback with wrong snapshot""" ++ with pytest.raises(CommandExecutionError): ++ tu.rollback("error") ++ ++ def test_rollback_default(self): ++ """Test transactional_update.rollback with default snapshot""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.rollback() == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["transactional-update", "rollback"] ++ ) ++ ++ def test_rollback_snapshot_number(self): ++ """Test transactional_update.rollback with 
numeric snapshot""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.rollback(10) == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["transactional-update", "rollback", 10] ++ ) ++ ++ def test_rollback_snapshot_str(self): ++ """Test transactional_update.rollback with string snapshot""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.rollback("10") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["transactional-update", "rollback", "10"] ++ ) ++ ++ def test_rollback_last(self): ++ """Test transactional_update.rollback with last snapshot""" ++ salt_mock = { ++ "cmd.run_all": MagicMock(return_value={"stdout": "output", "retcode": 0}) ++ } ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.rollback("last") == "output" ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["transactional-update", "rollback", "last"] ++ ) ++ ++ def test_pending_transaction(self): ++ """Test transactional_update.pending_transaction""" ++ matrix = ( ++ (False, ["1", "2+", "3-"], True), ++ (False, ["1", "2-", "3+"], True), ++ (False, ["1", "2", "3*"], False), ++ ) ++ ++ for in_transaction, snapshots, result in matrix: ++ salt_mock = { ++ "cmd.run_all": MagicMock( ++ return_value={"stdout": snapshots, "retcode": 0} ++ ) ++ } ++ ++ tu_in_transaction = "salt.modules.transactional_update.in_transaction" ++ with patch(tu_in_transaction) as in_transaction_mock: ++ in_transaction_mock.return_value = in_transaction ++ with patch.dict(tu.__salt__, salt_mock): ++ assert tu.pending_transaction() is result ++ salt_mock["cmd.run_all"].assert_called_with( ++ ["snapper", "--no-dbus", "list", "--columns", "number"] ++ ) ++ ++ def test_pending_transaction_in_transaction(self): ++ """Test transactional_update.pending_transaction when in transaction""" ++ 
tu_in_transaction = "salt.modules.transactional_update.in_transaction" ++ with patch(tu_in_transaction) as in_transaction_mock: ++ in_transaction_mock.return_value = True ++ with pytest.raises(CommandExecutionError): ++ tu.pending_transaction() ++ ++ def test_call_fails_input_validation(self): ++ """Test transactional_update.call missing function name""" ++ with pytest.raises(CommandExecutionError): ++ tu.call("") ++ ++ @patch("tempfile.mkdtemp") ++ def test_call_fails_untar(self, mkdtemp): ++ """Test transactional_update.call when tar fails""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value="Error"), ++ "config.option": MagicMock(), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), patch.dict(tu.__salt__, salt_mock): ++ assert tu.call("/chroot", "test.ping") == { ++ "result": False, ++ "comment": "Error", ++ } ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ utils_mock["files.rm_rf"].assert_called_once() ++ ++ @patch("tempfile.mkdtemp") ++ def test_call_fails_salt_thin(self, mkdtemp): ++ """Test transactional_update.chroot when fails salt_thin""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ "json.find_json": MagicMock(side_effect=ValueError()), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value=""), ++ "config.option": MagicMock(), ++ "cmd.run_all": MagicMock(return_value={"retcode": 1, "stderr": "Error"}), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), 
patch.dict(tu.__salt__, salt_mock): ++ assert tu.call("test.ping") == {"result": False, "comment": "Error"} ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ "--quiet", ++ "run", ++ "python{}".format(sys.version_info[0]), ++ "/var/cache/salt/minion/tmp01/salt-call", ++ "--metadata", ++ "--local", ++ "--log-file", ++ "/var/cache/salt/minion/tmp01/log", ++ "--cachedir", ++ "/var/cache/salt/minion/tmp01/cache", ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ "test.ping", ++ ] ++ ) ++ utils_mock["files.rm_rf"].assert_called_once() ++ ++ @patch("tempfile.mkdtemp") ++ def test_call_fails_function(self, mkdtemp): ++ """Test transactional_update.chroot when fails the function""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ "json.find_json": MagicMock(side_effect=ValueError()), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value=""), ++ "config.option": MagicMock(), ++ "cmd.run_all": MagicMock( ++ return_value={"retcode": 0, "stdout": "Not found", "stderr": ""} ++ ), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), patch.dict(tu.__salt__, salt_mock): ++ assert tu.call("test.ping") == {"result": False, "comment": "Not found"} ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ "--quiet", ++ "run", ++ 
"python{}".format(sys.version_info[0]), ++ "/var/cache/salt/minion/tmp01/salt-call", ++ "--metadata", ++ "--local", ++ "--log-file", ++ "/var/cache/salt/minion/tmp01/log", ++ "--cachedir", ++ "/var/cache/salt/minion/tmp01/cache", ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ "test.ping", ++ ] ++ ) ++ utils_mock["files.rm_rf"].assert_called_once() ++ ++ @patch("tempfile.mkdtemp") ++ def test_call_success_no_reboot(self, mkdtemp): ++ """Test transactional_update.chroot when succeed""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ "json.find_json": MagicMock(return_value={"return": "result"}), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value=""), ++ "config.option": MagicMock(), ++ "cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": ""}), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), patch.dict(tu.__salt__, salt_mock): ++ assert tu.call("test.ping") == "result" ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ "--quiet", ++ "run", ++ "python{}".format(sys.version_info[0]), ++ "/var/cache/salt/minion/tmp01/salt-call", ++ "--metadata", ++ "--local", ++ "--log-file", ++ "/var/cache/salt/minion/tmp01/log", ++ "--cachedir", ++ "/var/cache/salt/minion/tmp01/cache", ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ "test.ping", ++ ] ++ ) ++ utils_mock["files.rm_rf"].assert_called_once() ++ ++ @patch("salt.modules.transactional_update.reboot") ++ @patch("salt.modules.transactional_update.pending_transaction") ++ @patch("tempfile.mkdtemp") ++ def 
test_call_success_reboot(self, mkdtemp, pending_transaction, reboot): ++ """Test transactional_update.chroot when succeed and reboot""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ pending_transaction.return_value = True ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ "json.find_json": MagicMock(return_value={"return": "result"}), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value=""), ++ "config.option": MagicMock(), ++ "cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": ""}), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), patch.dict(tu.__salt__, salt_mock): ++ assert ( ++ tu.call("transactional_update.dup", activate_transaction=True) ++ == "result" ++ ) ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ "--quiet", ++ "run", ++ "python{}".format(sys.version_info[0]), ++ "/var/cache/salt/minion/tmp01/salt-call", ++ "--metadata", ++ "--local", ++ "--log-file", ++ "/var/cache/salt/minion/tmp01/log", ++ "--cachedir", ++ "/var/cache/salt/minion/tmp01/cache", ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ "transactional_update.dup", ++ ] ++ ) ++ utils_mock["files.rm_rf"].assert_called_once() ++ pending_transaction.assert_called_once() ++ reboot.assert_called_once() ++ ++ @patch("tempfile.mkdtemp") ++ def test_call_success_parameters(self, mkdtemp): ++ """Test transactional_update.chroot when succeed with parameters""" ++ mkdtemp.return_value = "/var/cache/salt/minion/tmp01" ++ utils_mock = { ++ "thin.gen_thin": MagicMock(return_value="/salt-thin.tgz"), ++ "files.rm_rf": MagicMock(), ++ 
"json.find_json": MagicMock(return_value={"return": "result"}), ++ } ++ opts_mock = {"cachedir": "/var/cache/salt/minion"} ++ salt_mock = { ++ "cmd.run": MagicMock(return_value=""), ++ "config.option": MagicMock(), ++ "cmd.run_all": MagicMock(return_value={"retcode": 0, "stdout": ""}), ++ } ++ with patch.dict(tu.__utils__, utils_mock), patch.dict( ++ tu.__opts__, opts_mock ++ ), patch.dict(tu.__salt__, salt_mock): ++ assert tu.call("module.function", key="value") == "result" ++ ++ utils_mock["thin.gen_thin"].assert_called_once() ++ salt_mock["config.option"].assert_called() ++ salt_mock["cmd.run"].assert_called_once() ++ salt_mock["cmd.run_all"].assert_called_with( ++ [ ++ "transactional-update", ++ "--non-interactive", ++ "--drop-if-no-change", ++ "--no-selfupdate", ++ "--continue", ++ "--quiet", ++ "run", ++ "python{}".format(sys.version_info[0]), ++ "/var/cache/salt/minion/tmp01/salt-call", ++ "--metadata", ++ "--local", ++ "--log-file", ++ "/var/cache/salt/minion/tmp01/log", ++ "--cachedir", ++ "/var/cache/salt/minion/tmp01/cache", ++ "--out", ++ "json", ++ "-l", ++ "quiet", ++ "--", ++ "module.function", ++ "key=value", ++ ] ++ ) ++ utils_mock["files.rm_rf"].assert_called_once() ++ ++ @patch("salt.modules.transactional_update._create_and_execute_salt_state") ++ @patch("salt.client.ssh.state.SSHHighState") ++ @patch("salt.fileclient.get_file_client") ++ @patch("salt.utils.state.get_sls_opts") ++ def test_sls( ++ self, ++ get_sls_opts, ++ get_file_client, ++ SSHHighState, ++ _create_and_execute_salt_state, ++ ): ++ """Test transactional_update.sls""" ++ SSHHighState.return_value = SSHHighState ++ SSHHighState.render_highstate.return_value = (None, []) ++ SSHHighState.state.reconcile_extend.return_value = (None, []) ++ SSHHighState.state.requisite_in.return_value = (None, []) ++ SSHHighState.state.verify_high.return_value = [] ++ ++ _create_and_execute_salt_state.return_value = "result" ++ opts_mock = { ++ "hash_type": "md5", ++ } ++ get_sls_opts.return_value = 
opts_mock ++ with patch.dict(tu.__opts__, opts_mock): ++ assert tu.sls("module") == "result" ++ _create_and_execute_salt_state.assert_called_once() ++ ++ @patch("salt.modules.transactional_update._create_and_execute_salt_state") ++ @patch("salt.client.ssh.state.SSHHighState") ++ @patch("salt.fileclient.get_file_client") ++ @patch("salt.utils.state.get_sls_opts") ++ def test_highstate( ++ self, ++ get_sls_opts, ++ get_file_client, ++ SSHHighState, ++ _create_and_execute_salt_state, ++ ): ++ """Test transactional_update.highstage""" ++ SSHHighState.return_value = SSHHighState ++ ++ _create_and_execute_salt_state.return_value = "result" ++ opts_mock = { ++ "hash_type": "md5", ++ } ++ get_sls_opts.return_value = opts_mock ++ with patch.dict(tu.__opts__, opts_mock): ++ assert tu.highstate() == "result" ++ _create_and_execute_salt_state.assert_called_once() ++ ++ @patch("salt.modules.transactional_update._create_and_execute_salt_state") ++ @patch("salt.client.ssh.state.SSHState") ++ @patch("salt.utils.state.get_sls_opts") ++ def test_single(self, get_sls_opts, SSHState, _create_and_execute_salt_state): ++ """Test transactional_update.single""" ++ SSHState.return_value = SSHState ++ SSHState.verify_data.return_value = None ++ ++ _create_and_execute_salt_state.return_value = "result" ++ opts_mock = { ++ "hash_type": "md5", ++ } ++ get_sls_opts.return_value = opts_mock ++ with patch.dict(tu.__opts__, opts_mock): ++ assert tu.single("pkg.installed", name="emacs") == "result" ++ _create_and_execute_salt_state.assert_called_once() +-- +2.28.0 + + diff --git a/xen-disk-fixes-264.patch b/xen-disk-fixes-264.patch new file mode 100644 index 0000000..406385a --- /dev/null +++ b/xen-disk-fixes-264.patch @@ -0,0 +1,1120 @@ +From d260c5984d64fc8448a6adf8d5bf07ebb08e4126 Mon Sep 17 00:00:00 2001 +From: Cedric Bosdonnat +Date: Mon, 5 Oct 2020 15:50:44 +0200 +Subject: [PATCH] Xen disk fixes (#264) + +* virt: convert volumes to disks for xen + +The libvirt xen driver does not handle disk 
of 'volume' type. We thus +need to convert them into their equivalent using the 'file' or 'block' +type (issue #58333). + +* Add pool and volume names to virt._get_all_volumes_paths + +In order to avoid code duplication, extend the _get_all_volumes_path() +helper function to also provide the volume and pool names. + +* virt.get_disk: show pools and volumes if possible + +In some cases like Xen we have to change the volume disks into file or +block ones. Show pool/volumes informations in the virt.get_disk if +possible. + +* virt: use the pool path in case the volume doesn't exist + +When computing the volume path to generate the XML of a domain, the +volume may not exist yet. This happens typically during a virt.update +when generating the new XML to compare. + +In such cases, use the pool target path to compute the volume path. +--- + changelog/58333.fixed | 1 + + salt/modules/virt.py | 258 +++++++++++------- + salt/templates/virt/libvirt_disks.jinja | 12 + + salt/templates/virt/libvirt_domain.jinja | 17 +- + tests/pytests/unit/modules/virt/__init__.py | 0 + tests/pytests/unit/modules/virt/conftest.py | 191 +++++++++++++ + .../pytests/unit/modules/virt/test_domain.py | 256 +++++++++++++++++ + .../pytests/unit/modules/virt/test_helpers.py | 11 + + tests/unit/modules/test_virt.py | 180 ++++-------- + 9 files changed, 698 insertions(+), 228 deletions(-) + create mode 100644 changelog/58333.fixed + create mode 100644 salt/templates/virt/libvirt_disks.jinja + create mode 100644 tests/pytests/unit/modules/virt/__init__.py + create mode 100644 tests/pytests/unit/modules/virt/conftest.py + create mode 100644 tests/pytests/unit/modules/virt/test_domain.py + create mode 100644 tests/pytests/unit/modules/virt/test_helpers.py + +diff --git a/changelog/58333.fixed b/changelog/58333.fixed +new file mode 100644 +index 0000000000..f958d40964 +--- /dev/null ++++ b/changelog/58333.fixed +@@ -0,0 +1 @@ ++Convert disks of volume type to file or block disks on Xen +diff --git 
a/salt/modules/virt.py b/salt/modules/virt.py +index 4a8a55ced6..34643787f9 100644 +--- a/salt/modules/virt.py ++++ b/salt/modules/virt.py +@@ -459,6 +459,8 @@ def _get_disks(conn, dom): + """ + disks = {} + doc = ElementTree.fromstring(dom.XMLDesc(0)) ++ # Get the path, pool, volume name of each volume we can ++ all_volumes = _get_all_volumes_paths(conn) + for elem in doc.findall("devices/disk"): + source = elem.find("source") + if source is None: +@@ -471,13 +473,61 @@ def _get_disks(conn, dom): + extra_properties = None + if "dev" in target.attrib: + disk_type = elem.get("type") ++ ++ def _get_disk_volume_data(pool_name, volume_name): ++ qemu_target = "{}/{}".format(pool_name, volume_name) ++ pool = conn.storagePoolLookupByName(pool_name) ++ vol = pool.storageVolLookupByName(volume_name) ++ vol_info = vol.info() ++ extra_properties = { ++ "virtual size": vol_info[1], ++ "disk size": vol_info[2], ++ } ++ ++ backing_files = [ ++ { ++ "file": node.find("source").get("file"), ++ "file format": node.find("format").get("type"), ++ } ++ for node in elem.findall(".//backingStore[source]") ++ ] ++ ++ if backing_files: ++ # We had the backing files in a flat list, nest them again. ++ extra_properties["backing file"] = backing_files[0] ++ parent = extra_properties["backing file"] ++ for sub_backing_file in backing_files[1:]: ++ parent["backing file"] = sub_backing_file ++ parent = sub_backing_file ++ ++ else: ++ # In some cases the backing chain is not displayed by the domain definition ++ # Try to see if we have some of it in the volume definition. 
++ vol_desc = ElementTree.fromstring(vol.XMLDesc()) ++ backing_path = vol_desc.find("./backingStore/path") ++ backing_format = vol_desc.find("./backingStore/format") ++ if backing_path is not None: ++ extra_properties["backing file"] = {"file": backing_path.text} ++ if backing_format is not None: ++ extra_properties["backing file"][ ++ "file format" ++ ] = backing_format.get("type") ++ return (qemu_target, extra_properties) ++ + if disk_type == "file": + qemu_target = source.get("file", "") + if qemu_target.startswith("/dev/zvol/"): + disks[target.get("dev")] = {"file": qemu_target, "zfs": True} + continue +- # Extract disk sizes, snapshots, backing files +- if elem.get("device", "disk") != "cdrom": ++ ++ if qemu_target in all_volumes.keys(): ++ # If the qemu_target is a known path, output a volume ++ volume = all_volumes[qemu_target] ++ qemu_target, extra_properties = _get_disk_volume_data( ++ volume["pool"], volume["name"] ++ ) ++ elif elem.get("device", "disk") != "cdrom": ++ # Extract disk sizes, snapshots, backing files + try: + stdout = subprocess.Popen( + [ +@@ -499,6 +549,12 @@ def _get_disks(conn, dom): + disk.update({"file": "Does not exist"}) + elif disk_type == "block": + qemu_target = source.get("dev", "") ++ # If the qemu_target is a known path, output a volume ++ if qemu_target in all_volumes.keys(): ++ volume = all_volumes[qemu_target] ++ qemu_target, extra_properties = _get_disk_volume_data( ++ volume["pool"], volume["name"] ++ ) + elif disk_type == "network": + qemu_target = source.get("protocol") + source_name = source.get("name") +@@ -537,43 +593,9 @@ def _get_disks(conn, dom): + elif disk_type == "volume": + pool_name = source.get("pool") + volume_name = source.get("volume") +- qemu_target = "{}/{}".format(pool_name, volume_name) +- pool = conn.storagePoolLookupByName(pool_name) +- vol = pool.storageVolLookupByName(volume_name) +- vol_info = vol.info() +- extra_properties = { +- "virtual size": vol_info[1], +- "disk size": vol_info[2], +- } +- 
+- backing_files = [ +- { +- "file": node.find("source").get("file"), +- "file format": node.find("format").get("type"), +- } +- for node in elem.findall(".//backingStore[source]") +- ] +- +- if backing_files: +- # We had the backing files in a flat list, nest them again. +- extra_properties["backing file"] = backing_files[0] +- parent = extra_properties["backing file"] +- for sub_backing_file in backing_files[1:]: +- parent["backing file"] = sub_backing_file +- parent = sub_backing_file +- +- else: +- # In some cases the backing chain is not displayed by the domain definition +- # Try to see if we have some of it in the volume definition. +- vol_desc = ElementTree.fromstring(vol.XMLDesc()) +- backing_path = vol_desc.find("./backingStore/path") +- backing_format = vol_desc.find("./backingStore/format") +- if backing_path is not None: +- extra_properties["backing file"] = {"file": backing_path.text} +- if backing_format is not None: +- extra_properties["backing file"][ +- "file format" +- ] = backing_format.get("type") ++ qemu_target, extra_properties = _get_disk_volume_data( ++ pool_name, volume_name ++ ) + + if not qemu_target: + continue +@@ -636,6 +658,73 @@ def _get_target(target, ssh): + return " {}://{}/{}".format(proto, target, "system") + + ++def _get_volume_path(pool, volume_name): ++ """ ++ Get the path to a volume. If the volume doesn't exist, compute its path from the pool one. ++ """ ++ if volume_name in pool.listVolumes(): ++ volume = pool.storageVolLookupByName(volume_name) ++ volume_xml = ElementTree.fromstring(volume.XMLDesc()) ++ return volume_xml.find("./target/path").text ++ ++ # Get the path from the pool if the volume doesn't exist yet ++ pool_xml = ElementTree.fromstring(pool.XMLDesc()) ++ pool_path = pool_xml.find("./target/path").text ++ return pool_path + "/" + volume_name ++ ++ ++def _disk_from_pool(conn, pool, pool_xml, volume_name): ++ """ ++ Create a disk definition out of the pool XML and volume name. 
++ The aim of this function is to replace the volume-based definition when not handled by libvirt. ++ It returns the disk Jinja context to be used when creating the VM ++ """ ++ pool_type = pool_xml.get("type") ++ disk_context = {} ++ ++ # handle dir, fs and netfs ++ if pool_type in ["dir", "netfs", "fs"]: ++ disk_context["type"] = "file" ++ disk_context["source_file"] = _get_volume_path(pool, volume_name) ++ ++ elif pool_type in ["logical", "disk", "iscsi", "scsi"]: ++ disk_context["type"] = "block" ++ disk_context["format"] = "raw" ++ disk_context["source_file"] = _get_volume_path(pool, volume_name) ++ ++ elif pool_type in ["rbd", "gluster", "sheepdog"]: ++ # libvirt can't handle rbd, gluster and sheepdog as volumes ++ disk_context["type"] = "network" ++ disk_context["protocol"] = pool_type ++ # Copy the hosts from the pool definition ++ disk_context["hosts"] = [ ++ {"name": host.get("name"), "port": host.get("port")} ++ for host in pool_xml.findall(".//host") ++ ] ++ dir_node = pool_xml.find("./source/dir") ++ # Gluster and RBD need pool/volume name ++ name_node = pool_xml.find("./source/name") ++ if name_node is not None: ++ disk_context["volume"] = "{}/{}".format(name_node.text, volume_name) ++ # Copy the authentication if any for RBD ++ auth_node = pool_xml.find("./source/auth") ++ if auth_node is not None: ++ username = auth_node.get("username") ++ secret_node = auth_node.find("./secret") ++ usage = secret_node.get("usage") ++ if not usage: ++ # Get the usage from the UUID ++ uuid = secret_node.get("uuid") ++ usage = conn.secretLookupByUUIDString(uuid).usageID() ++ disk_context["auth"] = { ++ "type": "ceph", ++ "username": username, ++ "usage": usage, ++ } ++ ++ return disk_context ++ ++ + def _gen_xml( + conn, + name, +@@ -741,41 +830,16 @@ def _gen_xml( + elif disk.get("pool"): + disk_context["volume"] = disk["filename"] + # If we had no source_file, then we want a volume +- pool_xml = ElementTree.fromstring( +- 
conn.storagePoolLookupByName(disk["pool"]).XMLDesc() +- ) ++ pool = conn.storagePoolLookupByName(disk["pool"]) ++ pool_xml = ElementTree.fromstring(pool.XMLDesc()) + pool_type = pool_xml.get("type") +- if pool_type in ["rbd", "gluster", "sheepdog"]: +- # libvirt can't handle rbd, gluster and sheepdog as volumes +- disk_context["type"] = "network" +- disk_context["protocol"] = pool_type +- # Copy the hosts from the pool definition +- disk_context["hosts"] = [ +- {"name": host.get("name"), "port": host.get("port")} +- for host in pool_xml.findall(".//host") +- ] +- dir_node = pool_xml.find("./source/dir") +- # Gluster and RBD need pool/volume name +- name_node = pool_xml.find("./source/name") +- if name_node is not None: +- disk_context["volume"] = "{}/{}".format( +- name_node.text, disk_context["volume"] +- ) +- # Copy the authentication if any for RBD +- auth_node = pool_xml.find("./source/auth") +- if auth_node is not None: +- username = auth_node.get("username") +- secret_node = auth_node.find("./secret") +- usage = secret_node.get("usage") +- if not usage: +- # Get the usage from the UUID +- uuid = secret_node.get("uuid") +- usage = conn.secretLookupByUUIDString(uuid).usageID() +- disk_context["auth"] = { +- "type": "ceph", +- "username": username, +- "usage": usage, +- } ++ ++ # For Xen VMs convert all pool types (issue #58333) ++ if hypervisor == "xen" or pool_type in ["rbd", "gluster", "sheepdog"]: ++ disk_context.update( ++ _disk_from_pool(conn, pool, pool_xml, disk_context["volume"]) ++ ) ++ + else: + if pool_type in ["disk", "logical"]: + # The volume format for these types doesn't match the driver format in the VM +@@ -3981,7 +4045,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs): + directories.add(os.path.dirname(disks[disk]["file"])) + else: + # We may have a volume to delete here +- matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"]) ++ matcher = re.match("^(?P[^/]+)/(?P.*)$", disks[disk]["file"],) + if matcher: + pool_name = 
matcher.group("pool") + pool = None +@@ -6499,29 +6563,33 @@ def _is_valid_volume(vol): + + def _get_all_volumes_paths(conn): + """ +- Extract the path and backing stores path of all volumes. ++ Extract the path, name, pool name and backing stores path of all volumes. + + :param conn: libvirt connection to use + """ +- volumes = [ +- vol +- for l in [ +- obj.listAllVolumes() +- for obj in conn.listAllStoragePools() +- if obj.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING +- ] +- for vol in l ++ pools = [ ++ pool ++ for pool in conn.listAllStoragePools() ++ if pool.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING + ] +- return { +- vol.path(): [ +- path.text +- for path in ElementTree.fromstring(vol.XMLDesc()).findall( +- ".//backingStore/path" +- ) +- ] +- for vol in volumes +- if _is_valid_volume(vol) +- } ++ volumes = {} ++ for pool in pools: ++ pool_volumes = { ++ volume.path(): { ++ "pool": pool.name(), ++ "name": volume.name(), ++ "backing_stores": [ ++ path.text ++ for path in ElementTree.fromstring(volume.XMLDesc()).findall( ++ ".//backingStore/path" ++ ) ++ ], ++ } ++ for volume in pool.listAllVolumes() ++ if _is_valid_volume(volume) ++ } ++ volumes.update(pool_volumes) ++ return volumes + + + def volume_infos(pool=None, volume=None, **kwargs): +@@ -6592,8 +6660,8 @@ def volume_infos(pool=None, volume=None, **kwargs): + if vol.path(): + as_backing_store = { + path +- for (path, all_paths) in six.iteritems(backing_stores) +- if vol.path() in all_paths ++ for (path, volume) in six.iteritems(backing_stores) ++ if vol.path() in volume.get("backing_stores") + } + used_by = [ + vm_name +diff --git a/salt/templates/virt/libvirt_disks.jinja b/salt/templates/virt/libvirt_disks.jinja +new file mode 100644 +index 0000000000..38f836afbb +--- /dev/null ++++ b/salt/templates/virt/libvirt_disks.jinja +@@ -0,0 +1,12 @@ ++{% macro network_source(disk) -%} ++ ++ {%- for host in disk.get('hosts') %} ++ ++ {%- endfor %} ++ {%- if disk.get("auth") %} ++ ++ ++ ++ {%- endif %} ++ 
++{%- endmacro %} +diff --git a/salt/templates/virt/libvirt_domain.jinja b/salt/templates/virt/libvirt_domain.jinja +index 04a61ffa78..18728a75b5 100644 +--- a/salt/templates/virt/libvirt_domain.jinja ++++ b/salt/templates/virt/libvirt_domain.jinja +@@ -1,3 +1,4 @@ ++{%- import 'libvirt_disks.jinja' as libvirt_disks -%} + + {{ name }} + {{ cpu }} +@@ -32,21 +33,13 @@ + {% if disk.type == 'file' and 'source_file' in disk -%} + + {% endif %} ++ {% if disk.type == 'block' -%} ++ ++ {% endif %} + {% if disk.type == 'volume' and 'pool' in disk -%} + + {% endif %} +- {%- if disk.type == 'network' %} +- +- {%- for host in disk.get('hosts') %} +- +- {%- endfor %} +- {%- if disk.get("auth") %} +- +- +- +- {%- endif %} +- +- {%- endif %} ++ {%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %} + + {% if disk.address -%} +
+diff --git a/tests/pytests/unit/modules/virt/__init__.py b/tests/pytests/unit/modules/virt/__init__.py +new file mode 100644 +index 0000000000..e69de29bb2 +diff --git a/tests/pytests/unit/modules/virt/conftest.py b/tests/pytests/unit/modules/virt/conftest.py +new file mode 100644 +index 0000000000..1c32ae12eb +--- /dev/null ++++ b/tests/pytests/unit/modules/virt/conftest.py +@@ -0,0 +1,191 @@ ++import pytest ++import salt.modules.config as config ++import salt.modules.virt as virt ++from salt._compat import ElementTree as ET ++from tests.support.mock import MagicMock ++ ++ ++class LibvirtMock(MagicMock): # pylint: disable=too-many-ancestors ++ """ ++ Libvirt library mock ++ """ ++ ++ class virDomain(MagicMock): ++ """ ++ virDomain mock ++ """ ++ ++ class libvirtError(Exception): ++ """ ++ libvirtError mock ++ """ ++ ++ def __init__(self, msg): ++ super().__init__(msg) ++ self.msg = msg ++ ++ def get_error_message(self): ++ return self.msg ++ ++ ++class MappedResultMock(MagicMock): ++ """ ++ Mock class consistently return the same mock object based on the first argument. 
++ """ ++ ++ _instances = {} ++ ++ def __init__(self): ++ def mapped_results(*args, **kwargs): ++ if args[0] not in self._instances.keys(): ++ raise virt.libvirt.libvirtError("Not found: {}".format(args[0])) ++ return self._instances[args[0]] ++ ++ super().__init__(side_effect=mapped_results) ++ ++ def add(self, name): ++ self._instances[name] = MagicMock() ++ ++ ++@pytest.fixture(autouse=True) ++def setup_loader(request): ++ # Create libvirt mock and connection mock ++ mock_libvirt = LibvirtMock() ++ mock_conn = MagicMock() ++ mock_conn.getStoragePoolCapabilities.return_value = "" ++ ++ mock_libvirt.openAuth.return_value = mock_conn ++ setup_loader_modules = { ++ virt: { ++ "libvirt": mock_libvirt, ++ "__salt__": {"config.get": config.get, "config.option": config.option}, ++ }, ++ config: {}, ++ } ++ with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock: ++ yield loader_mock ++ ++ ++@pytest.fixture ++def make_mock_vm(): ++ def _make_mock_vm(xml_def): ++ mocked_conn = virt.libvirt.openAuth.return_value ++ ++ doc = ET.fromstring(xml_def) ++ name = doc.find("name").text ++ os_type = "hvm" ++ os_type_node = doc.find("os/type") ++ if os_type_node is not None: ++ os_type = os_type_node.text ++ ++ mocked_conn.listDefinedDomains.return_value = [name] ++ ++ # Configure the mocked domain ++ domain_mock = virt.libvirt.virDomain() ++ if not isinstance(mocked_conn.lookupByName, MappedResultMock): ++ mocked_conn.lookupByName = MappedResultMock() ++ mocked_conn.lookupByName.add(name) ++ domain_mock = mocked_conn.lookupByName(name) ++ domain_mock.XMLDesc.return_value = xml_def ++ domain_mock.OSType.return_value = os_type ++ ++ # Return state as shutdown ++ domain_mock.info.return_value = [ ++ 4, ++ 2048 * 1024, ++ 1024 * 1024, ++ 2, ++ 1234, ++ ] ++ domain_mock.ID.return_value = 1 ++ domain_mock.name.return_value = name ++ ++ domain_mock.attachDevice.return_value = 0 ++ domain_mock.detachDevice.return_value = 0 ++ ++ return domain_mock ++ ++ return 
_make_mock_vm ++ ++ ++@pytest.fixture ++def make_mock_storage_pool(): ++ def _make_mock_storage_pool(name, type, volumes): ++ mocked_conn = virt.libvirt.openAuth.return_value ++ ++ # Append the pool name to the list of known mocked pools ++ all_pools = mocked_conn.listStoragePools.return_value ++ if not isinstance(all_pools, list): ++ all_pools = [] ++ all_pools.append(name) ++ mocked_conn.listStoragePools.return_value = all_pools ++ ++ # Ensure we have mapped results for the pools ++ if not isinstance(mocked_conn.storagePoolLookupByName, MappedResultMock): ++ mocked_conn.storagePoolLookupByName = MappedResultMock() ++ ++ # Configure the pool ++ mocked_conn.storagePoolLookupByName.add(name) ++ mocked_pool = mocked_conn.storagePoolLookupByName(name) ++ source = "" ++ if type == "disk": ++ source = "".format(name) ++ pool_path = "/path/to/{}".format(name) ++ mocked_pool.XMLDesc.return_value = """ ++ ++ ++ {} ++ ++ ++ {} ++ ++ ++ """.format( ++ type, source, pool_path ++ ) ++ mocked_pool.name.return_value = name ++ mocked_pool.info.return_value = [ ++ virt.libvirt.VIR_STORAGE_POOL_RUNNING, ++ ] ++ ++ # Append the pool to the listAllStoragePools list ++ all_pools_obj = mocked_conn.listAllStoragePools.return_value ++ if not isinstance(all_pools_obj, list): ++ all_pools_obj = [] ++ all_pools_obj.append(mocked_pool) ++ mocked_conn.listAllStoragePools.return_value = all_pools_obj ++ ++ # Configure the volumes ++ if not isinstance(mocked_pool.storageVolLookupByName, MappedResultMock): ++ mocked_pool.storageVolLookupByName = MappedResultMock() ++ mocked_pool.listVolumes.return_value = volumes ++ ++ all_volumes = [] ++ for volume in volumes: ++ mocked_pool.storageVolLookupByName.add(volume) ++ mocked_vol = mocked_pool.storageVolLookupByName(volume) ++ vol_path = "{}/{}".format(pool_path, volume) ++ mocked_vol.XMLDesc.return_value = """ ++ ++ ++ {} ++ ++ ++ """.format( ++ vol_path, ++ ) ++ mocked_vol.path.return_value = vol_path ++ mocked_vol.name.return_value = volume ++ ++ 
mocked_vol.info.return_value = [ ++ 0, ++ 1234567, ++ 12345, ++ ] ++ all_volumes.append(mocked_vol) ++ ++ # Set the listAllVolumes return_value ++ mocked_pool.listAllVolumes.return_value = all_volumes ++ return mocked_pool ++ ++ return _make_mock_storage_pool +diff --git a/tests/pytests/unit/modules/virt/test_domain.py b/tests/pytests/unit/modules/virt/test_domain.py +new file mode 100644 +index 0000000000..5f9b45ec9a +--- /dev/null ++++ b/tests/pytests/unit/modules/virt/test_domain.py +@@ -0,0 +1,256 @@ ++import salt.modules.virt as virt ++from salt._compat import ElementTree as ET ++from tests.support.mock import MagicMock, patch ++ ++from .test_helpers import append_to_XMLDesc ++ ++ ++def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool): ++ xml_def = """ ++ ++ my_vm ++ 524288 ++ 524288 ++ 1 ++ ++ linux ++ /usr/lib/grub2/x86_64-xen/grub.xen ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ """ ++ domain_mock = make_mock_vm(xml_def) ++ make_mock_storage_pool("default", "dir", ["my_vm_system"]) ++ make_mock_storage_pool("my-iscsi", "iscsi", ["unit:0:0:1"]) ++ make_mock_storage_pool("vdb", "disk", ["vdb1"]) ++ ++ ret = virt.update( ++ "my_vm", ++ disks=[ ++ {"name": "system", "pool": "default"}, ++ {"name": "iscsi-data", "pool": "my-iscsi", "source_file": "unit:0:0:1"}, ++ {"name": "vdb-data", "pool": "vdb", "source_file": "vdb1"}, ++ {"name": "file-data", "pool": "default", "size": "10240"}, ++ ], ++ ) ++ ++ assert ret["definition"] ++ define_mock = virt.libvirt.openAuth().defineXML ++ setxml = ET.fromstring(define_mock.call_args[0][0]) ++ assert "block" == setxml.find(".//disk[3]").get("type") ++ assert "/path/to/vdb/vdb1" == setxml.find(".//disk[3]/source").get("dev") ++ ++ # Note that my_vm-file-data was not an existing volume before the update ++ assert "file" == setxml.find(".//disk[4]").get("type") ++ assert "/path/to/default/my_vm_file-data" == setxml.find(".//disk[4]/source").get( ++ "file" ++ ) ++ ++ ++def test_get_disks(make_mock_vm, 
make_mock_storage_pool): ++ # test with volumes ++ vm_def = """ ++ srv01 ++ ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ """ ++ domain_mock = make_mock_vm(vm_def) ++ ++ pool_mock = make_mock_storage_pool( ++ "default", "dir", ["srv01_system", "srv01_data", "vm05_system"] ++ ) ++ ++ # Append backing store to srv01_data volume XML description ++ srv1data_mock = pool_mock.storageVolLookupByName("srv01_data") ++ append_to_XMLDesc( ++ srv1data_mock, ++ """ ++ ++ /var/lib/libvirt/images/vol01 ++ ++ """, ++ ) ++ ++ assert virt.get_disks("srv01") == { ++ "vda": { ++ "type": "disk", ++ "file": "default/srv01_system", ++ "file format": "qcow2", ++ "disk size": 12345, ++ "virtual size": 1234567, ++ }, ++ "vdb": { ++ "type": "disk", ++ "file": "default/srv01_data", ++ "file format": "qcow2", ++ "disk size": 12345, ++ "virtual size": 1234567, ++ "backing file": { ++ "file": "/var/lib/libvirt/images/vol01", ++ "file format": "qcow2", ++ }, ++ }, ++ "vdc": { ++ "type": "disk", ++ "file": "default/vm05_system", ++ "file format": "qcow2", ++ "disk size": 12345, ++ "virtual size": 1234567, ++ "backing file": { ++ "file": "/var/lib/libvirt/images/vm04_system.qcow2", ++ "file format": "qcow2", ++ "backing file": { ++ "file": "/var/testsuite-data/disk-image-template.raw", ++ "file format": "raw", ++ }, ++ }, ++ }, ++ "hda": { ++ "type": "cdrom", ++ "file format": "raw", ++ "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb", ++ }, ++ } ++ ++ ++def test_get_disk_convert_volumes(make_mock_vm, make_mock_storage_pool): ++ vm_def = """ ++ srv01 ++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ ++ ++ ++
++ ++ ++ ++ """ ++ domain_mock = make_mock_vm(vm_def) ++ ++ pool_mock = make_mock_storage_pool("default", "dir", ["srv01_system", "srv01_data"]) ++ ++ subprocess_mock = MagicMock() ++ popen_mock = MagicMock(spec=virt.subprocess.Popen) ++ popen_mock.return_value.communicate.return_value = [ ++ """[ ++ { ++ "virtual-size": 214748364800, ++ "filename": "/path/to/srv01_extra", ++ "cluster-size": 65536, ++ "format": "qcow2", ++ "actual-size": 340525056, ++ "format-specific": { ++ "type": "qcow2", ++ "data": { ++ "compat": "1.1", ++ "lazy-refcounts": false, ++ "refcount-bits": 16, ++ "corrupt": false ++ } ++ }, ++ "dirty-flag": false ++ } ++ ] ++ """ ++ ] ++ subprocess_mock.Popen = popen_mock ++ ++ with patch.dict(virt.__dict__, {"subprocess": subprocess_mock}): ++ assert { ++ "vda": { ++ "type": "disk", ++ "file": "default/srv01_system", ++ "file format": "qcow2", ++ "disk size": 12345, ++ "virtual size": 1234567, ++ }, ++ "vdb": { ++ "type": "disk", ++ "file": "default/srv01_data", ++ "file format": "raw", ++ "disk size": 12345, ++ "virtual size": 1234567, ++ }, ++ "vdc": { ++ "type": "disk", ++ "file": "/path/to/srv01_extra", ++ "file format": "qcow2", ++ "cluster size": 65536, ++ "disk size": 340525056, ++ "virtual size": 214748364800, ++ }, ++ } == virt.get_disks("srv01") +diff --git a/tests/pytests/unit/modules/virt/test_helpers.py b/tests/pytests/unit/modules/virt/test_helpers.py +new file mode 100644 +index 0000000000..f64aee2821 +--- /dev/null ++++ b/tests/pytests/unit/modules/virt/test_helpers.py +@@ -0,0 +1,11 @@ ++from salt._compat import ElementTree as ET ++ ++ ++def append_to_XMLDesc(mocked, fragment): ++ """ ++ Append an XML fragment at the end of the mocked XMLDesc return_value of mocked. 
++ """ ++ xml_doc = ET.fromstring(mocked.XMLDesc()) ++ xml_fragment = ET.fromstring(fragment) ++ xml_doc.append(xml_fragment) ++ mocked.XMLDesc.return_value = ET.tostring(xml_doc) +diff --git a/tests/unit/modules/test_virt.py b/tests/unit/modules/test_virt.py +index 27c4b9d1b0..6e61544a1f 100644 +--- a/tests/unit/modules/test_virt.py ++++ b/tests/unit/modules/test_virt.py +@@ -1141,6 +1141,65 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + self.assertEqual("vdb2", source.attrib["volume"]) + self.assertEqual("raw", disk.find("driver").get("type")) + ++ def test_get_xml_volume_xen_dir(self): ++ """ ++ Test virt._gen_xml generating disks for a Xen hypervisor ++ """ ++ self.mock_conn.listStoragePools.return_value = ["default"] ++ pool_mock = MagicMock() ++ pool_mock.XMLDesc.return_value = ( ++ "/path/to/images" ++ ) ++ volume_xml = "/path/to/images/hello_system" ++ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml ++ self.mock_conn.storagePoolLookupByName.return_value = pool_mock ++ diskp = virt._disk_profile( ++ self.mock_conn, ++ None, ++ "xen", ++ [{"name": "system", "pool": "default"}], ++ "hello", ++ ) ++ xml_data = virt._gen_xml( ++ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64", ++ ) ++ root = ET.fromstring(xml_data) ++ disk = root.findall(".//disk")[0] ++ self.assertEqual(disk.attrib["type"], "file") ++ self.assertEqual( ++ "/path/to/images/hello_system", disk.find("source").attrib["file"] ++ ) ++ ++ def test_get_xml_volume_xen_block(self): ++ """ ++ Test virt._gen_xml generating disks for a Xen hypervisor ++ """ ++ self.mock_conn.listStoragePools.return_value = ["default"] ++ pool_mock = MagicMock() ++ pool_mock.listVolumes.return_value = ["vol01"] ++ volume_xml = "/dev/to/vol01" ++ pool_mock.storageVolLookupByName.return_value.XMLDesc.return_value = volume_xml ++ self.mock_conn.storagePoolLookupByName.return_value = pool_mock ++ ++ for pool_type in ["logical", "disk", "iscsi", "scsi"]: ++ 
pool_mock.XMLDesc.return_value = "".format( ++ pool_type ++ ) ++ diskp = virt._disk_profile( ++ self.mock_conn, ++ None, ++ "xen", ++ [{"name": "system", "pool": "default", "source_file": "vol01"}], ++ "hello", ++ ) ++ xml_data = virt._gen_xml( ++ self.mock_conn, "hello", 1, 512, diskp, [], "xen", "hvm", "x86_64", ++ ) ++ root = ET.fromstring(xml_data) ++ disk = root.findall(".//disk")[0] ++ self.assertEqual(disk.attrib["type"], "block") ++ self.assertEqual("/dev/to/vol01", disk.find("source").attrib["dev"]) ++ + def test_gen_xml_cdrom(self): + """ + Test virt._gen_xml(), generating a cdrom device (different disk type, no source) +@@ -5503,124 +5562,3 @@ class VirtTestCase(TestCase, LoaderModuleMockMixin): + "vol1.qcow2", + "/path/to/file", + ) +- +- def test_get_disks(self): +- """ +- Test the virt.get_disks function +- """ +- # test with volumes +- vm_def = """ +- srv01 +- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +- +- +- +- +- +- +- +- +-
+- +- +- +- +- +- +- +- +- +-
+- +- +- +- """ +- self.set_mock_vm("srv01", vm_def) +- +- pool_mock = MagicMock() +- pool_mock.storageVolLookupByName.return_value.info.return_value = [ +- 0, +- 1234567, +- 12345, +- ] +- pool_mock.storageVolLookupByName.return_value.XMLDesc.side_effect = [ +- "", +- """ +- +- +- /var/lib/libvirt/images/vol01 +- +- +- """, +- ] +- self.mock_conn.storagePoolLookupByName.return_value = pool_mock +- +- self.assertDictEqual( +- virt.get_disks("srv01"), +- { +- "vda": { +- "type": "disk", +- "file": "default/srv01_system", +- "file format": "qcow2", +- "disk size": 12345, +- "virtual size": 1234567, +- }, +- "vdb": { +- "type": "disk", +- "file": "default/srv01_data", +- "file format": "qcow2", +- "disk size": 12345, +- "virtual size": 1234567, +- "backing file": { +- "file": "/var/lib/libvirt/images/vol01", +- "file format": "qcow2", +- }, +- }, +- "vdc": { +- "type": "disk", +- "file": "default/vm05_system", +- "file format": "qcow2", +- "disk size": 12345, +- "virtual size": 1234567, +- "backing file": { +- "file": "/var/lib/libvirt/images/vm04_system.qcow2", +- "file format": "qcow2", +- "backing file": { +- "file": "/var/testsuite-data/disk-image-template.raw", +- "file format": "raw", +- }, +- }, +- }, +- "hda": { +- "type": "cdrom", +- "file format": "raw", +- "file": "http://dev-srv.tf.local:80/pub/iso/myimage.iso?foo=bar&baz=flurb", +- }, +- }, +- ) +-- +2.28.0 + + diff --git a/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch b/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch new file mode 100644 index 0000000..ffcfe5c --- /dev/null +++ b/zypperpkg-ignore-retcode-104-for-search-bsc-1176697-.patch @@ -0,0 +1,285 @@ +From 76c38695fa663d55876902feda4a1c93211a1a9f Mon Sep 17 00:00:00 2001 +From: Alberto Planas +Date: Mon, 5 Oct 2020 16:24:16 +0200 +Subject: [PATCH] zypperpkg: ignore retcode 104 for search() + (bsc#1176697) (#270) + +--- + salt/modules/zypperpkg.py | 38 ++++++++++-- + tests/unit/modules/test_zypperpkg.py | 87 
++++++++++++++++++++++------ + 2 files changed, 101 insertions(+), 24 deletions(-) + +diff --git a/salt/modules/zypperpkg.py b/salt/modules/zypperpkg.py +index 96c3eed851..ad11da4ad1 100644 +--- a/salt/modules/zypperpkg.py ++++ b/salt/modules/zypperpkg.py +@@ -98,6 +98,8 @@ class _Zypper(object): + } + + LOCK_EXIT_CODE = 7 ++ NOT_FOUND_EXIT_CODE = 104 ++ + XML_DIRECTIVES = ['-x', '--xmlout'] + # ZYPPER_LOCK is not affected by --root + ZYPPER_LOCK = '/var/run/zypp.pid' +@@ -128,6 +130,7 @@ class _Zypper(object): + self.__no_raise = False + self.__refresh = False + self.__ignore_repo_failure = False ++ self.__ignore_not_found = False + self.__systemd_scope = False + self.__root = None + +@@ -147,6 +150,9 @@ class _Zypper(object): + # Ignore exit code for 106 (repo is not available) + if 'no_repo_failure' in kwargs: + self.__ignore_repo_failure = kwargs['no_repo_failure'] ++ # Ignore exit code for 104 (package not found) ++ if "ignore_not_found" in kwargs: ++ self.__ignore_not_found = kwargs["ignore_not_found"] + if 'systemd_scope' in kwargs: + self.__systemd_scope = kwargs['systemd_scope'] + if 'root' in kwargs: +@@ -296,6 +302,10 @@ class _Zypper(object): + if self.__root: + self.__cmd.extend(['--root', self.__root]) + ++ # Do not consider 104 as a retcode error ++ if self.__ignore_not_found: ++ kwargs["success_retcodes"] = [_Zypper.NOT_FOUND_EXIT_CODE] ++ + self.__cmd.extend(args) + kwargs['output_loglevel'] = 'trace' + kwargs['python_shell'] = False +@@ -405,7 +415,11 @@ class Wildcard(object): + Get available versions of the package. 
+ :return: + ''' +- solvables = self.zypper.nolock.xml.call('se', '-xv', self.name).getElementsByTagName('solvable') ++ solvables = ( ++ self.zypper(ignore_not_found=True) ++ .nolock.xml.call("se", "-v", self.name) ++ .getElementsByTagName("solvable") ++ ) + if not solvables: + raise CommandExecutionError('No packages found matching \'{0}\''.format(self.name)) + +@@ -983,7 +997,11 @@ def list_repo_pkgs(*args, **kwargs): + return False + + root = kwargs.get('root') or None +- for node in __zypper__(root=root).xml.call('se', '-s', *targets).getElementsByTagName('solvable'): ++ for node in ( ++ __zypper__(root=root, ignore_not_found=True) ++ .xml.call("se", "-s", *targets) ++ .getElementsByTagName("solvable") ++ ): + pkginfo = dict(node.attributes.items()) + try: + if pkginfo['kind'] != 'package': +@@ -2261,7 +2279,9 @@ def owner(*paths, **kwargs): + def _get_visible_patterns(root=None): + '''Get all available patterns in the repo that are visible.''' + patterns = {} +- search_patterns = __zypper__(root=root).nolock.xml.call('se', '-t', 'pattern') ++ search_patterns = __zypper__(root=root, ignore_not_found=True).nolock.xml.call( ++ "se", "-t", "pattern" ++ ) + for element in search_patterns.getElementsByTagName('solvable'): + installed = element.getAttribute('status') == 'installed' + patterns[element.getAttribute('name')] = { +@@ -2455,7 +2475,11 @@ def search(criteria, refresh=False, **kwargs): + cmd.append(ALLOWED_SEARCH_OPTIONS.get(opt)) + + cmd.append(criteria) +- solvables = __zypper__(root=root).nolock.noraise.xml.call(*cmd).getElementsByTagName('solvable') ++ solvables = ( ++ __zypper__(root=root, ignore_not_found=True) ++ .nolock.noraise.xml.call(*cmd) ++ .getElementsByTagName("solvable") ++ ) + if not solvables: + raise CommandExecutionError( + 'No packages found matching \'{0}\''.format(criteria) +@@ -2690,7 +2714,11 @@ def _get_patches(installed_only=False, root=None): + List all known patches in repos. 
+ ''' + patches = {} +- for element in __zypper__(root=root).nolock.xml.call('se', '-t', 'patch').getElementsByTagName('solvable'): ++ for element in ( ++ __zypper__(root=root, ignore_not_found=True) ++ .nolock.xml.call("se", "-t", "patch") ++ .getElementsByTagName("solvable") ++ ): + installed = element.getAttribute('status') == 'installed' + if (installed_only and installed) or not installed_only: + patches[element.getAttribute('name')] = { +diff --git a/tests/unit/modules/test_zypperpkg.py b/tests/unit/modules/test_zypperpkg.py +index 1fce3352c6..a3d20f66d5 100644 +--- a/tests/unit/modules/test_zypperpkg.py ++++ b/tests/unit/modules/test_zypperpkg.py +@@ -39,7 +39,10 @@ class ZyppCallMock(object): + + def __call__(self, *args, **kwargs): + # If the call is for a configuration modifier, we return self +- if any(i in kwargs for i in ('no_repo_failure', 'systemd_scope', 'root')): ++ if any( ++ i in kwargs ++ for i in ("no_repo_failure", "ignore_not_found", "systemd_scope", "root") ++ ): + return self + return MagicMock(return_value=self.__return_value)() + +@@ -1303,8 +1306,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + wcard = zypper.Wildcard(_zpr) + wcard.name, wcard.version = 'libzypp', '*' + assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.4-19.5', '16.3.2-25.1', '16.5.2-27.9.1'] +@@ -1322,8 +1326,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
+ + """ + +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + wcard = zypper.Wildcard(_zpr) + wcard.name, wcard.version = 'libzypp', '16.2.*-2*' + assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1', '16.2.6-27.9.1'] +@@ -1341,8 +1346,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + """ + +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + wcard = zypper.Wildcard(_zpr) + wcard.name, wcard.version = 'libzypp', '16.2.5*' + assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1'] +@@ -1360,8 +1366,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + """ + +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + wcard = zypper.Wildcard(_zpr) + wcard.name, wcard.version = 'libzypp', '*.1' + assert wcard._get_scope_versions(wcard._get_available_versions()) == ['16.2.5-25.1', '17.2.6-27.9.1'] +@@ -1379,8 +1386,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
+ + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + assert zypper.Wildcard(_zpr)('libzypp', '16.2.4*') == '16.2.4-19.5' + assert zypper.Wildcard(_zpr)('libzypp', '16.2*') == '16.2.5-25.1' + assert zypper.Wildcard(_zpr)('libzypp', '*6-*') == '17.2.6-27.9.1' +@@ -1399,8 +1407,10 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) ++ + assert zypper.Wildcard(_zpr)('libzypp', None) is None + + def test_wildcard_to_query_typecheck(self): +@@ -1416,8 +1426,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + assert isinstance(zypper.Wildcard(_zpr)('libzypp', '*.1'), six.string_types) + + def test_wildcard_to_query_condition_preservation(self): +@@ -1433,8 +1444,9 @@ Repository 'DUMMY' not found by its alias, number, or URI. + + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) + + for op in zypper.Wildcard.Z_OP: + assert zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op)) == '{0}17.2.6-27.9.1'.format(op) +@@ -1456,8 +1468,10 @@ Repository 'DUMMY' not found by its alias, number, or URI. 
+ + + """ +- _zpr = MagicMock() +- _zpr.nolock.xml.call = MagicMock(return_value=minidom.parseString(xmldoc)) ++ __zpr = MagicMock() ++ __zpr.nolock.xml.call.return_value = minidom.parseString(xmldoc) ++ _zpr = MagicMock(return_value=__zpr) ++ + with self.assertRaises(CommandExecutionError): + for op in ['>>', '==', '<<', '+']: + zypper.Wildcard(_zpr)('libzypp', '{0}*.1'.format(op)) +@@ -1557,3 +1571,38 @@ pattern() = package-c""" + with patch.dict(zypper.__context__, context): + zypper._clean_cache() + self.assertEqual(zypper.__context__, {'pkg.other_data': None}) ++ ++ def test_search(self): ++ """Test zypperpkg.search()""" ++ xml_mock = MagicMock(return_value=[]) ++ zypp_mock = MagicMock(return_value=xml_mock) ++ ZyppCallMock(return_value=xml_mock) ++ with patch("salt.modules.zypperpkg.__zypper__", zypp_mock): ++ zypper.search("emacs") ++ zypp_mock.assert_called_with(root=None, ignore_not_found=True) ++ xml_mock.nolock.noraise.xml.call.assert_called_with("search", "emacs") ++ ++ def test_search_not_found(self): ++ """Test zypperpkg.search()""" ++ ret = { ++ "stdout": "", ++ "stderr": None, ++ "retcode": 104, ++ } ++ run_all_mock = MagicMock(return_value=ret) ++ with patch.dict(zypper.__salt__, {"cmd.run_all": run_all_mock}): ++ self.assertRaises(CommandExecutionError, zypper.search, "vim") ++ run_all_mock.assert_called_with( ++ [ ++ "zypper", ++ "--non-interactive", ++ "--xmlout", ++ "--no-refresh", ++ "search", ++ "vim", ++ ], ++ success_retcodes=[104], ++ output_loglevel="trace", ++ python_shell=False, ++ env={"ZYPP_READONLY_HACK": "1"}, ++ ) +-- +2.28.0 + +